Locating the physical disk from an OSD's id

Method 1: look it up via the OSD's fsid

# 1. First, work out which host the OSD lives on (for example with: ceph osd find <id>)

# 2. On that host, read the OSD's fsid
root@pve-ceph01:~# cat /var/lib/ceph/osd/ceph-0/fsid 
be5ea6c8-06f6-45de-ad1d-5616a8723882

# 3. Match the fsid from step 2 against the LV names in lsblk (a scripted version follows the output below)
root@pve-ceph01:~# lsblk 
NAME                                                                                                  MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
vda                                                                                                   252:0    0   50G  0 disk 
├─vda1                                                                                                252:1    0 1007K  0 part 
├─vda2                                                                                                252:2    0  512M  0 part 
└─vda3                                                                                                252:3    0 49.5G  0 part 
  ├─pve-swap                                                                                          253:0    0    4G  0 lvm  [SWAP]
  ├─pve-root                                                                                          253:1    0 12.3G  0 lvm  /
  ├─pve-data_tmeta                                                                                    253:2    0    1G  0 lvm  
  │ └─pve-data                                                                                        253:4    0 25.1G  0 lvm  
  └─pve-data_tdata                                                                                    253:3    0 25.1G  0 lvm  
    └─pve-data                                                                                        253:4    0 25.1G  0 lvm  
vdb                                                                                                   252:16   0   30G  0 disk 
└─ceph--18ac0abe--687a--4138--9711--b17b51dc02e2-osd--block--be5ea6c8--06f6--45de--ad1d--5616a8723882 253:5    0   30G  0 lvm  
vdc                                                                                                   252:32   0   30G  0 disk 
└─ceph--dc1c6970--6943--466f--bc7c--33e616947fce-osd--block--5c326b21--96e1--42b5--b85d--5e970428bba0 253:6    0   30G  0 lvm

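Steps 2 and 3 can be combined into a short script. A minimal sketch (my addition, not part of the original write-up): lsblk renders every "-" in an LV name as "--", so the fsid has to be escaped before matching.

# run on the OSD's host; set OSD_ID to the OSD you are chasing
OSD_ID=0
FSID=$(cat /var/lib/ceph/osd/ceph-${OSD_ID}/fsid)
lsblk | grep -B1 "${FSID//-/--}"    # -B1 also prints the parent disk line directly above the matching LV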

Method 2: ceph-volume lvm list 9

# Note the two ways to invoke it
ceph-volume lvm list 9            # the 9 here is the OSD id
ceph-volume lvm list /dev/vdb     # you can also pass a disk to see which OSD it backs

root@pve-ceph01:~# ceph-volume lvm list 9
====== osd.9 =======

  [block]       /dev/ceph-4597d1b3-8f62-4791-b6c2-4d459167fdee/osd-block-273c0d18-4586-4655-977f-d6f3fddbea99

      block device              /dev/ceph-4597d1b3-8f62-4791-b6c2-4d459167fdee/osd-block-273c0d18-4586-4655-977f-d6f3fddbea99
      block uuid                dMGeWO-E7BK-H89R-c6m3-cRWE-Q0Ns-b5pXkx
      cephx lockbox secret      
      cluster fsid              95a809b9-5e68-4a73-9964-43e246df732d
      cluster name              ceph
      crush device class        
      encrypted                 0
      osd fsid                  273c0d18-4586-4655-977f-d6f3fddbea99
      osd id                    9
      osdspec affinity          
      type                      block
      vdo                       0
      devices                   /dev/vdd        # this line tells you which physical disk it is

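For scripting, the backing device can be pulled straight out of that output; a small sketch based on the field names shown above:

ceph-volume lvm list 9 | awk '/devices/ {print $2}'    # prints /dev/vdd

ceph-volume lvm list also accepts --format json if you want machine-readable output instead.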

Method 3 (recommended)

## List the cluster's component devices (shows which node each OSD is on, and which disk it maps to)
    root@fuse01:~# ceph device ls
    DEVICE                                     HOST:DEV        DAEMONS                   WEAR  LIFE EXPECTANCY
    INTEL_SSDSC2BF180A5_CVTS603500GF180IGN                                                63%                 
    SEAGATE_ST8000NM001A_WKD0N0W00000E0033NP4  fuse03:sdg      osd.8                                          
    SEAGATE_ST8000NM001A_WKD0NKNV0000E0077MJB  fuse02:sdb      osd.5                                          
    SEAGATE_ST8000NM001A_WKD0NWQW0000E0075D0K  fuse02:sdc      osd.6                                          
    SEAGATE_ST8000NM001A_WKD0PW0K0000E0075D56  fuse01:sdc      osd.3                                          
    SEAGATE_ST8000NM001A_WKD0PW0P0000E0068VD0  fuse01:sdg      osd.9                                          
    SEAGATE_ST8000NM001A_WKD0PW490000E0079N3C  fuse03:sdb      osd.7                                          
    ST4000NM0035-1V4107_ZC15DLQ6               fuse02:sdf      osd.13                                         
    ST4000NM0035-1V4107_ZC15DN01               fuse03:sdc      osd.14                                         
    ST4000NM0035-1V4107_ZC15DXTW               fuse01:sde      osd.4                                          
    ST4000NM0035-1V4107_ZC15ECYF               fuse01:sdb      osd.2                                          
    Samsung_SSD_970_PRO_512GB_S5JYNG0N102119W  fuse03:nvme0n1  osd.1 osd.14 osd.7 osd.8   39%                 
    Samsung_SSD_970_PRO_512GB_S5JYNG0N102182X  fuse02:nvme0n1  osd.0 osd.13 osd.5 osd.6   32%                 
    Samsung_SSD_970_PRO_512GB_S5JYNG0N102200X  fuse01:nvme0n1  osd.2 osd.3 osd.4 osd.9    42%                 
    SanDisk_Ultra_II_240GB_170634802005        fuse02:sda      mon.fuse02                  0%                 
    SanDisk_Ultra_II_240GB_171123800593        fuse03:sda      mon.fuse03                  0%                 
    SanDisk_Ultra_II_240GB_174219800886        fuse01:sda      mon.fuse01                 49%                 
    TOSHIBA_MD04ABA400V_28EGKWSVFMYB           fuse03:sdh      osd.1                                          
    WDC_WD40EZRZ-00GXCB0_WD-WCC7K7VYYDH3       fuse02:sdh      osd.0     

    root@fuse01:~# ceph device info ST4000NM0035-1V4107_ZC15ECYF
    device ST4000NM0035-1V4107_ZC15ECYF
    attachment fuse01 sdb 
    osd.2

    root@fuse01:~# ceph device ls | grep osd.8
    SEAGATE_ST8000NM001A_WKD0N0W00000E0033NP4  fuse03:sdg      osd.8    
    Samsung_SSD_970_PRO_512GB_S5JYNG0N102119W  fuse03:nvme0n1  osd.1 osd.14 osd.7 osd.8   39%                 
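
    Note that grep osd.8 matches two rows here: the HDD backing the OSD and the Samsung NVMe, presumably the shared DB/WAL device serving four OSDs. Instead of grepping, newer releases also ship dedicated subcommands for exactly this (a sketch; check that your version supports them):

    ceph device ls-by-daemon osd.8    # devices backing a given daemon
    ceph device ls-by-host fuse03     # tracked devices on a given host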

Method 4

root@QV1101:~# ceph osd metadata 108
{
    "id": 108,
    "arch": "x86_64",
    "back_addr": "[v2:10.10.10.113:6850/1026735,v1:10.10.10.113:6851/1026735]",
    "back_iface": "",
    "bluefs": "1",
    "bluefs_dedicated_db": "0",
    "bluefs_dedicated_wal": "0",
    "bluefs_single_shared_device": "1",
    "bluestore_bdev_access_mode": "blk",
    "bluestore_bdev_block_size": "4096",
    "bluestore_bdev_dev_node": "/dev/dm-11",
    "bluestore_bdev_devices": "sdh",
    "bluestore_bdev_driver": "KernelDevice",
    "bluestore_bdev_partition_path": "/dev/dm-11",
    "bluestore_bdev_rotational": "1",
    "bluestore_bdev_size": "4000783007744",
    "bluestore_bdev_support_discard": "0",
    "bluestore_bdev_type": "hdd",
    "ceph_release": "octopus",
    "ceph_version": "ceph version 15.2.17 (542df8d06ef24dbddcf4994db16bcc4c89c9ec2d) octopus (stable)",
    "ceph_version_short": "15.2.17",
    "cpu": "Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz",
    "default_device_class": "hdd",
    "device_ids": "sdh=TOSHIBA_MG08ADA400N_83P0A09AFYPG",
    "device_paths": "sdh=/dev/disk/by-path/pci-0000:81:00.0-sas-phy6-lun-0",
    "devices": "sdh",
    "distro": "debian",
    "distro_description": "Debian GNU/Linux 11 (bullseye)",
    "distro_version": "11",
    "front_addr": "[v2:10.10.10.113:6848/1026735,v1:10.10.10.113:6849/1026735]",
    "front_iface": "",
    "hb_back_addr": "[v2:10.10.10.113:6854/1026735,v1:10.10.10.113:6855/1026735]",
    "hb_front_addr": "[v2:10.10.10.113:6852/1026735,v1:10.10.10.113:6853/1026735]",
    "hostname": "QV1102",
    "journal_rotational": "1",
    "kernel_description": "#1 SMP PVE 5.15.30-3 (Fri, 22 Apr 2022 18:08:27 +0200)",
    "kernel_version": "5.15.30-2-pve",
    "mem_swap_kb": "8388604",
    "mem_total_kb": "65846088",
    "network_numa_unknown_ifaces": "back_iface,front_iface",
    "objectstore_numa_unknown_devices": "sdh",
    "os": "Linux",
    "osd_data": "/var/lib/ceph/osd/ceph-108",
    "osd_objectstore": "bluestore",
    "osdspec_affinity": "",
    "rotational": "1"
}
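
The metadata is plain JSON, so it scripts well. A minimal sketch, assuming jq is installed, that prints an id/host/device table for every OSD in the cluster:

for id in $(ceph osd ls); do
    ceph osd metadata "$id" | jq -r '"osd.\(.id)\t\(.hostname)\t\(.devices)"'
done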