Thursday, May 21, 2020

Ceph notes



# subscription-manager register
# subscription-manager refresh
# subscription-manager list --available --all --matches="*Ceph*"
# subscription-manager attach --pool=$POOL_ID
# subscription-manager repos --disable=*
# subscription-manager repos --enable=rhel-7-server-rpms
# subscription-manager repos --enable=rhel-7-server-extras-rpms
# yum update

# yum install yum-utils vim -y
# yum-config-manager --disable epel

MONs
# subscription-manager repos --enable=rhel-7-server-rhceph-3-mon-rpms

OSDs
# subscription-manager repos --enable=rhel-7-server-rhceph-3-osd-rpms

Red Hat Ansible Engine administration node
# subscription-manager repos --enable=rhel-7-server-rhceph-3-tools-rpms --enable=rhel-7-server-ansible-2.6-rpms


Red Hat Ceph Storage file system (MDS)
# subscription-manager repos --enable=rhel-7-server-rhceph-3-tools-rpms


(Optional) Red Hat Ceph Storage object gateway
# subscription-manager repos --enable=rhel-7-server-rhceph-3-tools-rpms

Client node
# subscription-manager repos --enable=rhel-7-server-rhceph-3-tools-rpms





# ceph osd pool create test 8
# rados --pool test put hello-world hello-world.txt
# rados --pool test get hello-world fetch.txt
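To verify the object landed in the pool, the pool's objects can be listed:

# rados --pool test ls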

OSD tree:
# ceph osd tree


ceph mon dump






Changing Default Attributes
New pools use the defaults you set for your environment, typically in ceph.conf (see the sketch after this list):

osd pool default size = x

osd pool default min_size = x

osd pool default pg_num = x

osd pool default pgp_num = x

osd pool default crush rule = x   (older releases: osd pool default crush replicated ruleset)
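
As a sketch, these might be set in the [global] section of ceph.conf; the values below are only illustrative:

[global]
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 64
osd pool default pgp num = 64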


Create erasure-coded pool

ceph osd pool create <poolname> <pg-num> <pgp-num> erasure [<ec-profile-name>]

Default: default profile used

List existing profiles

ceph osd erasure-code-profile ls

View specific profile

ceph osd erasure-code-profile get <profilename>

Remove specific profile

ceph osd erasure-code-profile rm <profilename>
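
A short end-to-end example; the profile name "ecprofile-k2m1" and pool name "ecpool" below are made up for illustration:

# ceph osd erasure-code-profile set ecprofile-k2m1 k=2 m=1 crush-failure-domain=host
# ceph osd pool create ecpool 32 32 erasure ecprofile-k2m1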


List OSD pools:
# ceph osd lspools



Pool delete:
ceph osd pool delete <poolname> <poolname> --yes-i-really-really-mean-it
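
Recent releases refuse pool deletion unless the monitors allow it; on RHCS 4 (Nautilus) one way to enable it beforehand is:

# ceph config set mon mon_allow_pool_delete true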


Ceph disk usage:
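
Cluster-wide and per-OSD usage can be checked with:

# ceph df
# ceph osd df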







Creating access for Ceph RBD clients:


Before creating the user's access, we first need to create the disk (image) the user will access:

# rbd create rbdpool1/disk01 --size=10G

Since the client's kernel does not support the features below, we disable them:

# rbd feature disable rbdpool1/disk01 object-map fast-diff deep-flatten
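
To check which features an image currently has enabled:

# rbd info rbdpool1/disk01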

We grant the user access:
# ceph auth get-or-create client.ceph4-client1 mon 'allow r' osd 'allow rwx pool=rbdpool1' -o /etc/ceph/ceph.client.ceph4-client1.keyring

# ceph auth ls
When we check with this command, we should see an entry similar to the following:
...
client.rbd.client1
        key: AQALyMVervO2GBAA3S1SCYfXuzWU8HCab52lYQ==
        caps: [mon] allow r
        caps: [osd] allow rwx
...
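
On the client side, once the keyring is under /etc/ceph/, the image can be mapped with the matching client name (a sketch using the names from above):

# rbd --id ceph4-client1 map rbdpool1/disk01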



erasure-coded pools:
[root@rhcs4-osd1 ~]# ls -la /usr/lib64/ceph/erasure-code/
total 5960
drwxr-xr-x. 2 root root     287 Aug 27 17:04 .
drwxr-xr-x. 5 root root     122 Aug 27 17:04 ..
-rwxr-xr-x. 1 root root  898872 Aug  8 07:48 libec_clay.so
-rwxr-xr-x. 1 root root  183616 Aug  8 07:48 libec_isa.so
-rwxr-xr-x. 1 root root  357160 Aug  8 07:48 libec_jerasure_generic.so
-rwxr-xr-x. 1 root root  357160 Aug  8 07:48 libec_jerasure.so
-rwxr-xr-x. 1 root root  357152 Aug  8 07:48 libec_jerasure_sse3.so
-rwxr-xr-x. 1 root root  357152 Aug  8 07:48 libec_jerasure_sse4.so
-rwxr-xr-x. 1 root root 2034600 Aug  8 07:48 libec_lrc.so
-rwxr-xr-x. 1 root root  381864 Aug  8 07:48 libec_shec_generic.so
-rwxr-xr-x. 1 root root  381848 Aug  8 07:48 libec_shec.so
-rwxr-xr-x. 1 root root  381864 Aug  8 07:48 libec_shec_sse3.so
-rwxr-xr-x. 1 root root  381864 Aug  8 07:48 libec_shec_sse4.so
[root@rhcs4-osd1 ~]#

[root@rhcs4-osd1 ~]# lscpu
Architecture:        x86_64
CPU op-mode(s):      32-bit, 64-bit
Byte Order:          Little Endian
CPU(s):              2
On-line CPU(s) list: 0,1
Thread(s) per core:  1
Core(s) per socket:  1
Socket(s):           2
NUMA node(s):        1
Vendor ID:           GenuineIntel
CPU family:          6
Model:               94
Model name:          Intel Core Processor (Skylake, IBRS)
Stepping:            3
CPU MHz:             2808.008
BogoMIPS:            5616.01
Virtualization:      VT-x
Hypervisor vendor:   KVM
Virtualization type: full
L1d cache:           32K
L1i cache:           32K
L2 cache:            4096K
L3 cache:            16384K
NUMA node0 CPU(s):   0,1
Flags:               fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt xsaveopt xsavec xgetbv1 xsaves arat umip md_clear arch_capabilities
[root@rhcs4-osd1 ~]# lscpu  |grep sse
Flags:               fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt xsaveopt xsavec xgetbv1 xsaves arat umip md_clear arch_capabilities
[root@rhcs4-osd1 ~]# logout
Connection to rhcs4-osd1 closed.

[root@rhcs4-admin ~]# ceph osd erasure-code-profile set profile-erasure-arsiv k=2 m=1 crush-failure-domain=host

The reason for k=2 is that we have 3 OSD hosts. In short, it works like a RAID 5 with two data disks and one parity disk.
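
In raw-space terms: with k=2, m=1 the overhead is (k+m)/k = 3/2 = 1.5x the stored data and the pool tolerates the loss of any one host, whereas a size=3 replicated pool costs 3x raw space.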

[root@rhcs4-admin ~]# ceph osd erasure-code-profile ls
default
profile-erasure-arsiv

[root@rhcs4-admin ~]# ceph osd erasure-code-profile get profile-erasure-arsiv
crush-device-class=
crush-failure-domain=host
crush-root=default
jerasure-per-chunk-alignment=false
k=2
m=1

plugin=jerasure
technique=reed_sol_van
w=8








When any daemon in our Ceph cluster crashes, a warning is raised in the logs and health status.
We need to examine these warnings, fix the underlying issue, and then archive them one by one or all at once:
ceph crash archive <id>
ceph crash archive-all
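
Before archiving, the crash entries can be listed and inspected:
ceph crash ls
ceph crash info <id>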



When an RBD pool is created, the rbd application is enabled on it:

# ceph osd pool application enable my_pool_rbd rbd
enabled application 'rbd' on pool 'my_pool_rbd'
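
For an RBD pool, rbd pool init does the same application tagging and also initializes the pool, so it can be used as an alternative:

# rbd pool init my_pool_rbd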

Likewise, for radosgw a pool is created and the rgw application is enabled on it:

[root@rhcs4-admin ceph-ansible]# ceph osd pool create my_pool_radosgw 64 64
pool 'my_pool_radosgw' created

[root@rhcs4-admin ceph-ansible]# ceph osd pool application enable my_pool_radosgw rgw
enabled application 'rgw' on pool 'my_pool_radosgw'

[root@rhcs4-admin ceph-ansible]# ceph -s
  cluster:
    id:     e339839e-8678-4717-b230-30ba4bca3a1f
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum rhcs4-mon1,rhcs4-mon2,rhcs4-mon3 (age 2h)
    mgr: rhcs4-mon2(active, since 2h), standbys: rhcs4-mon1, rhcs4-mon3
    mds: cephfs:1 {0=rhcs4-mds2=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 6h), 24 in (since 25h)
    rgw: 2 daemons active (rhcs4-proxy1.rgw0, rhcs4-proxy2.rgw0)
 
  task status:
    scrub status:
        mds.rhcs4-mds2: idle
 
  data:
    pools:   13 pools, 512 pgs
    objects: 250 objects, 21 KiB
    usage:   34 GiB used, 214 GiB / 248 GiB avail
    pgs:     512 active+clean
 



[root@rhcs4-admin ceph-ansible]# rbd showmapped
id pool        namespace image  snap device    
0  my_pool_rbd           test01 -    /dev/rbd0

[root@rhcs4-admin ceph-ansible]# rbd --id my_pool_rbd unmap /dev/rbd0

[root@rhcs4-admin ceph-ansible]# rbd showmapped




When we hand an RBD disk to a user, the user's capabilities must be at least:

      mon 'allow r' osd 'allow rwx pool=mypool'

Otherwise we run into confusing errors like the ones below.

root@d7567:~#  ceph auth get-or-create  client.d7567 mon 'allow r' osd 'allow rw pool=my_pool_rbd' mds 'allow rw'   -o /etc/ceph/ceph.client.d7567.keyring
root@d7567:~# rbd --id d7567 map my_pool_rbd/d7567disk-rbd
rbd: sysfs write failed
2020-08-30T15:40:48.522+0300 7f4e3bfff700 -1 librbd::image::OpenRequest: failed to retrieve image id: (1) Operation not permitted
2020-08-30T15:40:48.522+0300 7f4e3b7fe700 -1 librbd::ImageState: 0x7f4e48005630 failed to open image: (1) Operation not permitted
rbd: error opening image d7567disk-rbd: (1) Operation not permitted
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (1) Operation not permitted
root@d7567:~# ceph auth caps client.d7567  osd 'allow rwx pool=my_pool_rbd'
updated caps for client.d7567
root@d7567:~# rbd --id d7567 map my_pool_rbd/d7567disk-rbd
rbd: sysfs write failed
2020-08-30T15:42:06.624+0300 7f9c7f54b700 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [2,1]
2020-08-30T15:42:06.624+0300 7f9c7e549700 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [2,1]
2020-08-30T15:42:06.624+0300 7f9c7ed4a700 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [2,1]
rbd: couldn't connect to the cluster!
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (13) Permission denied

Let's set the caps to r for mon and rwx for osd:
 
root@d7567:~# ceph auth caps client.d7567 mon 'allow r'  osd 'allow rwx pool=my_pool_rbd'
updated caps for client.d7567
root@d7567:~# rbd --id d7567 map my_pool_rbd/d7567disk-rbd
/dev/rbd0
root@d7567:~#


root@d7567:~# rbd showmapped
id  pool         namespace  image          snap  device   
0   my_pool_rbd             d7567disk-rbd  -     /dev/rbd0
root@d7567:~#
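
From here /dev/rbd0 behaves like any other block device; for example, to format and mount it:

# mkfs.xfs /dev/rbd0
# mount /dev/rbd0 /mnt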

