cephtest:testing_ceph

Install the VMs using the PXE system.

Packages that must be installed: openssh-server, xfsprogs, atop.

Configure the network: two interfaces for the OSD nodes, one for the MON nodes. Every node must have an IP on the public network.
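
A sketch of the /etc/network/interfaces stanzas for one OSD node (ceph1) on Ubuntu 14.04. The two-network layout (public 10.12.1.0/24, cluster 10.12.253.0/24) and the addresses come from this guide; the interface names eth0/eth1 and the gateway are assumptions to be adapted:

# public network (assumed to be eth0)
auto eth0
iface eth0 inet static
    address 10.12.1.151        # ceph1's public address, see /etc/hosts below
    netmask 255.255.255.0
    gateway 10.12.1.1          # assumed gateway

# cluster/replication network (assumed to be eth1), OSD nodes only
auto eth1
iface eth1 inet static
    address 10.12.253.50       # ceph1's cluster address, see /etc/hosts below
    netmask 255.255.255.0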

Configure the server's hostname.

Configure DNS so that every node can resolve all the others, preferably via /etc/hosts.
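
A quick check that name resolution works everywhere, using the host names defined later in this guide:

for h in ceph-admin mon1 mon2 mon3 ceph1 ceph2 ceph3; do
    getent hosts $h || echo "MISSING: $h"
done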

wget http://redtic.uclv.cu/dokuwiki/_export/code/cephtest:testing_ceph?codeblock=1 -O /tmp/run.sh
bash /tmp/run.sh
run.sh
# run this after the network has been configured
 
echo "Hostname ???? "
read XX
echo $XX > /etc/hostname     # takes effect after the next reboot
 
cat > /etc/apt/sources.list << 'EoT'
deb http://repos.uclv.edu.cu/ubuntu/ trusty main restricted
deb http://repos.uclv.edu.cu/ubuntu/ trusty-updates main restricted
deb http://repos.uclv.edu.cu/ubuntu/ trusty universe
deb http://repos.uclv.edu.cu/ubuntu/ trusty-updates universe
deb http://repos.uclv.edu.cu/ubuntu/ trusty multiverse
deb http://repos.uclv.edu.cu/ubuntu/ trusty-updates multiverse
deb http://repos.uclv.edu.cu/ubuntu/ trusty-backports main restricted universe multiverse
deb http://repos.uclv.edu.cu/ubuntu trusty-security main restricted
deb http://repos.uclv.edu.cu/ubuntu trusty-security universe
deb http://repos.uclv.edu.cu/ubuntu trusty-security multiverse
EoT
 
 
# conectado a internet
cat >> /etc/apt/sources.list << 'EoT'
deb http://download.ceph.com/debian/ jessie main
EoT
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E84AC2C0460F3994
 
 
apt-get update
 
apt-get install -y openssh-server xfsprogs atop rdate
 
cat > /etc/rc.local << 'EoT'
#!/bin/bash
 
rdate -n 10.12.1.50
 
EoT
 
cat >> /etc/hosts <<'EoT'
 
10.12.1.151 ceph1
10.12.1.152 ceph2
10.12.1.153 ceph3
 
10.12.1.154 mon1
10.12.1.155 mon2
10.12.1.156 mon3
 
10.12.1.157 ceph-admin
 
# cluster (replication) network addresses of the OSD nodes
10.12.253.50 ceph1
10.12.253.51 ceph2
10.12.253.52 ceph3
 
EoT
 
echo  "Pass para user ceph ???"
read XX
echo $XX > /tmp/res
echo $XX >> /tmp/res
 
useradd --home /home/ceph  ceph 
mkdir /home/ceph
chown ceph.ceph /home/ceph
 
cat /tmp/res | passwd ceph    # if passwd rejects piped input, use: echo "ceph:$XX" | chpasswd
rm -f /tmp/res
 
echo "ceph ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph
chmod 0440 /etc/sudoers.d/ceph

Prepare the disks on the nodes that will be OSDs

# create one partition on each disk and format it
parted -s /dev/sdc mklabel gpt mkpart primary xfs 0% 100%
mkfs.xfs -f /dev/sdc1

# verify
lsblk -f
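
If a node has several data disks, the same steps can be scripted. A minimal sketch, assuming the data disks are sdc and sdd (adjust the list to what each node actually has):

for d in sdc sdd; do
    parted -s /dev/$d mklabel gpt mkpart primary xfs 0% 100%
    mkfs.xfs -f /dev/${d}1
done
lsblk -f    # confirm the new partitions and filesystems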

On the admin node

apt-get update
apt-get install ceph-deploy
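
ceph-deploy drives the other nodes over passwordless SSH as the ceph user (the log below explicitly checks for this), so the admin node's key has to be distributed first. A minimal sketch, run as the ceph user on ceph-admin, using the hosts from /etc/hosts above:

ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa    # no passphrase
for h in mon1 mon2 mon3 ceph1 ceph2 ceph3; do
    ssh-copy-id ceph@$h                     # enter the ceph password once per host
done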

# This must be done as the ceph user

mkdir ceph-deploy
cd ceph-deploy
# ceph-deploy new mon1 mon2 mon3
[ceph_deploy.cli][INFO  ] Invoked (1.4.0): /usr/bin/ceph-deploy new mon1 mon2 mon3
[ceph_deploy.new][DEBUG ] Creating new cluster named ceph
[ceph_deploy.new][DEBUG ] Resolving host mon1
[ceph_deploy.new][DEBUG ] Monitor mon1 at 10.12.1.154
[ceph_deploy.new][INFO  ] making sure passwordless SSH succeeds
[mon1][DEBUG ] connected to host: ceph-admin
[mon1][INFO  ] Running command: ssh -CT -o BatchMode=yes mon1
[ceph_deploy.new][DEBUG ] Resolving host mon2
[ceph_deploy.new][DEBUG ] Monitor mon2 at 10.12.1.155
[ceph_deploy.new][INFO  ] making sure passwordless SSH succeeds
[mon2][DEBUG ] connected to host: ceph-admin
[mon2][INFO  ] Running command: ssh -CT -o BatchMode=yes mon2
[ceph_deploy.new][DEBUG ] Resolving host mon3
[ceph_deploy.new][DEBUG ] Monitor mon3 at 10.12.1.156
[ceph_deploy.new][INFO  ] making sure passwordless SSH succeeds
[mon3][DEBUG ] connected to host: ceph-admin
[mon3][INFO  ] Running command: ssh -CT -o BatchMode=yes mon3
[ceph_deploy.new][DEBUG ] Monitor initial members are ['mon1', 'mon2', 'mon3']
[ceph_deploy.new][DEBUG ] Monitor addrs are ['10.12.1.154', '10.12.1.155', '10.12.1.156']
[ceph_deploy.new][DEBUG ] Creating a random mon key...
[ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf...
[ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring...

Edit ceph.conf

[global]
fsid = 531b4820-2257-4f3b-b12b-e1f6827ecce5
mon_initial_members = mon1, mon2, mon3
mon_host = 10.12.1.154,10.12.1.155,10.12.1.156
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true

public network = 10.12.1.0/24
cluster network = 10.12.253.0/24

#Choose reasonable numbers for number of replicas and placement groups.
osd pool default size = 2 # Write an object 2 times
osd pool default min size = 1 # Allow writing 1 copy in a degraded state
osd pool default pg num = 64
osd pool default pgp num = 64

#Choose a reasonable crush leaf type
#0 for a 1-node cluster.
#1 for a multi node cluster in a single rack
#2 for a multi node, multi chassis cluster with multiple hosts in a chassis
#3 for a multi node cluster with hosts across racks, etc.
osd crush chooseleaf type = 1

Install Ceph on each server

# as the ceph user

ceph-deploy install ceph-admin mon1 mon2 mon3 
ceph-deploy install ceph1 ceph2 ceph3

If everything goes OK, the initial configuration for the monitors can now be created.

ceph-deploy mon create-initial

The output should show the configuration being built up little by little as it works through all the monitors. It should end with something like this:

[mon3][INFO  ] Running command: sudo initctl emit ceph-mon cluster=ceph id=mon3
[mon3][INFO  ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.mon3.asok mon_status
[mon3][DEBUG ] ********************************************************************************
[mon3][DEBUG ] status for monitor: mon.mon3
[mon3][DEBUG ] {
[mon3][DEBUG ]   "election_epoch": 1,
[mon3][DEBUG ]   "extra_probe_peers": [
[mon3][DEBUG ]     "10.12.1.154:6789/0",
[mon3][DEBUG ]     "10.12.1.155:6789/0"
[mon3][DEBUG ]   ],
[mon3][DEBUG ]   "monmap": {
[mon3][DEBUG ]     "created": "0.000000",
[mon3][DEBUG ]     "epoch": 0,
[mon3][DEBUG ]     "fsid": "531b4820-2257-4f3b-b12b-e1f6827ecce5",
[mon3][DEBUG ]     "modified": "0.000000",
[mon3][DEBUG ]     "mons": [
[mon3][DEBUG ]       {
[mon3][DEBUG ]         "addr": "10.12.1.154:6789/0",
[mon3][DEBUG ]         "name": "mon1",
[mon3][DEBUG ]         "rank": 0
[mon3][DEBUG ]       },
[mon3][DEBUG ]       {
[mon3][DEBUG ]         "addr": "10.12.1.155:6789/0",
[mon3][DEBUG ]         "name": "mon2",
[mon3][DEBUG ]         "rank": 1
[mon3][DEBUG ]       },
[mon3][DEBUG ]       {
[mon3][DEBUG ]         "addr": "10.12.1.156:6789/0",
[mon3][DEBUG ]         "name": "mon3",
[mon3][DEBUG ]         "rank": 2
[mon3][DEBUG ]       }
[mon3][DEBUG ]     ]
[mon3][DEBUG ]   },
[mon3][DEBUG ]   "name": "mon3",
[mon3][DEBUG ]   "outside_quorum": [],
[mon3][DEBUG ]   "quorum": [],
[mon3][DEBUG ]   "rank": 2,
[mon3][DEBUG ]   "state": "electing",
[mon3][DEBUG ]   "sync_provider": []
[mon3][DEBUG ] }
[mon3][DEBUG ] ********************************************************************************
[mon3][INFO  ] monitor: mon.mon3 is running
[mon3][INFO  ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.mon3.asok mon_status
[ceph_deploy.mon][INFO  ] processing monitor mon.mon1
[mon1][DEBUG ] connected to host: mon1
[mon1][INFO  ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.mon1.asok mon_status
[ceph_deploy.mon][INFO  ] mon.mon1 monitor has reached quorum!
[ceph_deploy.mon][INFO  ] processing monitor mon.mon2
[mon2][DEBUG ] connected to host: mon2
[mon2][INFO  ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.mon2.asok mon_status
[ceph_deploy.mon][INFO  ] mon.mon2 monitor has reached quorum!
[ceph_deploy.mon][INFO  ] processing monitor mon.mon3
[mon3][DEBUG ] connected to host: mon3
[mon3][INFO  ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.mon3.asok mon_status
[ceph_deploy.mon][INFO  ] mon.mon3 monitor has reached quorum!
[ceph_deploy.mon][INFO  ] all initial monitors are running and have formed quorum
[ceph_deploy.mon][INFO  ] Running gatherkeys...
[ceph_deploy.gatherkeys][DEBUG ] Checking mon1 for /etc/ceph/ceph.client.admin.keyring
[mon1][DEBUG ] connected to host: mon1
[mon1][DEBUG ] detect platform information from remote host
[mon1][DEBUG ] detect machine type
[mon1][DEBUG ] fetch remote file
[ceph_deploy.gatherkeys][DEBUG ] Got ceph.client.admin.keyring key from mon1.
[ceph_deploy.gatherkeys][DEBUG ] Have ceph.mon.keyring
[ceph_deploy.gatherkeys][DEBUG ] Checking mon1 for /var/lib/ceph/bootstrap-osd/ceph.keyring
[mon1][DEBUG ] connected to host: mon1
[mon1][DEBUG ] detect platform information from remote host
[mon1][DEBUG ] detect machine type
[mon1][DEBUG ] fetch remote file
[ceph_deploy.gatherkeys][DEBUG ] Got ceph.bootstrap-osd.keyring key from mon1.
[ceph_deploy.gatherkeys][DEBUG ] Checking mon1 for /var/lib/ceph/bootstrap-mds/ceph.keyring
[mon1][DEBUG ] connected to host: mon1
[mon1][DEBUG ] detect platform information from remote host
[mon1][DEBUG ] detect machine type
[mon1][DEBUG ] fetch remote file
[ceph_deploy.gatherkeys][DEBUG ] Got ceph.bootstrap-mds.keyring key from mon1.

OSD creation process

To list the available disks, use:

ceph-deploy disk list ceph1

To create an OSD on a specific disk or partition, use the commands below. A zap can be run first to wipe the disk, if desired.

ceph-deploy disk zap ceph1:sdc  ......

ceph-deploy osd create ceph1:sdc
# to use a separate journal device
ceph-deploy osd create ceph1:sdc:/dev/sdb1
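
The same two commands repeat for every OSD node. A short sketch, assuming each node exposes its data disk as sdc:

for node in ceph1 ceph2 ceph3; do
    ceph-deploy disk zap $node:sdc
    ceph-deploy osd create $node:sdc
done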

Finish by deploying the configuration

ceph-deploy admin ceph-admin mon1 mon2 mon3 ceph1 ceph2 ceph3
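
On the nodes where the ceph CLI will be run (ceph-admin here), the admin keyring copied by the previous command is normally readable only by root. If ceph health fails with an authentication error when run as the ceph user, a common fix is:

sudo chmod +r /etc/ceph/ceph.client.admin.keyring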

It is important that all clocks are kept in sync. Using ntp is recommended, as described in the article linked at the end of this entry.
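
A minimal ntp sketch for every node, pointing at the local time source 10.12.1.50 that run.sh already uses with rdate (the sed line, which disables the default pool servers, is an assumption about the stock /etc/ntp.conf):

apt-get install -y ntp
sed -i 's/^server /#server /' /etc/ntp.conf        # comment out the default pool servers
echo "server 10.12.1.50 iburst" >> /etc/ntp.conf   # local time source
service ntp restart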

If everything went well, it should look something like this:

ceph@ceph-admin:~/cluster$ ceph health
HEALTH_OK
ceph@ceph-admin:~/cluster$ ceph -w
    cluster 531b4820-2257-4f3b-b12b-e1f6827ecce5
     health HEALTH_OK
     monmap e1: 3 mons at {mon1=10.12.1.154:6789/0,mon2=10.12.1.155:6789/0,mon3=10.12.1.156:6789/0}, election epoch 8, quorum 0,1,2 mon1,mon2,mon3
     osdmap e13: 3 osds: 3 up, 3 in
      pgmap v23: 192 pgs, 3 pools, 0 bytes data, 0 objects
            104 MB used, 82795 MB / 82900 MB avail
                 192 active+clean
2016-06-02 12:57:14.610325 mon.0 [INF] osdmap e13: 3 osds: 3 up, 3 in

ceph@ceph-admin:~/cluster$ ceph osd tree
# id    weight  type name       up/down reweight
-1      0.09    root default
-2      0.03            host ceph1
0       0.03                    osd.0   up      1
-3      0.03            host ceph2
1       0.03                    osd.1   up      1
-4      0.03            host ceph3
2       0.03                    osd.2   up      1


To use a block device

ceph@ceph-admin:~/cluster$ rbd create mirepo --size 20480

ceph@ceph-admin:~/cluster$ rbd  ls
mirepo

ceph@ceph-admin:~/cluster$ rbd --image mirepo info
rbd image 'mirepo':
        size 20480 MB in 5120 objects
        order 22 (4096 kB objects)
        block_name_prefix: rb.0.102d.2ae8944a
        format: 1

Then, on the machine where that block device is to be mapped:

root@net-test:~#
root@net-test:~# modprobe rbd
root@net-test:~#
root@net-test:~# echo "10.12.1.154,10.12.1.155,10.12.1.156 name=admin,secret=AQDBYlBX8CiSHRAAVGYor8pmE2oGIQk7YO3Tig== rbd mirepo"  > /sys/bus/rbd/add

root@net-test:~# ll /dev/rbd*
brw-rw---- 1 root disk 254, 0 Jun  2 13:18 /dev/rbd0

/dev/rbd:
total 0
drwxr-xr-x  3 root root   60 Jun  2 13:18 .
drwxr-xr-x 18 root root 3020 Jun  2 13:18 ..
drwxr-xr-x  2 root root   60 Jun  2 13:18 rbd
root@net-test:~#
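
From here /dev/rbd0 behaves like any other block device. A minimal usage sketch (the mount point /mnt/mirepo is just an example name):

mkfs.xfs /dev/rbd0
mkdir -p /mnt/mirepo
mount /dev/rbd0 /mnt/mirepo
df -h /mnt/mirepo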

The secret is found on any monitor, in the file:

root@mon1:~# cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
        key = AQDBYlBX8CiSHRAAVGYor8pmE2oGIQk7YO3Tig==
root@mon1:~#

Taken from:

http://www.virtualtothecore.com/en/adventures-with-ceph-storage-part-5-install-ceph-in-the-lab/

Brief guide to remove/add an OSD

Assigning only a group of disks to a pool
