Install the VMs using the PXE system.
Packages that must be installed: openssh-server, xfsprogs, atop.
Configure the network: two interfaces for the OSD nodes, one for the MON nodes, and every node needs a public IP (a sample interface file follows).
Configure each server's hostname.
Configure name resolution so that all nodes can reach each other by name, preferably via /etc/hosts.
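The page does not include the interface configuration itself; the following is a minimal sketch of /etc/network/interfaces for an OSD node on Ubuntu 14.04. The NIC names eth0/eth1 and the 10.12.1.1 gateway are assumptions; the addresses match the /etc/hosts entries added by the script below.

# /etc/network/interfaces on OSD node ceph1 (sketch; eth0/eth1 and gateway assumed)
auto lo
iface lo inet loopback

# public network, shared with the monitors and clients
auto eth0
iface eth0 inet static
    address 10.12.1.151
    netmask 255.255.255.0
    gateway 10.12.1.1

# cluster network, used only for OSD replication traffic
auto eth1
iface eth1 inet static
    address 10.12.253.50
    netmask 255.255.255.0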
Bootstrap each node by downloading the setup script (published as codeblock 1 of this page) and running it:

wget "http://redtic.uclv.cu/dokuwiki/_export/code/cephtest:testing_ceph?codeblock=1" -O /tmp/run.sh
bash /tmp/run.sh
# Run this after the network has been configured
echo "Hostname ???? "
read XX
echo $XX > /etc/hostname

# Point apt at the local Ubuntu mirror
cat > /etc/apt/sources.list << 'EoT'
deb http://repos.uclv.edu.cu/ubuntu/ trusty main restricted
deb http://repos.uclv.edu.cu/ubuntu/ trusty-updates main restricted
deb http://repos.uclv.edu.cu/ubuntu/ trusty universe
deb http://repos.uclv.edu.cu/ubuntu/ trusty-updates universe
deb http://repos.uclv.edu.cu/ubuntu/ trusty multiverse
deb http://repos.uclv.edu.cu/ubuntu/ trusty-updates multiverse
deb http://repos.uclv.edu.cu/ubuntu/ trusty-backports main restricted universe multiverse
deb http://repos.uclv.edu.cu/ubuntu trusty-security main restricted
deb http://repos.uclv.edu.cu/ubuntu trusty-security universe
deb http://repos.uclv.edu.cu/ubuntu trusty-security multiverse
EoT

apt-get update
apt-get install -y openssh-server xfsprogs atop rdate

# Sync the clock against the local time server at every boot
cat > /etc/rc.local << 'EoT'
#!/bin/bash
rdate -n 10.12.1.50
EoT

# Make every node resolvable by name (public and cluster addresses)
cat >> /etc/hosts << 'EoT'
10.12.1.151 ceph1
10.12.1.152 ceph2
10.12.1.153 ceph3
10.12.1.154 mon1
10.12.1.155 mon2
10.12.1.156 mon3
10.12.1.157 ceph-admin
10.12.253.50 ceph1
10.12.253.51 ceph2
10.12.253.52 ceph3
EoT

# Create the ceph deployment user with passwordless sudo
echo "Password for the ceph user ???"
read XX
echo $XX > /tmp/res
echo $XX >> /tmp/res   # passwd reads the password twice
useradd --home /home/ceph ceph
mkdir /home/ceph
chown ceph.ceph /home/ceph
cat /tmp/res | passwd ceph
rm -f /tmp/res
echo "ceph ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph
chmod 0440 /etc/sudoers.d/ceph
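A quick sanity check after the script runs, looping over the hostnames it just added to /etc/hosts:

# every node should resolve and answer on the public network
for h in ceph1 ceph2 ceph3 mon1 mon2 mon3 ceph-admin; do
    ping -c 1 -W 2 "$h" > /dev/null && echo "$h OK" || echo "$h FAILED"
done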
Prepare the disks on the nodes that will be OSDs
# Create one partition on each OSD disk
parted -s /dev/sdc mklabel gpt mkpart primary xfs 0% 100%
# Format the new partition
mkfs.xfs -f /dev/sdc1
# Verify
lsblk -f
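If a node carries more than one OSD disk, the same two commands can be applied in a loop; /dev/sdd here is an assumed second disk, so adjust the list per node:

# prepare every OSD data disk on this node
for d in /dev/sdc /dev/sdd; do
    parted -s "$d" mklabel gpt mkpart primary xfs 0% 100%
    mkfs.xfs -f "${d}1"   # format the partition just created
done
lsblk -f                  # verify the partitions and filesystems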
On the admin node
apt-get update
apt-get install ceph-deploy
mkdir ceph-deploy
cd ceph-deploy
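ceph-deploy needs passwordless SSH from the admin node to every other node (the log below checks this with BatchMode=yes). That step is not shown on this page; a sketch using the ceph user created by the script above:

# generate a key on the admin node and copy it to every node
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
for h in mon1 mon2 mon3 ceph1 ceph2 ceph3; do
    ssh-copy-id ceph@"$h"
done

# let a plain "ssh mon1" pick the ceph user, as in the log below
cat >> ~/.ssh/config << 'EoT'
Host mon* ceph*
    User ceph
EoT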
root@ceph-admin:~# ceph-deploy new mon1 mon2 mon3
[ceph_deploy.cli][INFO ] Invoked (1.4.0): /usr/bin/ceph-deploy new mon1 mon2 mon3
[ceph_deploy.new][DEBUG ] Creating new cluster named ceph
[ceph_deploy.new][DEBUG ] Resolving host mon1
[ceph_deploy.new][DEBUG ] Monitor mon1 at 10.12.1.154
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[mon1][DEBUG ] connected to host: ceph-admin
[mon1][INFO ] Running command: ssh -CT -o BatchMode=yes mon1
[ceph_deploy.new][DEBUG ] Resolving host mon2
[ceph_deploy.new][DEBUG ] Monitor mon2 at 10.12.1.155
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[mon2][DEBUG ] connected to host: ceph-admin
[mon2][INFO ] Running command: ssh -CT -o BatchMode=yes mon2
[ceph_deploy.new][DEBUG ] Resolving host mon3
[ceph_deploy.new][DEBUG ] Monitor mon3 at 10.12.1.156
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[mon3][DEBUG ] connected to host: ceph-admin
[mon3][INFO ] Running command: ssh -CT -o BatchMode=yes mon3
[ceph_deploy.new][DEBUG ] Monitor initial members are ['mon1', 'mon2', 'mon3']
[ceph_deploy.new][DEBUG ] Monitor addrs are ['10.12.1.154', '10.12.1.155', '10.12.1.156']
[ceph_deploy.new][DEBUG ] Creating a random mon key...
[ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf...
[ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring...
Edit ceph.conf
[global]
fsid = 531b4820-2257-4f3b-b12b-e1f6827ecce5
mon_initial_members = mon1, mon2, mon3
mon_host = 10.12.1.154,10.12.1.155,10.12.1.156
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true
public network = 10.12.1.0/24
cluster network = 10.12.253.0/24

# Choose reasonable numbers for the number of replicas and placement groups.
osd pool default size = 2       # Write an object 2 times
osd pool default min size = 1   # Allow writing 1 copy in a degraded state
osd pool default pg num = 64
osd pool default pgp num = 64

# Choose a reasonable crush leaf type:
# 0 for a 1-node cluster.
# 1 for a multi-node cluster in a single rack
# 2 for a multi-node, multi-chassis cluster with multiple hosts in a chassis
# 3 for a multi-node cluster with hosts across racks, etc.
osd crush chooseleaf type = 1
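The pg numbers above follow the usual rule of thumb, total PGs of roughly (number of OSDs x 100) / replica count, rounded to a nearby power of two. For this 3-OSD, size-2 lab that gives about 150; 64 or 128 are the closest powers of two, and the page keeps the smaller value for a tiny test cluster. A sketch of the arithmetic:

# rule-of-thumb placement-group count
osds=3    # one OSD disk per ceph node in this lab
size=2    # osd pool default size above
echo $(( osds * 100 / size ))   # prints 150; 64 or 128 are nearby powers of two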
Taken from: http://www.virtualtothecore.com/en/adventures-with-ceph-storage-part-5-install-ceph-in-the-lab/