Compilation of:
- https://www.dmosk.ru/instruktions.php?object=ceph-centos7
- https://blog.mailon.com.ua/
- https://www.howtoforge.com/tutorial/how-to-build-a-ceph-cluster-on-centos-7/
- http://vasilisc.com/ceph-3
- https://doc.ispsystem.com/index.php/Ceph_cluster
- https://doc.ispsystem.ru/index.php/Создание_Ceph-кластера
- https://www.sebastien-han.fr/blog/2013/05/13/deploy-a-ceph-mds-server/
- https://blog.programster.org/ubuntu-14-04-add-a-ceph-metadata-server
- http://docs.ceph.com/docs/master/cephfs/cache-size-limits/
- http://docs.ceph.com/docs/jewel/start/hardware-recommendations/
- http://docs.ceph.com/docs/master/rados/deployment/ceph-deploy-mds/
- http://docs.ceph.com/docs/master/install/
- http://docs.ceph.com/docs/master/radosgw/swift/
- http://docs.ceph.com/docs/master/radosgw/swift/python/
- http://docs.ceph.com/docs/dumpling/radosgw/
- https://habr.com/post/315646/
- https://habr.com/post/313644/
Prepare a user with NOPASSWD sudo rights in /etc/sudoers
# List of cluster nodes to prepare; extend as needed.
# (No spaces around '=' — 'NODE_LIST = ...' would run NODE_LIST as a command.)
NODE_LIST="node1 node2 node3"
for node in $NODE_LIST; do
  # Bring the node up to date and install an NTP client.
  ssh user@"$node" 'sudo yum update -y'
  ssh user@"$node" 'sudo yum install ntp -y'
  # Set the timezone ('\cp' bypasses any 'cp -i' alias).
  ssh user@"$node" 'sudo \cp /usr/share/zoneinfo/Europe/Moscow /etc/localtime'
  # Add a daily NTP resync job non-interactively
  # ('crontab -e' opens an editor and cannot be scripted over ssh).
  ssh user@"$node" '( sudo crontab -l 2>/dev/null; echo "0 0 * * * /sbin/ntpdate ru.pool.ntp.org" ) | sudo crontab -'
  ssh user@"$node" 'sudo ntpdate ru.pool.ntp.org'
  ssh user@"$node" 'sudo hwclock --systohc'
  # Disable SELinux. 'setenforce 0' only affects the running system;
  # editing /etc/selinux/config makes the change persistent.
  ssh user@"$node" 'sudo sed -i "s/SELINUX=enforcing/SELINUX=disabled/" /etc/selinux/config'
  # ...or, more radically, remove it entirely:
  ssh user@"$node" 'sudo yum remove selinux* -y'
  # Open the port used by Ceph monitors:
  ssh user@"$node" 'sudo firewall-cmd --permanent --add-port=6789/tcp'
  # Open the port range used by OSD storage daemons:
  ssh user@"$node" 'sudo firewall-cmd --permanent --add-port=6800-7100/tcp'
  ssh user@"$node" 'sudo firewall-cmd --reload'
  # Create the deployment user.
  ssh user@"$node" 'sudo useradd ceph -m'
  ssh -t user@"$node" 'sudo passwd ceph'  # disable password auth after deploying ssh keys
  ssh user@"$node" 'sudo usermod -aG wheel ceph'
  # Alternatively, grant passwordless sudo via a sudoers drop-in:
  #   echo "ceph ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph
  #   echo "Defaults:ceph !requiretty" | sudo tee -a /etc/sudoers.d/ceph
  #   sudo chmod 0440 /etc/sudoers.d/ceph
  # If we are working without DNS, add every node to /etc/hosts.
  # Manual step: create the Ceph repo file (contents shown below).
  # '-t' allocates a tty so the interactive editor works over ssh.
  ssh -t user@"$node" 'sudo vi /etc/yum.repos.d/ceph.repo'
done
For every node: sudo vi /etc/yum.repos.d/ceph.repo
and add:
[ceph-noarch]
name=Ceph noarch packages
baseurl=http://download.ceph.com/rpm-hammer/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
And then:
for node in $NODE_LIST; do
  ssh user@"$node" 'sudo yum update -y'
  ssh user@"$node" 'sudo yum install ceph-deploy -y'
  # ceph-deploy rewrites ceph.repo during install; keep our copy aside.
  ssh user@"$node" 'sudo mv /etc/yum.repos.d/ceph.repo /etc/yum.repos.d/ceph-deploy.repo'
  # Deploy the key so ceph-deploy can log in without a password.
  ssh-copy-id -i "$PATH_TO_IDENTITY_FILE" ceph@"$node"
done
Admin node:
# Working directory for ceph-deploy (it writes ceph.conf and keyrings here).
mkdir -p ~/ceph-admin
# Use the absolute path — a bare 'cd ceph-admin' only works from $HOME.
cd ~/ceph-admin || exit 1
# Generate the initial cluster config, then install Ceph on every node.
ceph-deploy new $NODE_LIST
ceph-deploy install $NODE_LIST
for node in $NODE_LIST; do
  ssh user@"$node" 'sudo yum update -y'
done
Setting up monitor servers:
# Deploy the monitors defined in ceph.conf and gather their keys.
ceph-deploy mon create-initial
# Push ceph.conf and the admin keyring to every node.
ceph-deploy admin $NODE_LIST
Prepare disks:
# List disks:
# List available disks on a node (set node_name to the node's hostname;
# '$(node_name)' would try to run node_name as a command).
ceph-deploy disk list "$node_name"
# Prepare (partition/format) the data disks, then activate them as OSDs.
ceph-deploy osd prepare $node1:sdb $node1:sdc $node2:sdb $node2:sdc $node3:sdb $node3:sdc
ceph-deploy osd activate $node1:sdb1 $node1:sdc1 $node2:sdb1 $node2:sdc1 $node3:sdb1 $node3:sdc1
Check workability:
# Allow non-root users to read the admin keyring so the ceph CLI works.
sudo chmod +r /etc/ceph/ceph.client.admin.keyring
# Show the OSD/CRUSH hierarchy and per-daemon up/down status.
ceph osd tree
# Overall cluster health summary.
ceph -s
Sync time on servers
for node in $NODE_LIST; do
  # Force an immediate clock sync — monitors reject nodes with clock skew.
  ssh user@"$node" 'sudo ntpdate ru.pool.ntp.org'
done
For CephFS:
# Deploy metadata servers (required for CephFS).
ceph-deploy mds create $MDS_NODES
# 128 = placement-group count per pool; tune to the number of OSDs.
ceph osd pool create cephfs_data 128
ceph osd pool create cephfs_metadata 128
# Create the filesystem: metadata pool first, then data pool.
ceph fs new cephfs cephfs_metadata cephfs_data
Clients (ceph-fuse)
# Install Ceph on the client machines.
ceph-deploy install client1 client2
# copy "/etc/ceph/ceph.client.admin.keyring" to the clients
# Create the mount point and mount CephFS via FUSE.
sudo mkdir /opt/cephfs_storage
sudo yum install ceph-fuse -y
sudo ceph-fuse -k ./ceph.client.admin.keyring -m <monitor-node-ip>:6789 /opt/cephfs_storage
# /etc/fstab: add "id=admin,conf=/etc/ceph/ceph.conf /opt/mycephfs fuse.ceph defaults,_netdev 0 0"
Change fuse to Kernel-Drive for CephFS
# Extract the bare secret key — the kernel client's 'secretfile' must
# contain only the key, not the full keyring format.
ceph-authtool --name client.admin /etc/ceph/ceph.client.admin.keyring --print-key >> /etc/ceph/ceph.keyring
# edit /etc/fstab: <monitor-node1-ip>,<monitor-node2-ip>,<monitor-node3-ip>:/ /opt/ceph ceph name=admin,secretfile=/etc/ceph/ceph.keyring,noatime,_netdev 0 2
# Mount everything from fstab, including the new CephFS entry.
mount -a