Ensure that the "ceph" hostname resolves to your machine's IP:
cat /etc/hosts
127.0.0.1 localhost
127.0.1.1 ceph
# The following lines are desirable for IPv6 capable hosts
::1 localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
192.168.2.23 ceph
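A quick, optional sanity check that the name resolves as expected:
getent hosts ceph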
wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
echo deb http://download.ceph.com/debian-jewel/ xenial main | sudo tee /etc/apt/sources.list.d/ceph.list
sudo apt-get update && sudo apt-get install ceph-deploy
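If the install succeeded, ceph-deploy should be able to report its version:
ceph-deploy --version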
sudo useradd -m -s /bin/bash ceph-deploy
echo "ceph-deploy:changeme"|sudo chpasswd
echo "ceph-deploy ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph-deploy
sudo chmod 0440 /etc/sudoers.d/ceph-deploy
su - ceph-deploy
ssh-keygen -t rsa -P "" -f ~ceph-deploy/.ssh/id_rsa
ssh-copy-id ceph-deploy@ceph
-- Or, since this is a single-node setup, simply append the key locally --
cat ~ceph-deploy/.ssh/id_rsa.pub >~ceph-deploy/.ssh/authorized_keys
sudo chown -R ceph-deploy:ceph-deploy ~ceph-deploy/.ssh/ && chmod 600 ~ceph-deploy/.ssh/*
Add the following to ~ceph-deploy/.ssh/config so that ceph-deploy connects to the node as the ceph-deploy user:
Host ceph
Hostname ceph
User ceph-deploy
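Assuming the block above went into ~ceph-deploy/.ssh/config, the ceph-deploy user should now reach the node without a password prompt; a quick check that also exercises passwordless sudo (it should print "root"):
ssh ceph sudo whoami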
su - ceph-deploy
cd ~
mkdir my-cluster
cd my-cluster
ceph-deploy new ceph
The default pool size is the number of replicas we want to keep of our data (here 2). The chooseleaf setting tells Ceph that we are running a single node and that it is OK to store copies of the same data on the same physical node. Normally, for safety, Ceph distributes the copies across hosts and won't leave all your eggs in the same basket (server).
echo "osd pool default size = 2" >> ceph.conf
echo "osd crush chooseleaf type = 0" >> ceph.conf
ceph-deploy install ceph
ceph-deploy mon create-initial
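Zapping destroys the partition table on each disk, so double-check the device names first (a minimal check):
lsblk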
sudo /usr/sbin/ceph-disk zap /dev/sdb
sudo /usr/sbin/ceph-disk zap /dev/sdc
sudo /usr/sbin/ceph-disk zap /dev/sdd
ceph-deploy osd prepare ceph:sdb
ceph-deploy osd prepare ceph:sdc
ceph-deploy osd prepare ceph:sdd
ceph-deploy osd activate ceph:/dev/sdb1
ceph-deploy osd activate ceph:/dev/sdc1
ceph-deploy osd activate ceph:/dev/sdd1
ceph-deploy admin ceph
sudo chmod +r /etc/ceph/ceph.client.admin.keyring
ceph -s
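If everything came up, the three OSDs should be listed as up and in:
ceph osd tree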
ceph-deploy rgw create ceph
ceph-deploy mds create ceph
ceph osd pool create cephfs_data 128
ceph osd pool create cephfs_metadata 128
ceph fs new cephfs cephfs_metadata cephfs_data
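A couple of optional checks that the filesystem and the MDS are in place:
ceph fs ls
ceph mds stat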
By default, all access operations require authentication. The Ceph install has created default admin credentials for us; we will use them later for the client configuration.
cat ~/my-cluster/ceph.client.admin.keyring
[client.admin]
key = AQCv2yRXOVlUMxAAK+e6gehnirXTV0O8PrJYQQ==
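If you only need the key itself (for example, to paste into a mount command), this prints it on its own:
ceph auth get-key client.admin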
sudo apt-get install ceph-fs-common
sudo mkdir /mnt/mycephfs
sudo mount -t ceph ceph:6789:/ /mnt/mycephfs -o name=admin,secret=AQCv2yRXOVlUMxAAK+e6gehnirXTV0O8PrJYQQ==
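Confirm that the filesystem is mounted:
df -h /mnt/mycephfs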
- Create a test file
echo "This is a TEST" >testfile.txt
- Now create a pool (in this case "mytest")
ceph osd pool create mytest 8
- Now we put the test file into the pool as an object named "testfile"
rados put testfile $(pwd)/testfile.txt --pool=mytest
- We can now list the objects in the "mytest" pool to confirm it is there
rados -p mytest ls
- We can check how the object maps to a placement group and OSDs
ceph osd map mytest testfile
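- Optionally, read the object back to verify its contents (the output path here is just an example)
rados get testfile /tmp/testfile.out --pool=mytest
cat /tmp/testfile.out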
- We can now delete the object previously created in the "mytest" pool
rados rm testfile --pool=mytest
- Finally, we delete the pool
ceph osd pool delete mytest mytest --yes-i-really-really-mean-it
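- To confirm the pool is gone, list the remaining pools
rados lspools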
REFERENCE: http://prashplus.blogspot.com/2018/01/ceph-single-node-setup-ubuntu.html