https://gist.github.com/vanduc95/a17f80d0636badd9aa002f2b493b777b
https://www.server-world.info/en/note?os=Ubuntu_16.04&p=ceph&f=1
Updated the steps for Ubuntu 18.04, then followed the blogs above.
Create the journal LV:
root@cephstor1:~# lvcreate -L +8G -n jn_lv "cephstor1_vg"
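For context, the VG and data LV referenced below were created beforehand; a minimal sketch of those LVM steps, assuming /dev/sdb is the disk backing this OSD (the device name and data LV size are illustrative, not taken from the log):
root@cephstor1:~# pvcreate /dev/sdb                               # initialize the disk for LVM (assumed device)
root@cephstor1:~# vgcreate cephstor1_vg /dev/sdb                  # VG used by the commands below
root@cephstor1:~# lvcreate -L 800G -n cephstor1_lv cephstor1_vg   # data LV for the OSD (size illustrative)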
-----
root@cephstor1:~# ceph-volume lvm prepare --filestore --data cephstor1_vg/cephstor1_lv --journal cephstor1_vg/jn_lv
Running command: ceph-authtool --gen-print-key
Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 6cde27f7-e940-4f0e-a053-faae20f3b909
Running command: ceph-authtool --gen-print-key
Running command: mkfs -t xfs -f -i size=2048 /dev/cephstor1_vg/cephstor1_lv
stdout: meta-data=/dev/cephstor1_vg/cephstor1_lv isize=2048 agcount=4, agsize=52428800 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=0, rmapbt=0, reflink=0
data = bsize=4096 blocks=209715200, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=102400, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
Running command: mount -t xfs -o rw,noatime,inode64 /dev/cephstor1_vg/cephstor1_lv /var/lib/ceph/osd/ceph-1
Running command: chown -R ceph:ceph /dev/dm-1
Running command: ln -s /dev/cephstor1_vg/jn_lv /var/lib/ceph/osd/ceph-1/journal
Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-1/activate.monmap
stderr: got monmap epoch 2
Running command: chown -R ceph:ceph /dev/dm-1
Running command: chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/
Running command: ceph-osd --cluster ceph --osd-objectstore filestore --mkfs -i 1 --monmap /var/lib/ceph/osd/ceph-1/activate.monmap --osd-data /var/lib/ceph/osd/ceph-1/ --osd-journal /var/lib/ceph/osd/ceph-1/journal --osd-uuid 6cde27f7-e940-4f0e-a053-faae20f3b909 --setuser ceph --setgroup ceph
stderr: 2018-08-31 21:52:02.096100 b759f3c0 -1 journal check: ondisk fsid 00000000-0000-0000-0000-000000000000 doesn't match expected 6cde27f7-e940-4f0e-a053-faae20f3b909, invalid (someone else's?) journal
stderr: 2018-08-31 21:52:02.475694 b759f3c0 -1 journal do_read_entry(4096): bad header magic
2018-08-31 21:52:02.475705 b759f3c0 -1 journal do_read_entry(4096): bad header magic
stderr: 2018-08-31 21:52:02.476242 b759f3c0 -1 read_settings error reading settings: (2) No such file or directory
stderr: 2018-08-31 21:52:02.811785 b759f3c0 -1 created object store /var/lib/ceph/osd/ceph-1/ for osd.1 fsid a509ce31-e9cb-4491-a4d0-bea156feff5e
Running command: ceph-authtool /var/lib/ceph/osd/ceph-1/keyring --create-keyring --name osd.1 --add-key AQAea4lbf/ywMhAA/yGa53/W5IpeH/C1+s/dLw==
stdout: creating /var/lib/ceph/osd/ceph-1/keyring
added entity osd.1 auth auth(auid = 18446744073709551615 key=AQAea4lbf/ywMhAA/yGa53/W5IpeH/C1+s/dLw== with 0 caps)
Running command: chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/keyring
--> ceph-volume lvm prepare successful for: cephstor1_vg/cephstor1_lv
------
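The activate runs in this log were issued without arguments; ceph-volume also accepts the OSD id and OSD fsid explicitly (both printed by prepare above), or --all to activate every prepared OSD on the node. A sketch using the values from the cephstor1 prepare output:
root@cephstor1:~# ceph-volume lvm activate --filestore 1 6cde27f7-e940-4f0e-a053-faae20f3b909
# or, to activate everything discovered on this node:
root@cephstor1:~# ceph-volume lvm activate --all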
root@cephstor1:~# ceph-volume lvm activate --filestore
Running command: mount -v /dev/cephstor1_vg/cephstor1_lv /var/lib/ceph/osd/ceph-2
stdout: mount: /dev/mapper/cephstor1_vg-cephstor1_lv mounted on /var/lib/ceph/osd/ceph-2.
Running command: ln -snf /dev/cephstor1_vg/jn_lv /var/lib/ceph/osd/ceph-2/journal
Running command: chown -R ceph:ceph /dev/dm-1
Running command: systemctl enable ceph-volume@lvm-2-25bb6df4-350b-42d7-ae0b-4c9509c8924f
stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-2-25bb6df4-350b-42d7-ae0b-4c9509c8924f.service → /lib/systemd/system/ceph-volume@.service.
Running command: systemctl start ceph-osd@2
--> ceph-volume lvm activate successful for osd ID: 2
--> ceph-volume lvm activate successful for osd ID: None
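A quick way to confirm the OSD actually came up (standard ceph/systemd checks, not part of the original log):
root@cephstor1:~# ceph -s                       # cluster health and OSD count
root@cephstor1:~# ceph osd tree                 # the new OSD should show as up/in
root@cephstor1:~# systemctl status ceph-osd@2   # the unit started by activate above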
--------------
Creating OSDs (NA)
The create subcommand wraps the two-step process of deploying a new OSD (calling prepare and then activate) into a single subcommand. The reason to use prepare and then activate separately is to introduce new OSDs into a storage cluster gradually and avoid rebalancing large amounts of data. The process is otherwise identical, except that the OSD becomes up and in immediately after create completes.
Run the following on an OSD node as the root user:
ceph-volume lvm create --filestore --data $VG_NAME/$LV_NAME --journal $JOURNAL_DEVICE
For example:
# ceph-volume lvm create --filestore --data example_vg/data_lv --journal example_vg/journal_lv
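Putting it together, a sketch of the single-command path; the VG/LV names and sizes below are placeholders matching the example above, not values from this cluster:
# create the backing LVs first, then let create run prepare + activate in one shot
lvcreate -L 800G -n data_lv example_vg
lvcreate -L 8G -n journal_lv example_vg
ceph-volume lvm create --filestore --data example_vg/data_lv --journal example_vg/journal_lv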
=================================
root@cephstor1:~# ceph-volume lvm list
====== osd.2 =======
  [data]       /dev/cephstor1_vg/cephstor1_lv

      data device               /dev/cephstor1_vg/cephstor1_lv
      journal uuid              mSMVWf-H2ln-62cy-HeL4-Z1L6-NbNl-a27seu
      osd id                    2
      cluster fsid              a509ce31-e9cb-4491-a4d0-bea156feff5e
      cluster name              ceph
      osd fsid                  25bb6df4-350b-42d7-ae0b-4c9509c8924f
      encrypted                 0
      data uuid                 Zmp54b-QPf2-9Bv3-sSug-Trpl-d7eN-7mo1mS
      cephx lockbox secret
      type                      data
      block uuid                Zmp54b-QPf2-9Bv3-sSug-Trpl-d7eN-7mo1mS
      block device              /dev/cephstor1_vg/cephstor1_lv
      crush device class        None
      journal device            /dev/cephstor1_vg/jn_lv

  [journal]    /dev/cephstor1_vg/jn_lv

      type                      journal
      journal uuid              mSMVWf-H2ln-62cy-HeL4-Z1L6-NbNl-a27seu
      osd id                    2
      cluster fsid              a509ce31-e9cb-4491-a4d0-bea156feff5e
      cluster name              ceph
      osd fsid                  25bb6df4-350b-42d7-ae0b-4c9509c8924f
      encrypted                 0
      data uuid                 Zmp54b-QPf2-9Bv3-sSug-Trpl-d7eN-7mo1mS
      cephx lockbox secret
      crush device class        None
      data device               /dev/cephstor1_vg/cephstor1_lv
      journal device            /dev/cephstor1_vg/jn_lv
root@cephstor1:~#
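The same inventory is available in machine-readable form, which is handy for scripting checks across nodes (the --format option is part of ceph-volume lvm list on recent releases):
root@cephstor1:~# ceph-volume lvm list --format json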
---
root@cephstor2:~# ceph-volume lvm prepare --filestore --data cephstor2_vg/cephstor2_lv --journal cephstor2_vg/jn2_lv
Running command: ceph-authtool --gen-print-key
Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new d2e93b35-6ddf-4b4d-9d9a-b33063232462
Running command: ceph-authtool --gen-print-key
Running command: mkfs -t xfs -f -i size=2048 /dev/cephstor2_vg/cephstor2_lv
stdout: meta-data=/dev/cephstor2_vg/cephstor2_lv isize=2048 agcount=4, agsize=52428800 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=0, rmapbt=0, reflink=0
data = bsize=4096 blocks=209715200, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=102400, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
Running command: mount -t xfs -o rw,noatime,inode64 /dev/cephstor2_vg/cephstor2_lv /var/lib/ceph/osd/ceph-2
Running command: chown -R ceph:ceph /dev/dm-1
Running command: ln -s /dev/cephstor2_vg/jn2_lv /var/lib/ceph/osd/ceph-2/journal
Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-2/activate.monmap
stderr: got monmap epoch 2
Running command: chown -R ceph:ceph /dev/dm-1
Running command: chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/
Running command: ceph-osd --cluster ceph --osd-objectstore filestore --mkfs -i 2 --monmap /var/lib/ceph/osd/ceph-2/activate.monmap --osd-data /var/lib/ceph/osd/ceph-2/ --osd-journal /var/lib/ceph/osd/ceph-2/journal --osd-uuid d2e93b35-6ddf-4b4d-9d9a-b33063232462 --setuser ceph --setgroup ceph
stderr: 2018-08-31 23:24:03.987243 b75ec3c0 -1 journal check: ondisk fsid 00000000-0000-0000-0000-000000000000 doesn't match expected d2e93b35-6ddf-4b4d-9d9a-b33063232462, invalid (someone else's?) journal
stderr: 2018-08-31 23:24:04.644125 b75ec3c0 -1 journal do_read_entry(4096): bad header magic
2018-08-31 23:24:04.644138 b75ec3c0 -1 journal do_read_entry(4096): bad header magic
stderr: 2018-08-31 23:24:04.644711 b75ec3c0 -1 read_settings error reading settings: (2) No such file or directory
stderr: 2018-08-31 23:24:04.987278 b75ec3c0 -1 created object store /var/lib/ceph/osd/ceph-2/ for osd.2 fsid a509ce31-e9cb-4491-a4d0-bea156feff5e
Running command: ceph-authtool /var/lib/ceph/osd/ceph-2/keyring --create-keyring --name osd.2 --add-key AQCygIlbf6yuMxAA21ihuGG1M/mqeZcY+cBaZA==
stdout: creating /var/lib/ceph/osd/ceph-2/keyring
added entity osd.2 auth auth(auid = 18446744073709551615 key=AQCygIlbf6yuMxAA21ihuGG1M/mqeZcY+cBaZA== with 0 caps)
Running command: chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/keyring
--> ceph-volume lvm prepare successful for: cephstor2_vg/cephstor2_lv
---
root@cephstor2:~# ceph-volume lvm activate --filestore
Running command: ln -snf /dev/cephstor2_vg/jn2_lv /var/lib/ceph/osd/ceph-2/journal
Running command: chown -R ceph:ceph /dev/dm-1
Running command: systemctl enable ceph-volume@lvm-2-d2e93b35-6ddf-4b4d-9d9a-b33063232462
stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-2-d2e93b35-6ddf-4b4d-9d9a-b33063232462.service → /lib/systemd/system/ceph-volume@.service.
Running command: systemctl start ceph-osd@2
--> ceph-volume lvm activate successful for osd ID: 2
--> ceph-volume lvm activate successful for osd ID: None
---
root@cephstor3:~# ceph-volume lvm prepare --filestore --data cephstor3_vg/cephstor3_lv --journal cephstor3_vg/jn3_lv
Running command: ceph-authtool --gen-print-key
Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 0f7ab8b6-1c22-4e3c-a46c-3890e8e0d9fc
Running command: ceph-authtool --gen-print-key
Running command: mkfs -t xfs -f -i size=2048 /dev/cephstor3_vg/cephstor3_lv
stdout: meta-data=/dev/cephstor3_vg/cephstor3_lv isize=2048 agcount=4, agsize=52428800 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=0, rmapbt=0, reflink=0
data = bsize=4096 blocks=209715200, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=102400, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
Running command: mount -t xfs -o rw,noatime,inode64 /dev/cephstor3_vg/cephstor3_lv /var/lib/ceph/osd/ceph-3
Running command: chown -R ceph:ceph /dev/dm-1
Running command: ln -s /dev/cephstor3_vg/jn3_lv /var/lib/ceph/osd/ceph-3/journal
Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-3/activate.monmap
stderr: got monmap epoch 2
Running command: chown -R ceph:ceph /dev/dm-1
Running command: chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/
Running command: ceph-osd --cluster ceph --osd-objectstore filestore --mkfs -i 3 --monmap /var/lib/ceph/osd/ceph-3/activate.monmap --osd-data /var/lib/ceph/osd/ceph-3/ --osd-journal /var/lib/ceph/osd/ceph-3/journal --osd-uuid 0f7ab8b6-1c22-4e3c-a46c-3890e8e0d9fc --setuser ceph --setgroup ceph
stderr: 2018-08-31 23:35:30.829448 b6e523c0 -1 journal check: ondisk fsid 00000000-0000-0000-0000-000000000000 doesn't match expected 0f7ab8b6-1c22-4e3c-a46c-3890e8e0d9fc, invalid (someone else's?) journal
stderr: 2018-08-31 23:35:31.255897 b6e523c0 -1 journal do_read_entry(4096): bad header magic
2018-08-31 23:35:31.255907 b6e523c0 -1 journal do_read_entry(4096): bad header magic
stderr: 2018-08-31 23:35:31.256244 b6e523c0 -1 read_settings error reading settings: (2) No such file or directory
stderr: 2018-08-31 23:35:31.748767 b6e523c0 -1 created object store /var/lib/ceph/osd/ceph-3/ for osd.3 fsid a509ce31-e9cb-4491-a4d0-bea156feff5e
Running command: ceph-authtool /var/lib/ceph/osd/ceph-3/keyring --create-keyring --name osd.3 --add-key AQBhg4lbMsiHNxAAEylG3lRW3JYFbTPCGmrCUw==
stdout: creating /var/lib/ceph/osd/ceph-3/keyring
added entity osd.3 auth auth(auid = 18446744073709551615 key=AQBhg4lbMsiHNxAAEylG3lRW3JYFbTPCGmrCUw== with 0 caps)
Running command: chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/keyring
--> ceph-volume lvm prepare successful for: cephstor3_vg/cephstor3_lv
----
root@cephstor3:~# ceph-volume lvm activate --filestore
Running command: ln -snf /dev/cephstor3_vg/jn3_lv /var/lib/ceph/osd/ceph-3/journal
Running command: chown -R ceph:ceph /dev/dm-1
Running command: systemctl enable ceph-volume@lvm-3-0f7ab8b6-1c22-4e3c-a46c-3890e8e0d9fc
stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-3-0f7ab8b6-1c22-4e3c-a46c-3890e8e0d9fc.service → /lib/systemd/system/ceph-volume@.service.
Running command: systemctl start ceph-osd@3
--> ceph-volume lvm activate successful for osd ID: 3
--> ceph-volume lvm activate successful for osd ID: None
-------------
https://stackoverflow.com/questions/45012905/removing-pool-mon-allow-pool-delete-config-option-to-true-before-you-can-destro
root@kvm-01:~# ceph -n mon.0 --show-config | grep mon_allow_pool_delete
mon_allow_pool_delete = true
root@kvm-01:~# ceph -n mon.1 --show-config | grep mon_allow_pool_delete
mon_allow_pool_delete = true
root@kvm-01:~# ceph -n mon.2 --show-config | grep mon_allow_pool_delete
mon_allow_pool_delete = true
root@kvm-01:~# cat /etc/ceph/ceph.conf
[global]
auth client required = cephx
auth cluster required = cephx
auth service required = cephx
cluster network = 10.0.0.0/24
filestore xattr use omap = true
fsid = 41fa3ff6-e751-4ebf-8a76-3f4a445823d2
keyring = /etc/pve/priv/$cluster.$name.keyring
osd journal size = 5120
osd pool default min size = 1
public network = 10.0.0.0/24
[osd]
keyring = /var/lib/ceph/osd/ceph-$id/keyring
[mon.0]
host = kvm-01
mon addr = 10.0.0.1:6789
mon allow pool delete = true
[mon.2]
host = kvm-03
mon addr = 10.0.0.3:6789
mon allow pool delete = true
[mon.1]
host = kvm-02
mon addr = 10.0.0.2:6789
mon allow pool delete = true
----
ceph tell mon.\* injectargs '--mon-allow-pool-delete=true'
(some releases report the injected value as not observed and require the mon daemons to be restarted before it takes effect; the ceph.conf entries above make the setting persistent, and a full node reboot should not be needed)
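With the option active on all mons, a pool can then be removed; the pool name below is just a placeholder:
root@kvm-01:~# ceph osd pool delete testpool testpool --yes-i-really-really-mean-it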
https://swamireddy.wordpress.com/2016/01/26/ceph-diff-between-erasure-and-replicated-pool-type/