mount -t ceph 192.168.9.10:6789:/ /mnt/ceph
[17:07:39][root@ocean-lab ~]$ df -TH
Filesystem Type Size Used Avail Use% Mounted on
/dev/mapper/vg_oceani-lv_root
ext4 30G 7.7G 21G 28% /
tmpfs tmpfs 111M 0 111M 0% /dev/shm
/dev/sda1 ext4 500M 94M 375M 21% /boot
192.168.9.10:/data2 nfs 30G 25G 4.0G 87% /mnt/log
192.168.9.10:6789:/ ceph 172G 5.4G 167G 4% /mnt/ceph
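To make the kernel mount survive a reboot, an /etc/fstab entry can be added. A minimal sketch, assuming cephx stays disabled as in the ceph.conf below (with auth enabled, name= and secretfile= mount options would also be needed):
192.168.9.10:6789:/  /mnt/ceph  ceph  noatime,_netdev  0 0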
ceph-fuse [not tested]
At least 3 mons are recommended, so the cluster can keep serving normally even if one of them goes down.
ceph-fuse -m 192.168.9.10:6789,192.168.9.20:6789 /mnt/ceph
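To detach a ceph-fuse mount later, fusermount -u (or plain umount as root) should work:
fusermount -u /mnt/ceph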
Add OSDs
Here a new disk is added on agent01.
[15:58:07][root@agent01 ~]$ cat /etc/ceph/ceph.conf
[global]
public network = 192.168.9.0/24
pid file = /var/run/ceph/$name.pid
auth cluster required = none
auth service required = none
auth client required = none
keyring = /etc/ceph/keyring.$name
osd pool default size = 1
osd pool default min size = 1
osd pool default crush rule = 0
osd crush chooseleaf type = 1
[mon]
mon data = /var/lib/ceph/mon/$name
mon clock drift allowed = .15
keyring = /etc/ceph/keyring.$name
[mon.0]
host = master01
mon addr = 192.168.9.10:6789
[mds]
keyring = /etc/ceph/keyring.$name
[mds.0]
host = master01
[osd]
osd data = /ceph/osd$id
osd recovery max active = 5
osd mkfs type = xfs
osd journal = /ceph/osd$id/journal
osd journal size = 1000
keyring = /etc/ceph/keyring.$name
[osd.2]
host = agent01
devs = /dev/sdc1
[osd.3]
host = agent01
devs = /dev/sdc2
master01 ~ $ cd /etc/ceph; scp keyring.client.admin agent01:/etc/ceph/
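The updated ceph.conf with the new [osd.2]/[osd.3] sections also has to be present on agent01; assuming it was edited on master01, push it over the same way:
master01 ~ $ scp /etc/ceph/ceph.conf agent01:/etc/ceph/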
The following operations are all performed on the new OSD node.
Initialize the new OSDs. These commands must be run on the new node itself, i.e. agent01 here.
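Before running ceph-osd --mkfs, the osd data directories have to exist and be backed by the new partitions. A minimal sketch, assuming /dev/sdc has already been partitioned into sdc1/sdc2 and that formatting and mounting are not handled elsewhere:
mkfs.xfs -f /dev/sdc1
mkfs.xfs -f /dev/sdc2
mkdir -p /ceph/osd2 /ceph/osd3
mount /dev/sdc1 /ceph/osd2
mount /dev/sdc2 /ceph/osd3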
ceph-osd -i 2 --mkfs --mkkey;
ceph-osd -i 3 --mkfs --mkkey;
Add the new OSDs to the cluster
ceph auth add osd.2 osd 'allow *' mon 'allow rwx' -i /etc/ceph/keyring.osd.2;
ceph auth add osd.3 osd 'allow *' mon 'allow rwx' -i /etc/ceph/keyring.osd.3;
ceph osd create # allocate the id for osd.2 in the cluster map; needed before the daemon will start (see FAQ)
ceph osd create # allocate the id for osd.3
ceph osd rm osd_num # remove an OSD (for reference; osd_num is the id to remove)
/etc/init.d/ceph -a start osd.2 # start osd.2
/etc/init.d/ceph -a start osd.3 # start osd.3
/etc/init.d/ceph -a start osd # start all OSDs
ceph -s # check cluster status
ceph auth list # list all authenticated entities
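Once the new OSDs are up, their placement in the CRUSH hierarchy can also be checked:
ceph osd tree # show OSDs grouped by host, with weight and up/down status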
Add an MDS
Add an MDS on agent01.
Add the following to the configuration file and sync it to all nodes.
[mds.1]
host = agent01
The following operations are all performed on the new MDS node.
Generate the key
ceph-authtool --create-keyring --gen-key -n mds.1 /etc/ceph/keyring.mds.1
Register the key with the cluster
ceph auth add mds.1 osd 'allow *' mon 'allow rwx' mds 'allow' -i /etc/ceph/keyring.mds.1
Start the new MDS
/etc/init.d/ceph -a start mds.1
Check the MDS status
^_^[10:06:51][root@master01 ~]# ceph mds stat
e50: 1/1/1 up {0=0=up:active}, 1 up:standby
Add a MON
Add a mon on agent01.
Add the following to the configuration file and sync it to all nodes.
[mon.1]
host = agent01
mon addr = 192.168.9.20:6789
Export the mon key and the mon map
mkdir /tmp/ceph
ceph auth get mon. -o /tmp/ceph/keyring.mon
ceph mon getmap -o /tmp/ceph/monmap
Initialize the new mon
ceph-mon -i 1 --mkfs --monmap /tmp/ceph/monmap --keyring /tmp/ceph/keyring.mon
Start the new mon
ceph-mon -i 1 --public-addr 192.168.9.20:6789
Add it to the quorum
ceph mon add 1 192.168.9.20:6789
Check the mon status
^_^[10:13:44][root@master01 ~]#ceph mon stat
e2: 2 mons at {0=192.168.9.10:6789/0,1=192.168.9.20:6789/0}, election epoch 2, quorum 0,1 0,1
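For more detail than ceph mon stat (monmap epoch, quorum members, current leader), the quorum can also be inspected with:
ceph quorum_status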
FAQ:
^_^[11:19:10][root@master01 ~]#/etc/init.d/ceph -a start
=== mon.0 ===
Starting Ceph mon.0 on master01...already running
=== mds.0 ===
Starting Ceph mds.0 on master01...already running
=== osd.0 ===
Mounting xfs on master01:/ceph/osd0
Error ENOENT: osd.0 does not exist. create it before updating the crush map
failed: 'timeout 30 /usr/bin/ceph -c /etc/ceph/ceph.conf --name=osd.0 --keyring=/etc/ceph/keyring.osd.0 osd crush create-or-move -- 0 0.04 host=master01 root=default'
@_@[11:20:59][root@master01 ~]#ceph osd create
0
@_@[11:21:11][root@master01 ~]#ceph osd create
1
^_^[11:21:20][root@master01 ~]#/etc/init.d/ceph start osd.1
=== osd.1 ===
Mounting xfs on master01:/ceph/osd1
create-or-move updated item name 'osd.1' weight 0.04 at location {host=master01,root=default} to crush map
Starting Ceph osd.1 on master01...
starting osd.1 at :/0 osd_data /ceph/osd1 /ceph/osd1/journal
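The ENOENT error above means the osd ids had never been allocated in the cluster map; running ceph osd create to allocate ids 0 and 1 fixes it, after which starting osd.1 succeeds. osd.0 can presumably then be started the same way:
/etc/init.d/ceph start osd.0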