#!/bin/bash
# Deploy and start a Ceph MDS daemon on the host given as the first argument.
if [ $# -lt 1 ]; then
    printf "Usage: %s {host}\n" "$0"
    exit 1
fi
mds_host=$1
mds_name=mds.$mds_host
mds_data=/data/$mds_name
keyfile=ceph.$mds_host.keyring
mon_host=ceph-mon:6789
### Stop any currently running mds daemon first
ssh "$mds_host" "killall -TERM ceph-mds"
### Wipe and recreate the mds data directory
ssh "$mds_host" "rm -f $mds_data/*"
ssh "$mds_host" "mkdir -p $mds_data"
### Remove the old keyring file first
rm -f "$keyfile"
### Create a new keyring file with a generated key
ceph-authtool -C -g -n "$mds_name" "$keyfile"
### Register the key and its capabilities with the cluster
ceph auth add "$mds_name" mon 'allow profile mds' osd 'allow rwx' mds 'allow' -i "$keyfile"
### Push the cluster config and admin keyring to the mds host
scp \
    /etc/ceph/ceph.conf \
    /etc/ceph/ceph.client.admin.keyring "$mds_host:/etc/ceph"
scp "$keyfile" "$mds_host:$mds_data/keyring"
### Start the mds daemon
ssh "$mds_host" "ceph-mds -i $mds_host -n $mds_name -m $mon_host --mds-data=$mds_data"
After the script finishes, the MDS service is started automatically. Check the Ceph cluster status on the ceph-mon node:
ceph-mon:~ # ceph -s
    cluster 266900a9-b1bb-4b1f-9bd0-c509578aa9c9
     health HEALTH_OK
     monmap e1: 1 mons at {ceph-mon=192.168.239.131:6789/0}, election epoch 2, quorum 0 ceph-mon
     mdsmap e4: 1/1/1 up {0=ceph-mds=up:active}
     osdmap e17: 3 osds: 3 up, 3 in
      pgmap v23: 192 pgs, 3 pools, 1884 bytes data, 20 objects
            3180 MB used, 45899 MB / 49080 MB avail
                 192 active+clean
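To watch only the MDS state rather than the full cluster status, ceph mds stat prints just the mdsmap line; here it should match the mdsmap e4 entry shown above:
ceph-mon:~ # ceph mds stat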
OSD status:
ceph-mon:~ # ceph osd tree
# id    weight  type name       up/down reweight
-1      3       root default
-2      1               host ceph-osd0
0       1                       osd.0   up      1
-3      1               host ceph-osd1
1       1                       osd.1   up      1
-4      1               host ceph-osd2
2       1                       osd.2   up      1
Check the monitor process on the ceph-mon node:
ceph-mon:~ # ps ax |grep ceph-mon
8993 pts/0 Sl 0:00 ceph-mon -i ceph-mon --mon-data /data/ceph-mon
Check the OSD process on each ceph-osdX node:
ceph-osd0:~ # ps ax | grep ceph-osd
13140 ? Ssl 0:02 ceph-osd -i 0 --osd-data /data/osd.0 --osd-journal /data/osd.0/journal
Check the MDS process on the ceph-mds node:
ceph-mds:~ # ps ax |grep ceph-mds
42260 ? Ssl 0:00 ceph-mds -i ceph-mds -n mds.ceph-mds -m ceph-mon:6789 --mds-data=/data/mds.ceph-mds
7. Because the kernels in the SLES 11 series do not yet support the ceph module, you need to install a newer kernel on the client to get the mount.ceph functionality. The mount.ceph command is used as follows:
mount.ceph {mon ip/host}:/ {mount point} -o name=admin,secret={your secret key}
mount.ceph ceph-mon:/ /mnt/cephfs -v -o name=admin,secret=AQD5jp5UqPRtCRAAvpRyhlNI0+qEHjZYqEZw8A==
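Passing the secret on the command line exposes it in the shell history and process list. If your Ceph version supports ceph auth get-key, a safer variant (the /etc/ceph/admin.secret path is just an example) stores the key in a file and points mount.ceph at it with the secretfile option:
ceph-mon:~ # ceph auth get-key client.admin > /etc/ceph/admin.secret
ceph-mon:~ # chmod 600 /etc/ceph/admin.secret
ceph-mon:~ # mount.ceph ceph-mon:/ /mnt/cephfs -v -o name=admin,secretfile=/etc/ceph/admin.secret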
Check the mount status:
ceph-mon:/etc/ceph # df -Ph
Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/rootvg-root   12G  5.3G  5.7G  49% /
udev                      12G  5.3G  5.7G  49% /dev
tmpfs                     12G  5.3G  5.7G  49% /dev/shm
/dev/sda1                185M   36M  141M  21% /boot
/dev/sdb1                 16G   35M   16G   1% /data
192.168.239.131:/         48G  3.2G   45G   7% /mnt/cephfs
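To remount the filesystem automatically at boot, an /etc/fstab entry along these lines should work, assuming the secretfile created above (adjust the monitor address and paths to your setup):
192.168.239.131:6789:/  /mnt/cephfs  ceph  name=admin,secretfile=/etc/ceph/admin.secret,noatime  0 0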