A simple Ceph cluster setup on SUSE Linux Enterprise Server 11 SP3.
Environment overview:
one mon node, one mds node, and three osd nodes, as follows
192.168.239.131 ceph-mon
192.168.239.132 ceph-mds
192.168.239.160 ceph-osd0
192.168.239.161 ceph-osd1
192.168.239.162 ceph-osd2
1. Register an account at suse.com and download the SLES 11 SP3 and SUSE Cloud 4 ISOs.
2. Install the OS on every node, then add two installation repositories: one for the OS and one for SUSE Cloud 4, as shown below.
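For example, with both ISOs copied to /root/iso (the file names here are illustrative, not the exact SUSE media names):
zypper ar "iso:/?iso=/root/iso/SLES-11-SP3-DVD-x86_64.iso" SLES11-SP3
zypper ar "iso:/?iso=/root/iso/SUSE-Cloud-4-x86_64.iso" SUSE-Cloud-4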
3. Set up passwordless SSH for root from ceph-mon to every other node, e.g.:
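One way to do this from ceph-mon, assuming the stock ssh-keygen and ssh-copy-id tools:
ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
for h in ceph-mds ceph-osd0 ceph-osd1 ceph-osd2; do ssh-copy-id root@$h; done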
4. Copy /etc/hosts from the ceph-mon node to every other node, e.g.:
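For example:
for h in ceph-mds ceph-osd0 ceph-osd1 ceph-osd2; do scp /etc/hosts root@$h:/etc/hosts; done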
5. Install Ceph on every node:
zypper -n install ceph ceph-radosgw
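With passwordless SSH in place, the remote nodes can be installed from ceph-mon in one pass:
for h in ceph-mds ceph-osd0 ceph-osd1 ceph-osd2; do ssh root@$h "zypper -n install ceph ceph-radosgw"; done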
6. On the ceph-mon node, run setup.sh, which calls init-mon.sh, init-osd.sh, and init-mds.sh in turn to configure the mon, osds, and mds automatically.
setup.sh and init-mon.sh change into a ./ceph directory under the current working directory, so be sure to run them from a directory other than /etc.
The scripts are listed below (for reference only):
(1) setup.sh
#!/bin/bash
### Stop all existing OSD daemons
printf "Killing all ceph-osd nodes..."
for i in 0 1 2; do
    ssh ceph-osd$i "killall -TERM ceph-osd"
    sleep 1
done
printf "Done\n"
### Initialize mon on this system
killall -TERM ceph-mon
printf "Initializing ceph-mon on current node..."
./init-mon.sh
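### init-mon.sh leaves the generated conf and keyrings in ./ceph; run the other scripts from there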
cd ./ceph
printf "Done\n"
### Initialize osd services on nodes
for i in 0 1 2; do
    ../init-osd.sh ceph-osd$i $i
    sleep 1
done
### Initialize mds on remote node
printf "Initializing mds on ceph-mds..."
../init-mds.sh ceph-mds
printf "Done\n"
(2) init-mon.sh
#!/bin/bash
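### Cluster identity, networks, and mon data dir; adjust to your environment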
fsid=$(uuidgen)
mon_node=$(hostname)
mon_ip=192.168.239.131
cluster_net=192.168.239.0/24
public_net=192.168.1.0/24
mon_data=/data/$mon_node
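### Wipe configuration, keyrings, and logs left over from any previous run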
killall -TERM ceph-mon
rm -f /etc/ceph/ceph.conf /etc/ceph/*.keyring
rm -f /var/lib/ceph/bootstrap-mds/* /var/lib/ceph/bootstrap-osd/*
rm -f /var/log/ceph/*.log
confdir=./ceph
rm -fr $confdir
mkdir -p $confdir
cd $confdir
rm -fr $mon_data
mkdir -p $mon_data
cat > ceph.conf << EOF
[global]
fsid = $fsid
mon initial members = $mon_node
mon host = $mon_ip
public network = $public_net
cluster network = $cluster_net
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
filestore xattr use omap = true
EOF
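### Generate the bootstrap, mon, and admin keyrings, then merge the admin key into the mon keyring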
ceph-authtool --create-keyring bootstrap-osd.keyring --gen-key -n client.bootstrap-osd
ceph-authtool --create-keyring bootstrap-mds.keyring --gen-key -n client.bootstrap-mds
ceph-authtool --create-keyring ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
ceph-authtool --create-keyring ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'
ceph-authtool ceph.mon.keyring --import-keyring ceph.client.admin.keyring
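### Build the initial monmap with this mon's name, address, and fsid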
monmaptool --create --add $mon_node $mon_ip --fsid $fsid monmap
cp -a ceph.conf /etc/ceph
cp -a ceph.client.admin.keyring /etc/ceph
### Make filesystem for ceph-mon
ceph-mon --mkfs -i $mon_node --monmap monmap --keyring ceph.mon.keyring --mon-data $mon_data
### Start the ceph-mon service
ceph-mon -i $mon_node --mon-data $mon_data
### Register the bootstrap keys with the monitor
ceph auth add client.bootstrap-mds mon 'allow profile bootstrap-mds' -i bootstrap-mds.keyring
ceph auth add client.bootstrap-osd mon 'allow profile bootstrap-osd' -i bootstrap-osd.keyring
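After init-mon.sh finishes, the monitor should already answer queries; a quick sanity check:
ceph mon stat
ceph -s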
(3) init-osd.sh
#!/bin/bash
if [ $# -lt 2 ]; then
    printf "Usage: %s {host} {osd_num}\n" "$0"
    exit 1
fi
host=$1
osd_num=$2
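### Stop any old daemon on the target host and reset its data and bootstrap dirs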
ssh $host "killall -TERM ceph-osd"
ssh $host "rm -f /var/lib/ceph/bootstrap-osd/*keyring"
ssh $host "rm -fr /data/osd.$osd_num/*"
ssh $host "mkdir -p /var/lib/ceph/bootstrap-osd"
ssh $host "mkdir -p /data/osd.$osd_num"
scp ceph.conf ceph.client.admin.keyring $host:/etc/ceph
scp bootstrap-osd.keyring $host:/var/lib/ceph/bootstrap-osd/ceph.keyring
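### Allocate an OSD id; on a clean cluster initialized in order, it should match $osd_num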
ssh $host "ceph osd create"
ssh $host "ceph-osd -i $osd_num --osd-data /data/osd.$osd_num --osd-journal /data/osd.$osd_num/journal --mkfs --mkkey"
ssh $host "ceph auth add osd.$osd_num osd 'allow *' mon 'allow profile osd' -i /data/osd.$osd_num/keyring"
ssh $host "ceph osd crush add-bucket $host host"
ssh $host "ceph osd crush move $host root=default"
ssh $host "ceph osd crush add osd.$osd_num 1.0 host=$host"
ssh $host "ceph-osd -i $osd_num --osd-data /data/osd.$osd_num --osd-journal /data/osd.$osd_num/journal"
(4) init-mds.sh
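A minimal init-mds.sh, mirroring the structure of init-osd.sh (a sketch; the cap profile and keyring location are assumptions, not necessarily the exact original):
#!/bin/bash
### Minimal sketch mirroring init-osd.sh; caps and keyring path are assumptions
if [ $# -lt 1 ]; then
    printf "Usage: %s {host}\n" "$0"
    exit 1
fi
host=$1
ssh $host "killall -TERM ceph-mds"
ssh $host "mkdir -p /var/lib/ceph/bootstrap-mds /var/lib/ceph/mds/ceph-$host"
scp ceph.conf ceph.client.admin.keyring $host:/etc/ceph
scp bootstrap-mds.keyring $host:/var/lib/ceph/bootstrap-mds/ceph.keyring
### Create the mds key in the daemon's default keyring location, then start it
ssh $host "ceph auth get-or-create mds.$host mds 'allow' osd 'allow *' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-$host/keyring"
ssh $host "ceph-mds -i $host"
With the mds registered, ceph -s on ceph-mon should list it as active once the cluster settles.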