Ceph 文件系统安装(2)

; osd
; You need at least one. Two if you want data to be replicated.
; Define as many as you like.
[osd]
; This is where the btrfs volume will be mounted.
osd data = /data/osd$id

; Ideally, make this a separate disk or partition. A few
; hundred MB should be enough; more if you have fast or many
; disks. You can use a file under the osd data dir if need be
; (e.g. /data/osd$id/journal), but it will be slower than a
; separate disk or partition.

; This is an example of a file-based journal.

osd journal = /data/osd$id/journal
; journal size, in megabytes
; (moved off the value line: strict INI parsers would otherwise
; treat "; journal size, ..." as part of the value)
osd journal size = 1000

; osd logging to debug osd issues, in order of likelihood of being
; helpful
;debug ms = 1
;debug osd = 20
;debug filestore = 20
;debug journal = 20

; First OSD daemon; runs on the host named below.
[osd.0]
host = osd1.ihep.ac.cn

; if 'btrfs devs' is not specified, you're responsible for
; setting up the 'osd data' dir. if it is not btrfs, things
; will behave up until you try to recover from a crash (which
; is usually fine for basic testing).
btrfs devs = /dev/sda4

; Second OSD daemon; same device path as osd.0, but on its own host.
[osd.1]
host = osd2.ihep.ac.cn
btrfs devs = /dev/sda4

; Third OSD daemon; same device path as the others, on its own host.
[osd.2]
host = osd3.ihep.ac.cn
btrfs devs = /dev/sda4

; Disabled example: uncomment all three lines to add a fourth OSD.
;[osd.3]
; host = eta
; btrfs devs = /dev/sdy

以上是我使用的 ceph.conf 文件的内容。

#!/bin/sh
# fetch_config — copy the cluster's ceph.conf from a remote location
# to the local path given as the first argument.
#
# usage: fetch_config /path/to/destination/ceph.conf
conf="$1"

## fetch ceph.conf from some remote location and save it to $conf.
##
## make sure this script is executable (chmod +x fetch_config)

##
## examples:
##

## from a locally accessible file
# cp /path/to/ceph.conf "$conf"

## from a URL:
# wget -q -O "$conf" http://host/path/to/ceph.conf

## via scp (must stay on ONE commented line; a bare continuation
## line would be executed as a command)
# scp -i /path/to/id_dsa user@host:/path/to/ceph.conf "$conf"

# Pull the config from the monitor node. Note: the remote path is
# /usr/local/etc/ceph/ceph.conf (the original had a "ceph/conf" typo,
# which does not match the path used by mkcephfs below).
scp root@mon1.ihep.ac.cn:/usr/local/etc/ceph/ceph.conf "$conf"

4. 创建文件系统并启动。下面的操作都在监控节点上做。

#mkcephfs -a -c /usr/local/etc/ceph/ceph.conf --mkbtrfs

#mkdir /etc/ceph; cp /usr/local/etc/ceph/* /etc/ceph ##这个我记不清楚了,反正似乎有用

#/etc/init.d/ceph -a start 

5. 挂载 

#mkdir /ceph

#mount.ceph 192.168.56.107:/ /ceph

内容版权声明:除非注明,否则皆为本站原创文章。

转载注明出处:http://www.heiqu.com/10d3c04fa3f258bf018e3422131965fe.html