Deploy Ceph

Ceph Cluster

ceph -s             // show ceph status
ceph osd tree       // show osd tree of ceph
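For scripting or closer inspection, the status commands also take a machine-readable output format; a small sketch using standard ceph CLI options:

ceph -s -f json-pretty      // full cluster status as JSON
ceph health detail          // per-check explanation when status is not HEALTH_OK
ceph osd df tree            // per-OSD utilization laid out along the CRUSH tree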
RADOS

rados lspools                 // list pools
rados mkpool {pool_name}      // create rados pool; you can assign a crush rule and auid
rados -p {pool_name} ls       // list objects in rados pool
rados df                      // show per-pool and total usage
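To sanity-check a pool end to end, round-trip an object through it; a minimal sketch using standard rados subcommands (test-obj and the file paths are arbitrary examples). Note that newer releases deprecate rados mkpool in favor of ceph osd pool create {pool_name} {pg_num}.

rados -p {pool_name} put test-obj /etc/hosts      // store a local file as an object
rados -p {pool_name} stat test-obj                // show object size and mtime
rados -p {pool_name} get test-obj /tmp/test-obj   // read it back
rados -p {pool_name} rm test-obj                  // clean up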
RBD

rbd info -p {pool_name} {image_name}      // show info of given image
rbd ls -p {pool_name}                     // list images of given pool
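Creating and mapping an image exercises the whole RBD path; a minimal sketch with standard rbd subcommands (the 1024 MB size is an arbitrary example, and the kernel client may need newer image features disabled first):

rbd create {pool_name}/{image_name} --size 1024                                   // size is in MB
rbd feature disable {pool_name}/{image_name} object-map fast-diff deep-flatten    // only if the kernel client rejects the image
rbd map {pool_name}/{image_name}                                                  // returns a /dev/rbdX device
rbd unmap /dev/rbd0
rbd rm {pool_name}/{image_name}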
OSD

Disk Part

Partition the NVMe SSD (interactive parted session):

parted /dev/nvme1n1
mklabel
gpt
mkpart osd-service-3-data 0G 30G
mkpart osd-service-3-wal 30G 60G
mkpart osd-device-3-db 60G 90G
mkpart osd-device-3-block 90G 150G

mkfs.xfs /dev/nvme1n1p1       // format the data partition
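The same layout can be scripted non-interactively, which is easier to repeat across nodes; a sketch assuming the partition names and sizes above:

parted -s /dev/nvme1n1 mklabel gpt
parted -s /dev/nvme1n1 mkpart osd-service-3-data 0G 30G
parted -s /dev/nvme1n1 mkpart osd-service-3-wal 30G 60G
parted -s /dev/nvme1n1 mkpart osd-device-3-db 60G 90G
parted -s /dev/nvme1n1 mkpart osd-device-3-block 90G 150G
mkfs.xfs -f /dev/nvme1n1p1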
Remove OSD

ceph osd out {osd-num}            // mark the osd out so data migrates away
ceph osd crush remove {name}      // remove it from the crush map
ceph auth del osd.{osd-num}       // delete its auth key
ceph osd rm {osd-num}             // remove the osd from the cluster
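Before the final removal the daemon itself should be stopped; on Luminous and later there are also shortcuts that check safety and collapse the last three steps. A sketch, assuming systemd-managed OSDs:

systemctl stop ceph-osd@{osd-num}                   // stop the daemon once rebalancing finishes
ceph osd safe-to-destroy osd.{osd-num}              // confirm no data would be lost
ceph osd purge {osd-num} --yes-i-really-mean-it     // crush remove + auth del + osd rm in one step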
Create OSD

ceph osd create                                     // allocate a new osd id
ceph-osd -i 1 --mkfs --mkkey                        // initialize the osd data directory and keyring
ceph auth add osd.1 osd 'allow *' mon 'allow profile osd' -i /zm3/osd.1/keyring
ceph osd crush add osd.1 1.0 host=sw211             // add it to the crush map with weight 1.0
ceph-osd -i 1                                       // start the daemon
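ceph-osd --mkfs expects the OSD data directory to exist and be mounted before it runs; a minimal sketch, assuming the /zm3/osd.1 path implied by the keyring above (on recent releases ceph-volume lvm create --data {device} wraps most of this flow):

mkdir -p /zm3/osd.1
mount /dev/nvme1n1p1 /zm3/osd.1      // the xfs data partition created above
// assumed ceph.conf entry so the daemon finds its data:
//   [osd.1]
//   osd data = /zm3/osd.1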
CephFS

ceph osd pool create cephfs_data 64                // data pool with 64 pgs
ceph osd pool create cephfs_metadata 64            // metadata pool with 64 pgs
ceph fs new cephfs cephfs_metadata cephfs_data     // create the filesystem on those pools
ceph fs ls                                         // list filesystems
ceph mds stat                                      // show mds status
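Once the filesystem exists, it is worth checking its rank layout and cutting a dedicated client key instead of mounting everything as admin; a sketch using standard fs subcommands (client.fsuser is an arbitrary example name):

ceph fs status cephfs                           // ranks, standbys, and pool usage
ceph fs authorize cephfs client.fsuser / rw     // key restricted to this filesystem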
Deploy MDS

apt-get install ceph-mds
mkdir /var/lib/ceph/mds
mkdir /var/lib/ceph/mds/ceph-mdsa                  // data directory for mds.mdsa
ceph auth get-or-create mds.mdsa mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' > /var/lib/ceph/mds/ceph-mdsa/keyring
systemctl start ceph-mds@mdsa
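If the systemd unit runs as the ceph user (the packaged default), the data directory must be owned by it, and enabling the unit keeps the MDS up across reboots; a small sketch:

chown -R ceph:ceph /var/lib/ceph/mds/ceph-mdsa
systemctl enable ceph-mds@mdsa        // start on boot as well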
Mount CephFS

ceph-fuse --id admin -m 192.168.31.214:6789 /zm3/mnt/       // fuse mount via the monitor
ceph-fuse --id admin -m 192.168.31.214:6789 /zm3/test/
fusermount -zu /zm3/test/                                   // lazy unmount
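The kernel client is an alternative to ceph-fuse and usually performs better; a sketch, assuming the admin key has been copied into a plain secret file (the /etc/ceph/admin.secret path is an arbitrary choice):

mount -t ceph 192.168.31.214:6789:/ /zm3/mnt -o name=admin,secretfile=/etc/ceph/admin.secret
umount /zm3/mnt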
Filebench

./configure --build=alpha-unknown-linux-gnu
make && make install
./filebench-1.5-alpha3/filebench -f /usr/local/share/filebench/workloads/createfiles.f
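createfiles.f writes into whatever directory its $dir variable points at (stock workloads default to /tmp), so it has to be pointed at the CephFS mount, and filebench asks for address-space randomization to be turned off before a run. A sketch under those assumptions:

echo 0 > /proc/sys/kernel/randomize_va_space       // filebench warns if ASLR is enabled
sed -i 's|^set \$dir=.*|set $dir=/zm3/mnt|' /usr/local/share/filebench/workloads/createfiles.f
filebench -f /usr/local/share/filebench/workloads/createfiles.f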
Multiple Active MDS

ceph fs get ${fs_name}                        // show filesystem settings
ceph fs set ${fs_name} max_mds 2              // allow two active mds ranks
mkdir /var/lib/ceph/mds/ceph-mds.b
ceph auth get-or-create mds.mds.b mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' > /var/lib/ceph/mds/ceph-mds.b/keyring
ceph-mds -i mds.b                             // start the second mds
ceph fs status                                // both ranks should show as active
setfattr -n ceph.dir.pin -v {rank} {dir}      // pin a directory subtree to a given rank
ceph fs set ${fs_name} max_mds 1              // shrink back to one active mds
ceph mds deactivate {rank}                    // stop the extra rank (newer releases drop it automatically once max_mds is lowered)
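Directory pinning is set through the ceph.dir.pin virtual xattr on a mounted client; a worked example (the /zm3/mnt/dir_b path is hypothetical):

setfattr -n ceph.dir.pin -v 1 /zm3/mnt/dir_b      // pin this subtree to rank 1
getfattr -n ceph.dir.pin /zm3/mnt/dir_b           // verify the pin
setfattr -n ceph.dir.pin -v -1 /zm3/mnt/dir_b     // -1 removes the pin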