
Ceph Deploy


Install the distributed storage system Ceph to configure a storage cluster.

This example configures a Ceph cluster with 3 nodes as shown below.
In addition, each storage node has a free block device to use for Ceph
([/dev/sdb] in this example).

|
+----------------------------+----------------------------+
| | |
|10.0.0.51 |10.0.0.52 |10.0.0.53
+-----------+-----------+ +-----------+-----------+ +-----------+-----------+
| [node01.srv.world] | | [node02.srv.world] | | [node03.srv.world] |
| Object Storage +----+ Object Storage +----+ Object Storage |
| Monitor Daemon | | | | |
| Manager Daemon | | | | |
+-----------------------+ +-----------------------+ +-----------------------+

[1] Generate an SSH key-pair on the [Monitor Daemon] node (called the Admin Node here)
and copy it to each node.
The key-pair is configured with no passphrase for the [root] account here.
If you use a common account instead, sudo also needs to be configured on each node.
If you set a passphrase on the SSH key-pair, an SSH agent also needs to be set up.
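For reference, a minimal sketch of those two optional setups is shown below; the [cephuser] account name is only an example and is not used elsewhere in this guide.

# (optional) grant passwordless sudo to a common account, e.g. [cephuser]
[root@node01 ~]# echo 'cephuser ALL=(ALL) NOPASSWD: ALL' > /etc/sudoers.d/cephuser
[root@node01 ~]# chmod 440 /etc/sudoers.d/cephuser

# (optional) if the key-pair has a passphrase, load it into an SSH agent
[root@node01 ~]# eval $(ssh-agent)
[root@node01 ~]# ssh-add ~/.ssh/id_rsa
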
[root@node01 ~]# ssh-keygen -q -N ""

Enter file in which to save the key (/root/.ssh/id_rsa):

[root@node01 ~]# vi ~/.ssh/config


# create new (define each Node and SSH user)

Host node01
    Hostname node01.srv.world
    User root
Host node02
    Hostname node02.srv.world
    User root
Host node03
    Hostname node03.srv.world
    User root

[root@node01 ~]# chmod 600 ~/.ssh/config


# transfer public key

[root@node01 ~]# ssh-copy-id node01

root@node01.srv.world's password:

Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'node01'"


and check to make sure that only the key(s) you wanted were added.

[root@node01 ~]# ssh-copy-id node02

[root@node01 ~]# ssh-copy-id node03
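
Optionally, confirm that key-based login now works to every node; this quick check (not part of the original steps) just prints each node's hostname and should not prompt for a password.

[root@node01 ~]# for NODE in node01 node02 node03
do
ssh $NODE "hostname"
done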

[2] Install Ceph on each node from the Admin Node.

[root@node01 ~]# for NODE in node01 node02 node03
do
ssh $NODE "dnf -y install centos-release-ceph-reef epel-release; dnf -y install ceph"
done

[3] Configure the [Monitor Daemon] and [Manager Daemon] on the Admin Node.


[root@node01 ~]# uuidgen

f2e52449-e87b-4786-981e-1f1f58186a7c
# create new config
# file name ⇒ (any Cluster Name).conf
# set Cluster Name [ceph] (default) on this example ⇒ [ceph.conf]

[root@node01 ~]# vi /etc/ceph/ceph.conf

[global]
# specify cluster network for monitoring
cluster network = 10.0.0.0/24
# specify public network
public network = 10.0.0.0/24
# specify UUID generated above
fsid = f2e52449-e87b-4786-981e-1f1f58186a7c
# specify IP address of Monitor Daemon
mon host = 10.0.0.51
# specify Hostname of Monitor Daemon
mon initial members = node01
osd pool default crush rule = -1

# mon.(Node name)
[mon.node01]
# specify Hostname of Monitor Daemon
host = node01
# specify IP address of Monitor Daemon
mon addr = 10.0.0.51
# allow to delete pools
mon allow pool delete = true
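
Optionally, you can double-check that the file parses as intended with the [ceph-conf] utility shipped with the ceph package; it should print the [fsid] set above.

[root@node01 ~]# ceph-conf -c /etc/ceph/ceph.conf --lookup fsid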

# generate secret key for Cluster monitoring

[root@node01 ~]# ceph-authtool --create-keyring /etc/ceph/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'

creating /etc/ceph/ceph.mon.keyring
# generate secret key for Cluster admin

[root@node01 ~]# ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'

creating /etc/ceph/ceph.client.admin.keyring
# generate key for bootstrap

[root@node01 ~]# ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r'

creating /var/lib/ceph/bootstrap-osd/ceph.keyring
# import generated key

[root@node01 ~]# ceph-authtool /etc/ceph/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring

importing contents of /etc/ceph/ceph.client.admin.keyring into /etc/ceph/ceph.mon.keyring

[root@node01 ~]# ceph-authtool /etc/ceph/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring

importing contents of /var/lib/ceph/bootstrap-osd/ceph.keyring into /etc/ceph/ceph.mon.keyring
# generate monitor map

[root@node01 ~]# FSID=$(grep "^fsid" /etc/ceph/ceph.conf | awk {'print $NF'})

[root@node01 ~]# NODENAME=$(grep "^mon initial" /etc/ceph/ceph.conf | awk {'print $NF'})

[root@node01 ~]# NODEIP=$(grep "^mon host" /etc/ceph/ceph.conf | awk {'print $NF'})

[root@node01 ~]# monmaptool --create --add $NODENAME $NODEIP --fsid $FSID /etc/ceph/monmap

monmaptool: monmap file /etc/ceph/monmap
setting min_mon_release = pacific
monmaptool: set fsid to f2e52449-e87b-4786-981e-1f1f58186a7c
monmaptool: writing epoch 0 to /etc/ceph/monmap (1 monitors)
# create a directory for Monitor Daemon
# directory name ⇒ (Cluster Name)-(Node Name)

[root@node01 ~]# mkdir /var/lib/ceph/mon/ceph-node01


# associate key and monmap to Monitor Daemon
# --cluster (Cluster Name)

[root@node01 ~]# ceph-mon --cluster ceph --mkfs -i $NODENAME --monmap /etc/ceph/monmap --keyring /etc/ceph/ceph.mon.keyring

[root@node01 ~]# chown ceph:ceph /etc/ceph/ceph.*

[root@node01 ~]# chown -R ceph:ceph /var/lib/ceph/mon/ceph-node01 /var/lib/ceph/bootstrap-osd

[root@node01 ~]# systemctl enable --now ceph-mon@$NODENAME


# enable Messenger v2 Protocol

[root@node01 ~]# ceph mon enable-msgr2
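
Optionally, confirm that the monitor now listens on both protocols; [ceph mon dump] should show the monitor with a v2 address (port 3300) alongside the legacy v1 address (port 6789).

[root@node01 ~]# ceph mon dump | grep node01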


[root@node01 ~]# ceph config set mon auth_allow_insecure_global_id_reclaim false
# enable Placement Groups auto scale module

[root@node01 ~]# ceph mgr module enable pg_autoscaler
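
Once the Manager Daemon below is running and pools have been created, the autoscaler's view can be checked with the command below; at this point, with no pools yet, it prints nothing.

[root@node01 ~]# ceph osd pool autoscale-status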


# create a directory for Manager Daemon
# directory name ⇒ (Cluster Name)-(Node Name)

[root@node01 ~]# mkdir /var/lib/ceph/mgr/ceph-node01


# create auth key

[root@node01 ~]# ceph auth get-or-create mgr.$NODENAME mon 'allow profile mgr' osd 'allow *' mds 'allow *'

[mgr.node01]
key = AQB7seJk/PK8ARAAnPnPxdr+6Npqxz92J3flng==
[root@node01 ~]# ceph auth get-or-create mgr.node01 > /etc/ceph/ceph.mgr.admin.keyring

[root@node01 ~]# cp /etc/ceph/ceph.mgr.admin.keyring /var/lib/ceph/mgr/ceph-node01/keyring

[root@node01 ~]# chown ceph:ceph /etc/ceph/ceph.mgr.admin.keyring

[root@node01 ~]# chown -R ceph:ceph /var/lib/ceph/mgr/ceph-node01

[root@node01 ~]# systemctl enable --now ceph-mgr@$NODENAME

[4] On the Admin Node, if SELinux is enabled, change the policy settings as follows.


[root@node01 ~]# vi cephmon.te
# create new

module cephmon 1.0;

require {
        type ceph_t;
        type ptmx_t;
        type initrc_var_run_t;
        type sudo_exec_t;
        type chkpwd_exec_t;
        type shadow_t;
        class file { execute execute_no_trans lock getattr map open read };
        class capability { audit_write sys_resource };
        class process setrlimit;
        class netlink_audit_socket { create nlmsg_relay };
        class chr_file getattr;
}

#============= ceph_t ==============


allow ceph_t initrc_var_run_t:file { lock open read };
allow ceph_t self:capability { audit_write sys_resource };
allow ceph_t self:netlink_audit_socket { create nlmsg_relay };
allow ceph_t self:process setrlimit;
allow ceph_t sudo_exec_t:file { execute execute_no_trans open read map };
allow ceph_t ptmx_t:chr_file getattr;
allow ceph_t chkpwd_exec_t:file { execute execute_no_trans open read map };
allow ceph_t shadow_t:file { getattr open read };

[root@node01 ~]# checkmodule -m -M -o cephmon.mod cephmon.te

[root@node01 ~]# semodule_package --outfile cephmon.pp --module cephmon.mod

[root@node01 ~]# semodule -i cephmon.pp
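
To verify that the module is now loaded, list the installed SELinux modules and filter for it.

[root@node01 ~]# semodule -l | grep cephmon

cephmon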

[5] On the Admin Node, if Firewalld is running, allow the service ports.


[root@node01 ~]# firewall-cmd --add-service=ceph-mon

success
[root@node01 ~]# firewall-cmd --runtime-to-permanent

success
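
To confirm the rule, you can list the allowed services; [ceph-mon] should now appear in the output.

[root@node01 ~]# firewall-cmd --list-services
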
[6] Confirm the cluster status. It's OK if the [Monitor Daemon] and [Manager Daemon]
are running as shown below.
OSDs (Object Storage Devices) are configured in step [7], so it's no problem
if the status is [HEALTH_WARN] at this point.
[root@node01 ~]# ceph -s

cluster:
id: f2e52449-e87b-4786-981e-1f1f58186a7c
health: HEALTH_WARN
OSD count 0 < osd_pool_default_size 3

services:
mon: 1 daemons, quorum node01 (age 2m)
mgr: node01(active, since 34s)
osd: 0 osds: 0 up, 0 in

data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 0 B used, 0 B / 0 B avail
pgs:

[7] Configure an OSD (Object Storage Device) on each node from the Admin Node.
The block devices ([/dev/sdb] in this example) are formatted for OSD use, so be careful
if they contain existing data.
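
Before formatting, it may be worth confirming that [/dev/sdb] on each node really is empty; [wipefs --no-act] only reports existing filesystem or partition signatures and does not erase anything.

[root@node01 ~]# for NODE in node01 node02 node03
do
ssh $NODE "lsblk /dev/sdb; wipefs --no-act /dev/sdb"
done
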
# if Firewalld is running on each Node, allow ports

[root@node01 ~]# for NODE in node01 node02 node03
do
ssh $NODE "firewall-cmd --add-service=ceph; firewall-cmd --runtime-to-permanent"
done

# configure settings for OSD to each Node

[root@node01 ~]# for NODE in node01 node02 node03
do
if [ ! ${NODE} = "node01" ]
then
scp /etc/ceph/ceph.conf ${NODE}:/etc/ceph/ceph.conf
scp /etc/ceph/ceph.client.admin.keyring ${NODE}:/etc/ceph
scp /var/lib/ceph/bootstrap-osd/ceph.keyring ${NODE}:/var/lib/ceph/bootstrap-osd
fi
ssh $NODE \
"chown ceph:ceph /etc/ceph/ceph.* /var/lib/ceph/bootstrap-osd/*; \
parted --script /dev/sdb 'mklabel gpt'; \
parted --script /dev/sdb 'mkpart primary 0% 100%'; \
ceph-volume lvm create --data /dev/sdb1"
done

Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 02f5a7be-c477-4e11-9718-28625d2334c6
Running command: vgcreate --force --yes ceph-79c35013-5c06-4a66-956e-15eebec60fb5 /dev/sdb1
stdout: Physical volume "/dev/sdb1" successfully created.
stdout: Volume group "ceph-79c35013-5c06-4a66-956e-15eebec60fb5" successfully created
Running command: lvcreate --yes -l 40959 -n osd-block-02f5a7be-c477-4e11-9718-28625d2334c6 ceph-79c35013-5c06-4a66-956e-15eebec60fb5
stdout: Logical volume "osd-block-02f5a7be-c477-4e11-9718-28625d2334c6" created.
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0
Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-79c35013-5c06-4a66-956e-15eebec60fb5/osd-block-02f5a7be-c477-4e11-9718-28625d2334c6
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/ln -s /dev/ceph-79c35013-5c06-4a66-956e-15eebec60fb5/osd-block-02f5a7be-c477-4e11-9718-28625d2334c6 /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
stderr: got monmap epoch 2
--> Creating keyring file for osd.0
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/
Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 02f5a7be-c477-4e11-9718-28625d2334c6 --setuser ceph --setgroup ceph
--> ceph-volume lvm prepare successful for: /dev/sdb1
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-79c35013-5c06-4a66-956e-15eebec60fb5/osd-block-02f5a7be-c477-4e11-9718-28625d2334c6 --path /var/lib/ceph/osd/ceph-0 --no-mon-config
Running command: /usr/bin/ln -snf /dev/ceph-79c35013-5c06-4a66-956e-15eebec60fb5/osd-block-02f5a7be-c477-4e11-9718-28625d2334c6 /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/systemctl enable ceph-volume@lvm-0-02f5a7be-c477-4e11-9718-28625d2334c6
stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-0-02f5a7be-c477-4e11-9718-28625d2334c6.service → /usr/lib/systemd/system/ceph-volume@.service.
Running command: /usr/bin/systemctl enable --runtime ceph-osd@0
stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@0.service → /usr/lib/systemd/system/ceph-osd@.service.
Running command: /usr/bin/systemctl start ceph-osd@0
--> ceph-volume lvm activate successful for osd ID: 0
--> ceph-volume lvm create successful for: /dev/sdb1
ceph.conf 100% 273 351.2KB/s 00:00
ceph.client.admin.keyring 100% 151 200.9KB/s 00:00
ceph.keyring 100% 129 175.1KB/s 00:00
.....
.....

# confirm cluster status


# that's OK if [HEALTH_OK]

[root@node01 ~]# ceph -s

cluster:
id: f2e52449-e87b-4786-981e-1f1f58186a7c
health: HEALTH_OK

services:
mon: 1 daemons, quorum node01 (age 13m)
mgr: node01(active, since 11m)
osd: 3 osds: 3 up (since 4m), 3 in (since 4m)

data:
pools: 1 pools, 1 pgs
objects: 2 objects, 449 KiB
usage: 80 MiB used, 480 GiB / 480 GiB avail
pgs: 1 active+clean

# confirm OSD tree

[root@node01 ~]# ceph osd tree

ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF


-1 0.46857 root default
-3 0.15619 host node01
0 hdd 0.15619 osd.0 up 1.00000 1.00000
-5 0.15619 host node02
1 hdd 0.15619 osd.1 up 1.00000 1.00000
-7 0.15619 host node03
2 hdd 0.15619 osd.2 up 1.00000 1.00000

[root@node01 ~]# ceph df


--- RAW STORAGE ---
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 480 GiB 480 GiB 80 MiB 80 MiB 0.02
TOTAL 480 GiB 480 GiB 80 MiB 80 MiB 0.02

--- POOLS ---


POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
.mgr 1 1 449 KiB 2 1.3 MiB 0 152 GiB

[root@node01 ~]# ceph osd df


ID  CLASS  WEIGHT   REWEIGHT  SIZE     RAW USE  DATA     OMAP  META    AVAIL    %USE  VAR   PGS  STATUS
 0    hdd  0.15619   1.00000  160 GiB   27 MiB  580 KiB   0 B  26 MiB  160 GiB  0.02  1.00    1      up
 1    hdd  0.15619   1.00000  160 GiB   27 MiB  580 KiB   0 B  26 MiB  160 GiB  0.02  1.00    1      up
 2    hdd  0.15619   1.00000  160 GiB   27 MiB  580 KiB   0 B  26 MiB  160 GiB  0.02  1.00    1      up
                       TOTAL  480 GiB   80 MiB  1.7 MiB   0 B  78 MiB  480 GiB  0.02
MIN/MAX VAR: 1.00/1.00 STDDEV: 0

Enable the Ceph Object Gateway (RADOSGW) to access the Ceph cluster storage via an
Amazon S3 or OpenStack Swift compatible API.
This example is based on the following environment.

|
+--------------------+ | +----------------------+
| [dlp.srv.world] |10.0.0.30 | 10.0.0.31| [www.srv.world] |
| Ceph Client +-----------+-----------+ RADOSGW |
| | | | |
+--------------------+ | +----------------------+
+----------------------------+----------------------------+
| | |
|10.0.0.51 |10.0.0.52 |10.0.0.53
+-----------+-----------+ +-----------+-----------+ +-----------+-----------+
| [node01.srv.world] | | [node02.srv.world] | | [node03.srv.world] |
| Object Storage +----+ Object Storage +----+ Object Storage |
| Monitor Daemon | | | | |
| Manager Daemon | | | | |
+-----------------------+ +-----------------------+ +-----------------------+

[1] Transfer the required files to the RADOSGW node and configure it from the Admin Node.
# transfer public key

[root@node01 ~]# ssh-copy-id www

# install required packages for RADOSGW

[root@node01 ~]# ssh www "dnf -y install centos-release-ceph-reef epel-release; dnf -y install ceph-radosgw"

[root@node01 ~]# vi /etc/ceph/ceph.conf

# add to the end


# client.rgw.(Node Name)
[client.rgw.www]
# IP address of the Node
host = 10.0.0.31
# DNS name
rgw dns name = www.srv.world
keyring = /var/lib/ceph/radosgw/ceph-rgw.www/keyring
log file = /var/log/ceph/radosgw.gateway.log

# transfer files

[root@node01 ~]# scp /etc/ceph/ceph.conf www:/etc/ceph/

ceph.conf 100% 374 210.9KB/s 00:00

[root@node01 ~]# scp /etc/ceph/ceph.client.admin.keyring www:/etc/ceph/

ceph.client.admin.keyring 100% 151 56.0KB/s 00:00

# configure RADOSGW

[root@node01 ~]# ssh www \
"mkdir -p /var/lib/ceph/radosgw/ceph-rgw.www; \
ceph auth get-or-create client.rgw.www osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/ceph-rgw.www/keyring; \
chown ceph:ceph /etc/ceph/ceph.*; \
chown -R ceph:ceph /var/lib/ceph/radosgw; \
systemctl enable --now ceph-radosgw@rgw.www; \
firewall-cmd --add-port=7480/tcp; firewall-cmd --runtime-to-permanent"
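
Optionally, before the HTTP check below, confirm that the gateway service came up cleanly on the RADOSGW node; it should report [active].

[root@node01 ~]# ssh www "systemctl is-active ceph-radosgw@rgw.www"

active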

# verify status
# it's OK if the following answer is shown after a few seconds
[root@node01 ~]# curl www.srv.world:7480

<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>

[2] On the Object Gateway node, create an S3-compatible user who can authenticate to
the Object Gateway.
# for example, create [serverworld] user

[root@www ~]# radosgw-admin user create --uid=serverworld --display-name="Server World" --email=admin@srv.world

{
"user_id": "serverworld",
"display_name": "Server World",
"email": "admin@srv.world",
"suspended": 0,
"max_buckets": 1000,
"subusers": [],
"keys": [
{
"user": "serverworld",
"access_key": "9YRQNWJ1CG6DH69KL2RT",
"secret_key": "Ht07yUzoQFKOeFcMC0Dn9DkAJHqBn2M75mUmC78T"
}
],
"swift_keys": [],
"caps": [],
"op_mask": "read, write, delete",
"default_placement": "",
"default_storage_class": "",
"placement_tags": [],
"bucket_quota": {
"enabled": false,
"check_on_raw": false,
"max_size": -1,
"max_size_kb": 0,
"max_objects": -1
},
"user_quota": {
"enabled": false,
"check_on_raw": false,
"max_size": -1,
"max_size_kb": 0,
"max_objects": -1
},
"temp_url_keys": [],
"type": "rgw",
"mfa_ids": []
}

# show user list

[root@www ~]# radosgw-admin user list

[
"serverworld"
]

[root@www ~]# radosgw-admin user info --uid=serverworld

{
"user_id": "serverworld",
"display_name": "Server World",
"email": "admin@srv.world",
"suspended": 0,
"max_buckets": 1000,
"subusers": [],
"keys": [
{
"user": "serverworld",
"access_key": "9YRQNWJ1CG6DH69KL2RT",
"secret_key": "Ht07yUzoQFKOeFcMC0Dn9DkAJHqBn2M75mUmC78T"
}
.....
.....

[3] Verify access via the S3 interface by creating and running a Python test script on
a client computer as a common user.
[cent@dlp ~]$ pip3 install boto3
[cent@dlp ~]$ vi s3_test.py

import sys
import boto3
from botocore.config import Config

# user's access-key and secret-key you added in section [2]


session = boto3.session.Session(
    aws_access_key_id = '9YRQNWJ1CG6DH69KL2RT',
    aws_secret_access_key = 'Ht07yUzoQFKOeFcMC0Dn9DkAJHqBn2M75mUmC78T'
)

# Object Gateway URL

s3client = session.client(
    's3',
    endpoint_url = 'http://10.0.0.31:7480',
    config = Config()
)

# create [my-new-bucket]
bucket = s3client.create_bucket(Bucket = 'my-new-bucket')

# list Buckets
print(s3client.list_buckets())

# remove [my-new-bucket]
s3client.delete_bucket(Bucket = 'my-new-bucket')

[cent@dlp ~]$ python3 s3_test.py

{'ResponseMetadata': {'RequestId': 'tx000000f58684e2cee61ef-0064e2c7ef-37db-default', 'HostId': '', 'HTTPStatusCode': 200, 'HTTPHeaders': {'transfer-encoding': 'chunked', 'x-amz-request-id': 'tx000000f58684e2cee61ef-0064e2c7ef-37db-default', 'content-type': 'application/xml', 'date': 'Mon, 21 Aug 2023 02:11:59 GMT', 'connection': 'Keep-Alive'}, 'RetryAttempts': 0}, 'Buckets': [{'Name': 'my-new-bucket', 'CreationDate': datetime.datetime(2023, 8, 21, 2, 11, 56, 671000, tzinfo=tzutc())}], 'Owner': {'DisplayName': 'Server World', 'ID': 'serverworld'}}
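
The same check can also be run from the shell if the AWS CLI happens to be installed on the client (it is not part of this example's package set); the endpoint URL and credentials are the same ones used in the script above.

[cent@dlp ~]$ export AWS_ACCESS_KEY_ID=9YRQNWJ1CG6DH69KL2RT
[cent@dlp ~]$ export AWS_SECRET_ACCESS_KEY=Ht07yUzoQFKOeFcMC0Dn9DkAJHqBn2M75mUmC78T
[cent@dlp ~]$ aws --endpoint-url http://10.0.0.31:7480 s3 ls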

Enable the Ceph Dashboard to manage the Ceph cluster on a web console.
This example is based on the following environment.

|
+--------------------+ | +----------------------+
| [dlp.srv.world] |10.0.0.30 | 10.0.0.31| [www.srv.world] |
| Ceph Client +-----------+-----------+ RADOSGW |
| | | | |
+--------------------+ | +----------------------+
+----------------------------+----------------------------+
| | |
|10.0.0.51 |10.0.0.52 |10.0.0.53
+-----------+-----------+ +-----------+-----------+ +-----------+-----------+
| [node01.srv.world] | | [node02.srv.world] | | [node03.srv.world] |
| Object Storage +----+ Object Storage +----+ Object Storage |
| Monitor Daemon | | | | |
| Manager Daemon | | | | |
+-----------------------+ +-----------------------+ +-----------------------+

[1] Enable the Dashboard module on the [Manager Daemon] node.
Furthermore, the Dashboard requires SSL/TLS; this example creates a self-signed certificate.
[root@node01 ~]# dnf install ceph-mgr-dashboard
[root@node01 ~]# ceph mgr module enable dashboard

[root@node01 ~]# ceph mgr module ls | grep dashboard

dashboard on

# create self-signed certificate

[root@node01 ~]# ceph dashboard create-self-signed-cert

Self-signed certificate created


# create a user for Dashboard
# [ceph dashboard ac-user-create (username) -i (password file) administrator]

[root@node01 ~]# echo "password" > pass.txt

[root@node01 ~]# ceph dashboard ac-user-create serverworld -i pass.txt administrator

{"username": "serverworld", "password":


"$2b$12$Ja/1YyevqBPLtjbOLCfaZerxebudbcX1zxLYHRuMVWi3bY3qYlj7O", "roles":
["administrator"], "name": null, "email": null, "lastUpdate": 1692584152,
"enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": false}

# confirm Dashboard URL

[root@node01 ~]# ceph mgr services

{
"dashboard": "https://10.0.0.51:8443/"
}
[2] On the Dashboard host, if Firewalld is running, allow the service port.
[root@node01 ~]# firewall-cmd --add-port=8443/tcp

[root@node01 ~]# firewall-cmd --runtime-to-permanent

[3] Access the Dashboard URL from a client computer with a web browser, and the Ceph
Dashboard login form is shown. Log in as the user you added in [1].
After login, you can see the various statuses of the Ceph cluster on the web console.
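
If the login form does not load, a quick check from the Admin Node is to make sure the Manager answers on the Dashboard port (the [-k] option skips verification of the self-signed certificate).

[root@node01 ~]# curl -k -I https://10.0.0.51:8443/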
