Skip to content

Commit 4eb04a5

Browse files
author
Sébastien Han
committed
Ability to use a remote Ceph cluster
Sometimes we want to run some benchmarks on virtual machines that will be backed by a Ceph cluster. The first idea that comes to mind is to use devstack to quickly get an OpenStack up and running, but what about the configuration of Devstack with this remote cluster? Thanks to this commit it's now possible to use an already existing Ceph cluster. In this case Devstack just needs two things: * the location of the Ceph config file (by default devstack will look for /etc/ceph/ceph.conf) * the admin key of the remote ceph cluster (by default devstack will look for /etc/ceph/ceph.client.admin.keyring) Devstack will then create the necessary pools, users, keys and will connect the OpenStack environment as usual. During the unstack phase all pools, users and keys will be deleted on the remote cluster, while local files and the ceph-common package will be removed from the current Devstack host. To enable this mode simply add REMOTE_CEPH=True to your localrc file. Change-Id: I1a4b6fd676d50b6a41a09e7beba9b11f8d1478f7 Signed-off-by: Sébastien Han <[email protected]>
1 parent e750f9c commit 4eb04a5

File tree

3 files changed

+125
-43
lines changed

3 files changed

+125
-43
lines changed

extras.d/60-ceph.sh

Lines changed: 41 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -6,14 +6,19 @@ if is_service_enabled ceph; then
66
source $TOP_DIR/lib/ceph
77
elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
88
echo_summary "Installing Ceph"
9-
install_ceph
10-
echo_summary "Configuring Ceph"
11-
configure_ceph
12-
# NOTE (leseb): Do everything here because we need to have Ceph started before the main
13-
# OpenStack components. Ceph OSD must start here otherwise we can't upload any images.
14-
echo_summary "Initializing Ceph"
15-
init_ceph
16-
start_ceph
9+
check_os_support_ceph
10+
if [ "$REMOTE_CEPH" = "False" ]; then
11+
install_ceph
12+
echo_summary "Configuring Ceph"
13+
configure_ceph
14+
# NOTE (leseb): Do everything here because we need to have Ceph started before the main
15+
# OpenStack components. Ceph OSD must start here otherwise we can't upload any images.
16+
echo_summary "Initializing Ceph"
17+
init_ceph
18+
start_ceph
19+
else
20+
install_ceph_remote
21+
fi
1722
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
1823
if is_service_enabled glance; then
1924
echo_summary "Configuring Glance for Ceph"
@@ -32,14 +37,39 @@ if is_service_enabled ceph; then
3237
echo_summary "Configuring libvirt secret"
3338
import_libvirt_secret_ceph
3439
fi
40+
41+
if [ "$REMOTE_CEPH" = "False" ]; then
42+
if is_service_enabled glance; then
43+
echo_summary "Configuring Glance for Ceph"
44+
configure_ceph_embedded_glance
45+
fi
46+
if is_service_enabled nova; then
47+
echo_summary "Configuring Nova for Ceph"
48+
configure_ceph_embedded_nova
49+
fi
50+
if is_service_enabled cinder; then
51+
echo_summary "Configuring Cinder for Ceph"
52+
configure_ceph_embedded_cinder
53+
fi
54+
fi
3555
fi
3656

3757
if [[ "$1" == "unstack" ]]; then
38-
stop_ceph
39-
cleanup_ceph
58+
if [ "$REMOTE_CEPH" = "True" ]; then
59+
cleanup_ceph_remote
60+
else
61+
cleanup_ceph_embedded
62+
stop_ceph
63+
fi
64+
cleanup_ceph_general
4065
fi
4166

4267
if [[ "$1" == "clean" ]]; then
43-
cleanup_ceph
68+
if [ "$REMOTE_CEPH" = "True" ]; then
69+
cleanup_ceph_remote
70+
else
71+
cleanup_ceph_embedded
72+
fi
73+
cleanup_ceph_general
4474
fi
4575
fi

lib/ceph

Lines changed: 78 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,11 @@ CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
6868
CEPH_REPLICAS=${CEPH_REPLICAS:-1}
6969
CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
7070

71+
# Connect to an existing Ceph cluster
72+
REMOTE_CEPH=$(trueorfalse False $REMOTE_CEPH)
73+
REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}
74+
75+
7176
# Functions
7277
# ------------
7378

@@ -92,29 +97,69 @@ EOF
9297
sudo rm -f secret.xml
9398
}
9499

100+
# undefine_virsh_secret() - Undefine Cinder key secret from libvirt
101+
function undefine_virsh_secret {
102+
if is_service_enabled cinder || is_service_enabled nova; then
103+
local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
104+
sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
105+
fi
106+
}
107+
108+
109+
# check_os_support_ceph() - Check if the operating system provides a decent version of Ceph
110+
function check_os_support_ceph {
111+
if [[ ! ${DISTRO} =~ (trusty|f20|f21) ]]; then
112+
echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)"
113+
if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
114+
die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes"
115+
fi
116+
NO_UPDATE_REPOS=False
117+
fi
118+
}
119+
95120
# cleanup_ceph() - Remove residual data files, anything left over from previous
96121
# runs that a clean run would need to clean up
97-
function cleanup_ceph {
122+
function cleanup_ceph_remote {
123+
# do a proper cleanup from here to avoid leftover on the remote Ceph cluster
124+
if is_service_enabled glance; then
125+
sudo ceph osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
126+
sudo ceph auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1
127+
fi
128+
if is_service_enabled cinder; then
129+
sudo ceph osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
130+
sudo ceph auth del client.$CINDER_CEPH_USER > /dev/null 2>&1
131+
fi
132+
if is_service_enabled c-bak; then
133+
sudo ceph osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
134+
sudo ceph auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1
135+
fi
136+
if is_service_enabled nova; then
137+
iniset $NOVA_CONF libvirt rbd_secret_uuid ""
138+
sudo ceph osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
139+
fi
140+
}
141+
142+
function cleanup_ceph_embedded {
98143
sudo pkill -f ceph-mon
99144
sudo pkill -f ceph-osd
100145
sudo rm -rf ${CEPH_DATA_DIR}/*/*
101-
sudo rm -rf ${CEPH_CONF_DIR}/*
102146
if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
103147
sudo umount ${CEPH_DATA_DIR}
104148
fi
105149
if [[ -e ${CEPH_DISK_IMAGE} ]]; then
106150
sudo rm -f ${CEPH_DISK_IMAGE}
107151
fi
152+
}
153+
154+
function cleanup_ceph_general {
155+
undefine_virsh_secret
108156
uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
109-
if is_service_enabled cinder || is_service_enabled nova; then
110-
local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
111-
sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
112-
fi
113-
if is_service_enabled nova; then
114-
iniset $NOVA_CONF libvirt rbd_secret_uuid ""
115-
fi
157+
158+
# purge ceph config file and keys
159+
sudo rm -rf ${CEPH_CONF_DIR}/*
116160
}
117161

162+
118163
# configure_ceph() - Set config files, create data dirs, etc
119164
function configure_ceph {
120165
local count=0
@@ -130,7 +175,7 @@ function configure_ceph {
130175
sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)
131176

132177
# create a default ceph configuration file
133-
sudo tee -a ${CEPH_CONF_FILE} > /dev/null <<EOF
178+
sudo tee ${CEPH_CONF_FILE} > /dev/null <<EOF
134179
[global]
135180
fsid = ${CEPH_FSID}
136181
mon_initial_members = $(hostname)
@@ -203,14 +248,17 @@ EOF
203248
done
204249
}
205250

206-
# configure_ceph_glance() - Glance config needs to come after Glance is set up
207-
function configure_ceph_glance {
251+
function configure_ceph_embedded_glance {
208252
# configure Glance service options, ceph pool, ceph user and ceph key
209-
sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
210253
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
211254
if [[ $CEPH_REPLICAS -ne 1 ]]; then
212255
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
213256
fi
257+
}
258+
259+
# configure_ceph_glance() - Glance config needs to come after Glance is set up
260+
function configure_ceph_glance {
261+
sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
214262
sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
215263
sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
216264

@@ -225,14 +273,17 @@ function configure_ceph_glance {
225273
iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
226274
}
227275

228-
# configure_ceph_nova() - Nova config needs to come after Nova is set up
229-
function configure_ceph_nova {
276+
function configure_ceph_embedded_nova {
230277
# configure Nova service options, ceph pool, ceph user and ceph key
231-
sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
232278
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
233279
if [[ $CEPH_REPLICAS -ne 1 ]]; then
234280
sudo -c ${CEPH_CONF_FILE} ceph osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
235281
fi
282+
}
283+
284+
# configure_ceph_nova() - Nova config needs to come after Nova is set up
285+
function configure_ceph_nova {
286+
sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
236287
iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
237288
iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
238289
iniset $NOVA_CONF libvirt inject_key false
@@ -248,15 +299,17 @@ function configure_ceph_nova {
248299
fi
249300
}
250301

251-
# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
252-
function configure_ceph_cinder {
302+
function configure_ceph_embedded_cinder {
253303
# Configure Cinder service options, ceph pool, ceph user and ceph key
254-
sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
255304
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
256305
if [[ $CEPH_REPLICAS -ne 1 ]]; then
257306
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
258-
259307
fi
308+
}
309+
310+
# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
311+
function configure_ceph_cinder {
312+
sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
260313
sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
261314
sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
262315
}
@@ -270,15 +323,12 @@ function init_ceph {
270323
}
271324

272325
# install_ceph() - Collect source and prepare
326+
function install_ceph_remote {
327+
install_package ceph-common
328+
}
329+
273330
function install_ceph {
274-
# NOTE(dtroyer): At some point it'll be easier to test for unsupported distros,
275-
# leveraging the list in stack.sh
276-
if [[ ${os_CODENAME} =~ trusty ]] || [[ ${os_CODENAME} =~ Schrödinger’sCat ]] || [[ ${os_CODENAME} =~ Heisenbug ]]; then
277-
NO_UPDATE_REPOS=False
278-
install_package ceph
279-
else
280-
exit_distro_not_supported "Ceph since your distro doesn't provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 19/20"
281-
fi
331+
install_package ceph
282332
}
283333

284334
# start_ceph() - Start running processes, including screen

lib/cinder_backends/ceph

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -52,11 +52,13 @@ function configure_cinder_backend_ceph {
5252
iniset $CINDER_CONF DEFAULT glance_api_version 2
5353

5454
if is_service_enabled c-bak; then
55-
# Configure Cinder backup service options, ceph pool, ceph user and ceph key
5655
sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
57-
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
58-
if [[ $CEPH_REPLICAS -ne 1 ]]; then
59-
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
56+
if [ "$REMOTE_CEPH" = "False" ]; then
57+
# Configure Cinder backup service options, ceph pool, ceph user and ceph key
58+
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
59+
if [[ $CEPH_REPLICAS -ne 1 ]]; then
60+
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
61+
fi
6062
fi
6163
sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
6264
sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring

0 commit comments

Comments
 (0)