@@ -68,6 +68,11 @@ CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
6868CEPH_REPLICAS=${CEPH_REPLICAS:-1}
6969CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
7070
# Connect to an existing Ceph cluster instead of deploying one locally.
# REMOTE_CEPH is normalized to "True"/"False" via trueorfalse (default False).
# Quote the expansion so an unset/empty value is still passed as a single
# (empty) argument and word-splitting can never occur.
REMOTE_CEPH=$(trueorfalse False "$REMOTE_CEPH")
# Path to the admin keyring used to bootstrap access to the remote cluster
REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-${CEPH_CONF_DIR}/ceph.client.admin.keyring}
7176# Functions
7277# ------------
7378
9297 sudo rm -f secret.xml
9398}
9499
# undefine_virsh_secret() - Undefine Cinder key secret(s) from libvirt
# The awk filter can match more than one secret; the old code stuffed all
# UUIDs into a single scalar and passed them to one secret-undefine call,
# which only accepts a single UUID. Undefine each one individually instead.
# Failures are ignored (best-effort cleanup; the secret may not exist).
function undefine_virsh_secret {
    if is_service_enabled cinder || is_service_enabled nova; then
        local virsh_uuid
        for virsh_uuid in $(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }'); do
            sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
        done
    fi
}
107+
108+
# check_os_support_ceph() - Check if the operating system provides a decent version of Ceph
# Known-good distros pass straight through; anything else warns, and aborts
# unless the user explicitly opts in with FORCE_CEPH_INSTALL=yes.
function check_os_support_ceph {
    if [[ ${DISTRO} =~ (trusty|f20|f21) ]]; then
        return
    fi
    echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)"
    if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
        die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes"
    fi
    # Forced install on an unsupported distro: allow the repos to be updated
    NO_UPDATE_REPOS=False
}
119+
# cleanup_ceph_remote() - Remove residual pools and client keys on the remote
# Ceph cluster, anything left over from previous runs that a clean run would
# need to clean up
function cleanup_ceph_remote {
    # do a proper cleanup from here to avoid leftover on the remote Ceph cluster
    # All commands are best-effort: output and errors are discarded because
    # the pool/user may not exist on a partially-deployed cluster.
    if is_service_enabled glance; then
        sudo ceph osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
        sudo ceph auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1
    fi
    if is_service_enabled cinder; then
        sudo ceph osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
        sudo ceph auth del client.$CINDER_CEPH_USER > /dev/null 2>&1
    fi
    if is_service_enabled c-bak; then
        sudo ceph osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
        sudo ceph auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1
    fi
    if is_service_enabled nova; then
        # Nova reuses the Cinder client key (see configure_ceph_nova, which
        # sets rbd_user to CINDER_CEPH_USER), so there is no nova user to
        # delete — just clear the secret uuid and drop the ephemeral pool.
        iniset $NOVA_CONF libvirt rbd_secret_uuid ""
        sudo ceph osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
    fi
}
141+
# cleanup_ceph_embedded() - Stop the locally-deployed Ceph daemons and remove
# their on-disk state (data dirs, loopback mount, disk image).
function cleanup_ceph_embedded {
    sudo pkill -f ceph-mon
    sudo pkill -f ceph-osd
    sudo rm -rf ${CEPH_DATA_DIR}/*/*
    # Unmount the data dir if it is backed by the loopback disk image
    if grep -qE ${CEPH_DATA_DIR} /proc/mounts; then
        sudo umount ${CEPH_DATA_DIR}
    fi
    if [[ -e ${CEPH_DISK_IMAGE} ]]; then
        sudo rm -f ${CEPH_DISK_IMAGE}
    fi
}
153+
# cleanup_ceph_general() - Cleanup steps shared by the embedded and remote
# cases: drop the libvirt secret, remove the packages and purge local config.
function cleanup_ceph_general {
    undefine_virsh_secret
    uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1

    # purge ceph config file and keys
    sudo rm -rf ${CEPH_CONF_DIR}/*
}
117161
162+
118163# configure_ceph() - Set config files, create data dirs, etc
119164function configure_ceph {
120165 local count=0
@@ -130,7 +175,7 @@ function configure_ceph {
130175 sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)
131176
132177 # create a default ceph configuration file
133- sudo tee -a ${CEPH_CONF_FILE} > /dev/null <<EOF
178+ sudo tee ${CEPH_CONF_FILE} > /dev/null <<EOF
134179[global]
135180fsid = ${CEPH_FSID}
136181mon_initial_members = $(hostname)
@@ -203,14 +248,17 @@ EOF
203248 done
204249}
205250
206- # configure_ceph_glance() - Glance config needs to come after Glance is set up
207- function configure_ceph_glance {
# configure_ceph_embedded_glance() - Replication settings for the Glance pool;
# only meaningful when the Ceph cluster is deployed locally.
function configure_ceph_embedded_glance {
    # configure Glance service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
    # With real replication, pin the pool to the devstack crush rule
    if (( CEPH_REPLICAS != 1 )); then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}
258+
259+ # configure_ceph_glance() - Glance config needs to come after Glance is set up
260+ function configure_ceph_glance {
261+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
214262 sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
215263 sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
216264
@@ -225,14 +273,17 @@ function configure_ceph_glance {
225273 iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
226274}
227275
228- # configure_ceph_nova() - Nova config needs to come after Nova is set up
229- function configure_ceph_nova {
# configure_ceph_embedded_nova() - Replication settings for the Nova ephemeral
# pool; only meaningful when the Ceph cluster is deployed locally.
function configure_ceph_embedded_nova {
    # configure Nova service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        # BUG FIX: was "sudo -c ${CEPH_CONF_FILE} ceph ..." — "-c" is sudo's
        # login-class flag, not ceph's config-file flag, so this command
        # always failed. The conf flag belongs after the ceph binary.
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}
283+
284+ # configure_ceph_nova() - Nova config needs to come after Nova is set up
285+ function configure_ceph_nova {
286+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
236287 iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
237288 iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
238289 iniset $NOVA_CONF libvirt inject_key false
@@ -248,15 +299,17 @@ function configure_ceph_nova {
248299 fi
249300}
250301
251- # configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
252- function configure_ceph_cinder {
# configure_ceph_embedded_cinder() - Replication settings for the Cinder pool;
# only meaningful when the Ceph cluster is deployed locally.
function configure_ceph_embedded_cinder {
    # Configure Cinder service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
    # With real replication, pin the pool to the devstack crush rule
    if (( CEPH_REPLICAS != 1 )); then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}
309+
# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
# Creates the volumes pool and the cinder client key (which also grants access
# to the nova and glance pools), then hands the keyring to the stack user.
function configure_ceph_cinder {
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
    # BUG FIX: was "$(id -g -n $whoami)" — $whoami is an unset variable, not
    # the whoami command; it only worked because "id -g -n" with no operand
    # defaults to the current user. Use the command substitution explicitly.
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
}
@@ -270,15 +323,12 @@ function init_ceph {
270323}
271324
272325# install_ceph() - Collect source and prepare
# install_ceph_remote() - Install only the client-side packages needed to
# talk to an existing remote Ceph cluster (no daemons are deployed locally).
# NOTE: the "install_ceph()" header comment above actually documents the
# next function; this one gets its own accurate header.
function install_ceph_remote {
    install_package ceph-common
}
329+
function install_ceph {
    # The distro suitability check moved to check_os_support_ceph(), so a
    # plain package install is all that is needed here.
    install_package ceph
}
283333
284334# start_ceph() - Start running processes, including screen
0 commit comments