# stackrc
#
# Find the other rc files
RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd)
# Destination path for installation
DEST=/opt/stack
# Destination for working data
DATA_DIR=${DEST}/data
# Determine stack user
if [[ $EUID -eq 0 ]]; then
STACK_USER=stack
else
STACK_USER=$(whoami)
fi
# Specify which services to launch. These generally correspond to
# screen tabs. To change the default list, use the ``enable_service`` and
# ``disable_service`` functions in ``localrc``.
# For example, to enable Swift add this to ``localrc``:
# enable_service swift
# To enable Neutron in a single-node setup, add the following
# settings to ``localrc``:
# disable_service n-net
# enable_service q-svc
# enable_service q-agt
# enable_service q-dhcp
# enable_service q-l3
# enable_service q-meta
# enable_service neutron
# # Optional, to enable tempest configuration as part of devstack
# enable_service tempest
ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql
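# As an illustrative example of trimming the default list above, a
# ``localrc`` for a headless setup without the test suite might contain:
#   disable_service horizon
#   disable_service tempest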
# Set the default Nova APIs to enable
NOVA_ENABLED_APIS=ec2,osapi_compute,metadata
# Configure Identity API version: 2.0, 3
IDENTITY_API_VERSION=2.0
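# For example, to exercise the v3 Identity API instead, ``localrc``
# (sourced below) can override this; illustrative only:
#   IDENTITY_API_VERSION=3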
# Whether to use 'dev mode' for screen windows. Dev mode works by
# stuffing text into the screen windows so that a developer can use
# ctrl-c, up-arrow, enter to restart the service. Starting services
# this way is slightly unreliable, and a bit slower, so this can
# be disabled for automated testing by setting this value to False.
USE_SCREEN=True
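# For example, an automated test job could put the following in
# ``localrc`` (sourced just below) to turn dev mode off:
#   USE_SCREEN=False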
# allow local overrides of env variables, including repo config
if [ -f $RC_DIR/localrc ]; then
source $RC_DIR/localrc
fi
# Repositories
# ------------
# Base GIT Repo URL
# Another option is http://review.openstack.org/p
GIT_BASE=${GIT_BASE:-https://github.com}
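# For example, to clone everything from the alternate base URL mentioned
# above, set this in ``localrc`` (illustrative):
#   GIT_BASE=http://review.openstack.org/p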
# metering service
CEILOMETER_REPO=${CEILOMETER_REPO:-${GIT_BASE}/openstack/ceilometer.git}
CEILOMETER_BRANCH=${CEILOMETER_BRANCH:-master}
# ceilometer client library
CEILOMETERCLIENT_REPO=${CEILOMETERCLIENT_REPO:-${GIT_BASE}/openstack/python-ceilometerclient.git}
CEILOMETERCLIENT_BRANCH=${CEILOMETERCLIENT_BRANCH:-master}
# volume service
CINDER_REPO=${CINDER_REPO:-${GIT_BASE}/openstack/cinder.git}
CINDER_BRANCH=${CINDER_BRANCH:-master}
# volume client
CINDERCLIENT_REPO=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git}
CINDERCLIENT_BRANCH=${CINDERCLIENT_BRANCH:-master}
# image catalog service
GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git}
GLANCE_BRANCH=${GLANCE_BRANCH:-master}
# python glance client library
GLANCECLIENT_REPO=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git}
GLANCECLIENT_BRANCH=${GLANCECLIENT_BRANCH:-master}
# heat service
HEAT_REPO=${HEAT_REPO:-${GIT_BASE}/openstack/heat.git}
HEAT_BRANCH=${HEAT_BRANCH:-master}
# python heat client library
HEATCLIENT_REPO=${HEATCLIENT_REPO:-${GIT_BASE}/openstack/python-heatclient.git}
HEATCLIENT_BRANCH=${HEATCLIENT_BRANCH:-master}
# django powered web control panel for openstack
HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git}
HORIZON_BRANCH=${HORIZON_BRANCH:-master}
# baremetal provisioning service
IRONIC_REPO=${IRONIC_REPO:-${GIT_BASE}/openstack/ironic.git}
IRONIC_BRANCH=${IRONIC_BRANCH:-master}
# unified auth system (manages accounts/tokens)
KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git}
KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master}
# python keystone client library that horizon (and others) use
KEYSTONECLIENT_REPO=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git}
KEYSTONECLIENT_BRANCH=${KEYSTONECLIENT_BRANCH:-master}
# compute service
NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git}
NOVA_BRANCH=${NOVA_BRANCH:-master}
# python client library to nova that horizon (and others) use
NOVACLIENT_REPO=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git}
NOVACLIENT_BRANCH=${NOVACLIENT_BRANCH:-master}
# consolidated openstack python client
OPENSTACKCLIENT_REPO=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git}
OPENSTACKCLIENT_BRANCH=${OPENSTACKCLIENT_BRANCH:-master}
# oslo.config
OSLOCFG_REPO=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git}
OSLOCFG_BRANCH=${OSLOCFG_BRANCH:-master}
# oslo.messaging
OSLOMSG_REPO=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git}
OSLOMSG_BRANCH=${OSLOMSG_BRANCH:-master}
# pbr drives the setuptools configs
PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git}
PBR_BRANCH=${PBR_BRANCH:-master}
# neutron service
NEUTRON_REPO=${NEUTRON_REPO:-${GIT_BASE}/openstack/neutron.git}
NEUTRON_BRANCH=${NEUTRON_BRANCH:-master}
# neutron client
NEUTRONCLIENT_REPO=${NEUTRONCLIENT_REPO:-${GIT_BASE}/openstack/python-neutronclient.git}
NEUTRONCLIENT_BRANCH=${NEUTRONCLIENT_BRANCH:-master}
# consolidated openstack requirements
REQUIREMENTS_REPO=${REQUIREMENTS_REPO:-${GIT_BASE}/openstack/requirements.git}
REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-master}
# storage service
SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
SWIFT_BRANCH=${SWIFT_BRANCH:-master}
SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/fujita/swift3.git}
SWIFT3_BRANCH=${SWIFT3_BRANCH:-master}
# python swift client library
SWIFTCLIENT_REPO=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git}
SWIFTCLIENT_BRANCH=${SWIFTCLIENT_BRANCH:-master}
# Tempest test suite
TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git}
TEMPEST_BRANCH=${TEMPEST_BRANCH:-master}
# diskimage-builder
BM_IMAGE_BUILD_REPO=${BM_IMAGE_BUILD_REPO:-${GIT_BASE}/stackforge/diskimage-builder.git}
BM_IMAGE_BUILD_BRANCH=${BM_IMAGE_BUILD_BRANCH:-master}
# bm_poseur
# Used to simulate a hardware environment for baremetal
# Only used if BM_USE_FAKE_ENV is set
BM_POSEUR_REPO=${BM_POSEUR_REPO:-${GIT_BASE}/tripleo/bm_poseur.git}
BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master}
# a websockets/html5 or flash powered VNC console for vm instances
NOVNC_REPO=${NOVNC_REPO:-${GIT_BASE}/kanaka/noVNC.git}
NOVNC_BRANCH=${NOVNC_BRANCH:-master}
# ryu service
RYU_REPO=${RYU_REPO:-${GIT_BASE}/osrg/ryu.git}
RYU_BRANCH=${RYU_BRANCH:-master}
# a websockets/html5 or flash powered SPICE console for vm instances
SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
SPICE_BRANCH=${SPICE_BRANCH:-master}
# Nova hypervisor configuration. We default to libvirt with **kvm** but will
# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can
# also install an **LXC**, **OpenVZ** or **XenAPI** based system. If xenserver-core
# is installed, the default will be XenAPI
DEFAULT_VIRT_DRIVER=libvirt
is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver
VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER}
case "$VIRT_DRIVER" in
libvirt)
LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
if [[ "$os_VENDOR" =~ (Debian) ]]; then
LIBVIRT_GROUP=libvirt
else
LIBVIRT_GROUP=libvirtd
fi
;;
fake)
NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1}
;;
xenserver)
# Xen config common to nova and neutron
XENAPI_USER=${XENAPI_USER:-"root"}
;;
*)
;;
esac
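# As an illustrative example, when the kvm module is unavailable (e.g.
# running DevStack inside a VM), ``localrc`` can force plain QEMU:
#   VIRT_DRIVER=libvirt
#   LIBVIRT_TYPE=qemu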
# Images
# ------
# Specify a comma-separated list of images to download and install into glance.
# Supported URLs here are:
# * "uec-style" images:
# If the file ends in .tar.gz, uncompress the tarball and select the first
# .img file inside it as the image. If present, use "*-vmlinuz*" as the kernel
# and "*-initrd*" as the ramdisk
# example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz
# * disk image (*.img,*.img.gz)
# If the file ends in .img, it is uploaded to glance and registered as
# a disk image. If it ends in .gz, it is uncompressed first.
# example:
# http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img
# http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-rootfs.img.gz
# * OpenVZ image:
# OpenVZ uses its own format of image, and does not support UEC style images
#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
#IMAGE_URLS="http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img" # cirros full disk image
# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
# which may be set in ``localrc``. Also allow ``DEFAULT_IMAGE_NAME`` and
# ``IMAGE_URLS`` to be set directly in ``localrc``.
case "$VIRT_DRIVER" in
openvz)
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64}
IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};;
libvirt)
case "$LIBVIRT_TYPE" in
lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-rootfs}
IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-rootfs.img.gz"};;
*) # otherwise, use the uec style image (with kernel, ramdisk, disk)
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-uec}
IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};;
esac
;;
vsphere)
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-debian-2.6.32-i686}
IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/debian-2.6.32-i686.vmdk"};;
*) # Default to Cirros with kernel, ramdisk and disk image
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-uec}
IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};;
esac
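# As an illustrative example, to boot the full-disk cirros image noted
# above instead of the UEC kernel/ramdisk/disk set, ``localrc`` could set:
#   DEFAULT_IMAGE_NAME=cirros-0.3.1-x86_64-disk
#   IMAGE_URLS="http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img"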
# 10GB default volume backing file size
VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-10250M}
# Name of the LVM volume group to use/create for iscsi volumes
VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
# Set default port for nova-objectstore
S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333}
# Common network names
PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"}
PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"public"}
# Compatibility until it's eradicated from CI
USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN}
# Set default screen name
SCREEN_NAME=${SCREEN_NAME:-stack}
# Do not install packages tagged with 'testonly' by default
INSTALL_TESTONLY_PACKAGES=${INSTALL_TESTONLY_PACKAGES:-False}
# Local variables:
# mode: shell-script
# End: