forked from cloudfoundry/bosh-deployment
-
Notifications
You must be signed in to change notification settings - Fork 0
/
bosh.yml
144 lines (137 loc) · 3.38 KB
/
bosh.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
---
# BOSH director single-VM deployment manifest (derived from
# cloudfoundry/bosh-deployment). ((name)) placeholders are interpolation
# variables filled in by the BOSH CLI at deploy time, either from the
# `variables:` section below or from operator-supplied vars files/flags.
name: bosh

releases:
# Pinned compiled-release tarball: exact version, S3 object versionId, and
# sha1 checksum, so deployments are reproducible.
- name: bosh
  version: "263"
  url: https://s3.amazonaws.com/bosh-compiled-release-tarballs/bosh-263-ubuntu-trusty-3445.7-20170901-012146-902840377-20170901012153.tgz?versionId=89a.ZxB3Jc_gl6s4YESlL41xNOfoJKrO
  sha1: cc71c2ee6992071b1e1f6ae9f2119c03a42521c5
resource_pools:
- name: vms
  network: default
  env:
    bosh:
      # Quoted so YAML does not read the leading '*' as an alias sigil.
      # NOTE(review): presumably an invalid crypt hash used to disable
      # password login on created VMs — confirm against stemcell docs.
      password: '*'
      mbus:
        # Certificate for the bootstrap message-bus endpoint; generated in
        # the `variables:` section (mbus_bootstrap_ssl).
        cert: ((mbus_bootstrap_ssl))
disk_pools:
- name: disks
  # 32768 MB persistent disk. Written as a plain integer: the original
  # `32_768` relies on YAML 1.1 underscore digit-grouping, which a YAML 1.2
  # parser resolves to the STRING "32_768" rather than the int 32768.
  # `32768` parses to the same integer under both spec versions.
  disk_size: 32768
networks:
- name: default
  type: manual
  subnets:
  - range: ((internal_cidr))
    gateway: ((internal_gw))
    # Reserve the director's own IP as a static address in this subnet; the
    # instance group below assigns it via `static_ips`.
    static: [((internal_ip))]
    dns: [8.8.8.8]
instance_groups:
- name: bosh
  instances: 1

  # All director components are colocated on the single VM.
  jobs:
  - {name: nats, release: bosh}
  - {name: postgres-9.4, release: bosh}
  - {name: blobstore, release: bosh}
  - {name: director, release: bosh}
  - {name: health_monitor, release: bosh}

  resource_pool: vms
  persistent_disk_pool: disks

  networks:
  - name: default
    static_ips: [((internal_ip))]

  properties:
    # NATS bound to loopback; deployed agents reach it through the URL in
    # `agent.mbus` at the bottom of this section.
    nats:
      address: 127.0.0.1
      user: nats
      password: ((nats_password))

    # Anchored (&db) so the director's `db:` setting below can alias the
    # same connection settings.
    postgres: &db
      listen_address: 127.0.0.1
      host: 127.0.0.1
      user: postgres
      password: ((postgres_password))
      database: bosh
      adapter: postgres

    blobstore:
      address: ((internal_ip))
      port: 25250
      provider: dav
      # Separate blobstore credentials for the director and for agents.
      director:
        user: director
        password: ((blobstore_director_password))
      agent:
        user: agent
        password: ((blobstore_agent_password))

    director:
      address: 127.0.0.1
      name: ((director_name))
      db: *db  # same connection settings as the postgres job above
      flush_arp: true
      enable_post_deploy: true
      generate_vm_passwords: true
      enable_dedicated_status_worker: true
      enable_nats_delivered_templates: true
      workers: 4
      events:
        record_events: true
      ssl:
        key: ((director_ssl.private_key))
        cert: ((director_ssl.certificate))
      user_management:
        provider: local
        local:
          users:
          - name: admin
            password: ((admin_password))
          # Account used by the health monitor (see `hm:` below).
          - name: hm
            password: ((hm_password))

    # Health monitor authenticates to the director with the `hm` local user
    # and verifies the director cert against the generated CA.
    hm:
      director_account:
        user: hm
        password: ((hm_password))
        ca_cert: ((director_ssl.ca))
      resurrector_enabled: true

    # Anchored (&ntp) so cloud_provider.properties.ntp can alias this list.
    ntp: &ntp
    - time1.google.com
    - time2.google.com
    - time3.google.com
    - time4.google.com

    # URL handed to deployed agents for reaching NATS on the director VM.
    agent:
      mbus: nats://nats:((nats_password))@((internal_ip)):4222
# Settings the BOSH CLI uses while creating the director VM itself
# (create-env), before any director exists to talk to.
cloud_provider:
  mbus: https://mbus:((mbus_bootstrap_password))@((internal_ip)):6868
  cert: ((mbus_bootstrap_ssl))
  properties:
    # The bootstrap agent's own listen address (all interfaces, port 6868).
    agent: {mbus: "https://mbus:((mbus_bootstrap_password))@0.0.0.0:6868"}
    # Local on-disk blobstore used only during bootstrap.
    blobstore: {provider: local, path: /var/vcap/micro_bosh/data/cache}
    ntp: *ntp  # aliases the NTP server list anchored under instance_groups
# Credentials and certificates the BOSH CLI generates into its vars store
# when values are not supplied externally.
variables:
- name: admin_password
  type: password
- name: blobstore_director_password
  type: password
- name: blobstore_agent_password
  type: password
- name: hm_password
  type: password
- name: mbus_bootstrap_password
  type: password
- name: nats_password
  type: password
- name: postgres_password
  type: password
# Self-signed root CA; the two server certificates below are issued from it.
- name: default_ca
  type: certificate
  options:
    is_ca: true
    common_name: ca
# Server certs bound to the director's internal IP (CN and SAN).
- name: mbus_bootstrap_ssl
  type: certificate
  options:
    ca: default_ca
    common_name: ((internal_ip))
    alternative_names: [((internal_ip))]
- name: director_ssl
  type: certificate
  options:
    ca: default_ca
    common_name: ((internal_ip))
    alternative_names: [((internal_ip))]