-
Notifications
You must be signed in to change notification settings - Fork 0
/
openstack_kolla_ansible.xml
387 lines (338 loc) · 16.4 KB
/
openstack_kolla_ansible.xml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
<?xml version="1.0" encoding="UTF-8"?>
<!--
~~~~~~~~~~~~~~~~~~~~~~
VNX Sample scenarios
~~~~~~~~~~~~~~~~~~~~~~
Name: openstack_kolla_ansible
Description: This is an OpenStack tutorial scenario designed for experimenting with OpenStack, a free and
open-source software platform for cloud computing. It is made of four kvm machines:
- one controller
- one network node
- two compute nodes
Kolla-ansible is used to provision OpenStack platform.
Openstack version used: Wallaby.
The network configuration is based on the one named "Classic with Open vSwitch" described in the
(older, Liberty-era) networking guide: http://docs.openstack.org/liberty/networking-guide/scenario-classic-ovs.html
Note that the scenario itself deploys the Wallaby release (see above).
Author: Nacho Dominguez (i.dominguezm@upm.es)
This file is part of the Virtual Networks over LinuX (VNX) Project distribution.
(www: http://www.dit.upm.es/vnx - e-mail: vnx@dit.upm.es)
Copyright (C) 2021 Networking and Virtualization Research Group (GIROS)
Departamento de Ingenieria de Sistemas Telematicos (DIT)
Universidad Politecnica de Madrid (UPM)
SPAIN
-->
<vnx xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:noNamespaceSchemaLocation="/usr/share/xml/vnx/vnx-2.00.xsd">
<global>
<version>2.0</version>
<!-- Fixed: the scenario name previously read "openstack_kolla_openstack",
     which did not match the name documented in the file header
     ("openstack_kolla_ansible"); aligned here for consistency. -->
<scenario_name>openstack_kolla_ansible</scenario_name>
<ssh_key>conf/ssh/id_rsa.pub</ssh_key>
<automac/>
<!-- Private VNX management network (192.168.50.0/24, addresses assigned from
     offset 16); host_mapping presumably publishes VM-name mappings on the
     host - confirm against the VNX reference. -->
<vm_mgmt type="private" network="192.168.50.0" mask="24" offset="16">
<host_mapping />
</vm_mgmt>
<!-- Default console layout applied to every VM unless overridden -->
<vm_defaults>
<console id="0" display="no"/>
<console id="1" display="yes"/>
</vm_defaults>
</global>
<!-- Virtual networks interconnecting the scenario nodes -->
<!-- MgmtNet: management network (10.0.0.0/24 below); MTU lowered to 1450 -->
<net name="MgmtNet" mode="openvswitch" mtu="1450"/>
<!-- TunnNet: tunnel/overlay traffic network (10.0.1.0/24 below); MTU 1450 -->
<net name="TunnNet" mode="openvswitch" mtu="1450"/>
<!-- ExtNet: external network (10.0.10.0/24); used as the floating-IP provider
     network by the "create-extnet" sequence in the host section -->
<net name="ExtNet" mode="openvswitch" />
<!-- VlanNet: provider network carrying VLAN-tagged traffic; used by the
     "create-vlan-demo-scenario" sequence (physnet1, VLANs 1000/1001) -->
<net name="VlanNet" mode="openvswitch" />
<!-- virbr0: pre-existing libvirt bridge (managed="no") giving Internet access -->
<net name="virbr0" mode="virtual_bridge" managed="no"/>
<!--
~~
~~ A D M I N N O D E
~~
-->
<!-- Admin node: LXC container at 10.0.0.1 on MgmtNet (the default gateway the
     other nodes route through) with a DHCP-configured uplink on the host's
     virbr0 bridge; on boot it NATs MgmtNet traffic out via vnx_config_nat. -->
<vm name="admin" type="lxc" exec_mode="lxc-attach" arch="x86_64">
<filesystem type="cow">/usr/share/vnx/filesystems/rootfs_lxc_ubuntu64</filesystem>
<if id="1" net="MgmtNet">
<ipv4>10.0.0.1/24</ipv4>
</if>
<!-- eth9 on libvirt's virbr0: outbound/Internet connectivity via DHCP -->
<if id="9" net="virbr0">
<ipv4>dhcp</ipv4>
</if>
<!-- Copy the VNX NAT helper script into the container before the boot commands run -->
<filetree seq="on_boot" root="/usr/bin/">/usr/bin/vnx_config_nat</filetree>
<exec seq="on_boot" type="verbatim">
dhclient eth9
apt-get update
apt-get install -y iptables
/usr/bin/vnx_config_nat eth1 eth9
</exec>
</vm>
<!--
~~
~~ C O N T R O L L E R N O D E
~~
-->
<!-- Controller node: KVM VM (4G RAM, 2 vCPU).
     eth1 = MgmtNet (10.0.0.11), eth4 = ExtNet (10.0.10.11);
     default route via the admin node (10.0.0.1). -->
<vm name="controller" type="libvirt" subtype="kvm" os="linux" exec_mode="sdisk" arch="x86_64" vcpu="2">
<filesystem type="cow">/usr/share/vnx/filesystems/rootfs_ubuntu64</filesystem>
<mem>4G</mem>
<!--console id="0" display="yes"/-->
<if id="1" net="MgmtNet">
<ipv4>10.0.0.11/24</ipv4>
</if>
<if id="4" net="ExtNet">
<ipv4>10.0.10.11/24</ipv4>
</if>
<!--if id="9" net="virbr0">
<ipv4>dhcp</ipv4>
</if-->
<route type="ipv4" gw="10.0.0.1">default</route>
<!-- Copy ntp config and restart service -->
<!-- Note: not used because ntp cannot be used inside a container. Clocks are supposed to be synchronized
between the vms/containers and the host -->
<!--filetree seq="on_boot" root="/etc/chrony/chrony.conf">conf/ntp/chrony-controller.conf</filetree>
<exec seq="on_boot" type="verbatim">
service chrony restart
</exec-->
<!-- Lower the MTU of eth1 to 1450 at runtime and persistently, then set a
     public DNS server on it.
     NOTE(review): the script comment below mentions TunnNet, but only eth1
     (MgmtNet) is reconfigured here; this VM has no TunnNet interface. -->
<exec seq="on_boot" type="verbatim">
# Change MgmtNet and TunnNet interfaces MTU
ifconfig eth1 mtu 1450
sed -i -e '/iface eth1 inet static/a \ mtu 1450' /etc/network/interfaces
systemd-resolve --set-dns=8.8.8.8 --interface=eth1
</exec>
</vm>
<!--
~~
~~ N E T W O R K N O D E
~~
-->
<!-- Network node: KVM VM (2G RAM, 2 vCPU) with IPv4/IPv6 forwarding enabled.
     eth1 = MgmtNet (10.0.0.21), eth2 = TunnNet (10.0.1.21);
     eth3 = VlanNet and eth4 = ExtNet carry provider traffic and get no IP. -->
<vm name="network" type="libvirt" subtype="kvm" os="linux" exec_mode="sdisk" arch="x86_64" vcpu="2">
<filesystem type="cow">/usr/share/vnx/filesystems/rootfs_ubuntu64</filesystem>
<!--vm name="network" type="libvirt" subtype="kvm" os="linux" exec_mode="sdisk" arch="x86_64">
<filesystem type="cow">filesystems/rootfs_kvm_ubuntu64-ostack-network</filesystem-->
<mem>2G</mem>
<if id="1" net="MgmtNet">
<ipv4>10.0.0.21/24</ipv4>
</if>
<if id="2" net="TunnNet">
<ipv4>10.0.1.21/24</ipv4>
</if>
<if id="3" net="VlanNet">
</if>
<if id="4" net="ExtNet">
</if>
<!--if id="9" net="virbr0">
<ipv4>dhcp</ipv4>
</if-->
<route type="ipv4" gw="10.0.0.1">default</route>
<forwarding type="ip" />
<forwarding type="ipv6" />
<!-- Lower the MTU of eth1-eth3 to 1450 (at runtime via ifconfig and
     persistently via sed on /etc/network/interfaces) and set a public DNS
     server on the management interface -->
<exec seq="on_boot" type="verbatim">
# Change MgmtNet and TunnNet interfaces MTU
ifconfig eth1 mtu 1450
sed -i -e '/iface eth1 inet static/a \ mtu 1450' /etc/network/interfaces
ifconfig eth2 mtu 1450
sed -i -e '/iface eth2 inet static/a \ mtu 1450' /etc/network/interfaces
ifconfig eth3 mtu 1450
sed -i -e '/iface eth3 inet static/a \ mtu 1450' /etc/network/interfaces
systemd-resolve --set-dns=8.8.8.8 --interface=eth1
</exec>
<!-- Copy ntp config and restart service -->
<!-- Note: not used because ntp cannot be used inside a container. Clocks are supposed to be synchronized
between the vms/containers and the host -->
<!--filetree seq="on_boot" root="/etc/chrony/chrony.conf">conf/ntp/chrony-others.conf</filetree>
<exec seq="on_boot" type="verbatim">
service chrony restart
</exec-->
</vm>
<!--
~~
~~ C O M P U T E 1 N O D E
~~
-->
<!-- Compute node 1: KVM VM (2G RAM, 2 vCPU).
     eth1 = MgmtNet (10.0.0.31), eth2 = TunnNet (10.0.1.31);
     eth3 = VlanNet and eth4 = ExtNet get no IP address. -->
<vm name="compute1" type="libvirt" subtype="kvm" os="linux" exec_mode="sdisk" arch="x86_64" vcpu="2">
<filesystem type="cow">/usr/share/vnx/filesystems/rootfs_ubuntu64</filesystem>
<mem>2G</mem>
<if id="1" net="MgmtNet">
<ipv4>10.0.0.31/24</ipv4>
</if>
<if id="2" net="TunnNet">
<ipv4>10.0.1.31/24</ipv4>
</if>
<if id="3" net="VlanNet">
</if>
<if id="4" net="ExtNet">
</if>
<!--if id="9" net="virbr0">
<ipv4>dhcp</ipv4>
</if-->
<route type="ipv4" gw="10.0.0.1">default</route>
<!-- Lower the MTU of eth1-eth3 to 1450 (runtime + persistent) and set a
     public DNS server on the management interface -->
<exec seq="on_boot" type="verbatim">
# Create /dev/net/tun device
#mkdir -p /dev/net/
#mknod -m 666 /dev/net/tun c 10 200
# Change MgmtNet and TunnNet interfaces MTU
ifconfig eth1 mtu 1450
sed -i -e '/iface eth1 inet static/a \ mtu 1450' /etc/network/interfaces
ifconfig eth2 mtu 1450
sed -i -e '/iface eth2 inet static/a \ mtu 1450' /etc/network/interfaces
ifconfig eth3 mtu 1450
sed -i -e '/iface eth3 inet static/a \ mtu 1450' /etc/network/interfaces
systemd-resolve --set-dns=8.8.8.8 --interface=eth1
</exec>
<!-- Copy ntp config and restart service -->
<!-- Note: not used because ntp cannot be used inside a container. Clocks are supposed to be synchronized
between the vms/containers and the host -->
<!--filetree seq="on_boot" root="/etc/chrony/chrony.conf">conf/ntp/chrony-others.conf</filetree>
<exec seq="on_boot" type="verbatim">
service chrony restart
</exec-->
</vm>
<!--
~~~
~~~ C O M P U T E 2 N O D E
~~~
-->
<!-- Compute node 2: identical layout to compute1, shifted addresses.
     eth1 = MgmtNet (10.0.0.32), eth2 = TunnNet (10.0.1.32);
     eth3 = VlanNet and eth4 = ExtNet get no IP address. -->
<vm name="compute2" type="libvirt" subtype="kvm" os="linux" exec_mode="sdisk" arch="x86_64" vcpu="2">
<filesystem type="cow">/usr/share/vnx/filesystems/rootfs_ubuntu64</filesystem>
<mem>2G</mem>
<if id="1" net="MgmtNet">
<ipv4>10.0.0.32/24</ipv4>
</if>
<if id="2" net="TunnNet">
<ipv4>10.0.1.32/24</ipv4>
</if>
<if id="3" net="VlanNet">
</if>
<if id="4" net="ExtNet">
</if>
<!--if id="9" net="virbr0">
<ipv4>dhcp</ipv4>
</if-->
<route type="ipv4" gw="10.0.0.1">default</route>
<!-- Lower the MTU of eth1-eth3 to 1450 (runtime + persistent) and set a
     public DNS server on the management interface -->
<exec seq="on_boot" type="verbatim">
# Create /dev/net/tun device
#mkdir -p /dev/net/
#mknod -m 666 /dev/net/tun c 10 200
# Change MgmtNet and TunnNet interfaces MTU
ifconfig eth1 mtu 1450
sed -i -e '/iface eth1 inet static/a \ mtu 1450' /etc/network/interfaces
ifconfig eth2 mtu 1450
sed -i -e '/iface eth2 inet static/a \ mtu 1450' /etc/network/interfaces
ifconfig eth3 mtu 1450
sed -i -e '/iface eth3 inet static/a \ mtu 1450' /etc/network/interfaces
systemd-resolve --set-dns=8.8.8.8 --interface=eth1
</exec>
<!-- Copy ntp config and restart service -->
<!-- Note: not used because ntp cannot be used inside a container. Clocks are supposed to be synchronized
between the vms/containers and the host -->
<!--filetree seq="on_boot" root="/etc/chrony/chrony.conf">conf/ntp/chrony-others.conf</filetree>
<exec seq="on_boot" type="verbatim">
service chrony restart
</exec-->
</vm>
<!--
~~
~~ H O S T N O D E
~~
-->
<!-- Host: the physical host joins ExtNet (10.0.10.1, the gateway/DNS address
     used by the subnets created below) and MgmtNet (10.0.0.2), and defines
     the on-demand command sequences used to populate the cloud after
     deployment (run with vnx -x SEQNAME). All sequences source
     conf/admin-openrc.sh for OpenStack admin credentials. -->
<host>
<hostif net="ExtNet">
<ipv4>10.0.10.1/24</ipv4>
</hostif>
<hostif net="MgmtNet">
<ipv4>10.0.0.2/24</ipv4>
</hostif>
<!-- seq create-extnet: external flat provider network on physnet2 with a
     floating-IP pool of 10.0.10.100-10.0.10.200 -->
<exec seq="create-extnet" type="verbatim">
. conf/admin-openrc.sh
openstack network create --share --external --provider-physical-network physnet2 --provider-network-type flat ExtNet
openstack subnet create --network ExtNet --gateway 10.0.10.1 --dns-nameserver 10.0.10.1 --subnet-range 10.0.10.0/24 --allocation-pool start=10.0.10.100,end=10.0.10.200 ExtSubNet
</exec>
<!-- seq load-img: create demo flavors (skipped if they already exist) and
     register Cirros, trusty and xenial images downloaded from vnx.dit.upm.es -->
<exec seq="load-img" type="verbatim">
#dhclient eth9 # just in case the Internet connection is not active...
. conf/admin-openrc.sh
# Create flavors if not created
openstack flavor show m1.nano >/dev/null 2>&1 || openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano
openstack flavor show m1.tiny >/dev/null 2>&1 || openstack flavor create --id 1 --vcpus 1 --ram 512 --disk 1 m1.tiny
openstack flavor show m1.smaller >/dev/null 2>&1 || openstack flavor create --id 6 --vcpus 1 --ram 512 --disk 3 m1.smaller
# Cirros image
#wget -P /tmp/images http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
wget -P /tmp/images http://vnx.dit.upm.es/vnx/filesystems/ostack-images/cirros-0.3.4-x86_64-disk-vnx.qcow2
#glance image-create --name "cirros-0.3.4-x86_64-vnx" --file /tmp/images/cirros-0.3.4-x86_64-disk-vnx.qcow2 --disk-format qcow2 --container-format bare --visibility public --progress
openstack image create --file /tmp/images/cirros-0.3.4-x86_64-disk-vnx.qcow2 --disk-format qcow2 --public cirros-0.3.4-x86_64-vnx
rm /tmp/images/cirros-0.3.4-x86_64-disk*.qcow2
# Ubuntu image (trusty)
wget -P /tmp/images http://vnx.dit.upm.es/vnx/filesystems/ostack-images/trusty-server-cloudimg-amd64-disk1-vnx.qcow2
#glance image-create --name "trusty-server-cloudimg-amd64-vnx" --file /tmp/images/trusty-server-cloudimg-amd64-disk1-vnx.qcow2 --disk-format qcow2 --container-format bare --visibility public --progress
openstack image create --file /tmp/images/trusty-server-cloudimg-amd64-disk1-vnx.qcow2 --disk-format qcow2 --public trusty-server-cloudimg-amd64-vnx
rm /tmp/images/trusty-server-cloudimg-amd64-disk1*.qcow2
# Ubuntu image (xenial)
wget -P /tmp/images http://vnx.dit.upm.es/vnx/filesystems/ostack-images/xenial-server-cloudimg-amd64-disk1-vnx.qcow2
#glance image-create --name "xenial-server-cloudimg-amd64-vnx" --file /tmp/images/xenial-server-cloudimg-amd64-disk1-vnx.qcow2 --disk-format qcow2 --container-format bare --visibility public --progress
openstack image create --file /tmp/images/xenial-server-cloudimg-amd64-disk1-vnx.qcow2 --disk-format qcow2 --public xenial-server-cloudimg-amd64-vnx
rm /tmp/images/xenial-server-cloudimg-amd64-disk1*.qcow2
#wget -P /tmp/images http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
#glance image-create --name "CentOS-7-x86_64" --file /tmp/images/CentOS-7-x86_64-GenericCloud.qcow2 --disk-format qcow2 --container-format bare --visibility public --progress
#rm /tmp/images/CentOS-7-x86_64-GenericCloud.qcow2
</exec>
<!--
CREATE DEMO SCENARIO
-->
<!-- seq create-demo-scenario: tenant network net0 (10.1.1.0/24) + Cirros vm1
     with a keypair and a floating IP, router r0 attached to ExtNet, and
     ICMP/HTTP/SSH rules in the default security group.
     NOTE(review): newer OpenStack clients reject the dst-port option together
     with proto icmp (the "dst-port 0" rule below); confirm it is accepted by
     the client version used with Wallaby. -->
<exec seq="create-demo-scenario" type="verbatim">
. conf/admin-openrc.sh
# Create internal network
openstack network create net0
openstack subnet create --network net0 --gateway 10.1.1.1 --dns-nameserver 8.8.8.8 --subnet-range 10.1.1.0/24 --allocation-pool start=10.1.1.8,end=10.1.1.100 subnet0
# Create virtual machine
mkdir -p /root/keys
openstack keypair create vm1 > /root/keys/vm1
openstack server create --flavor m1.tiny --image cirros-0.3.4-x86_64-vnx vm1 --nic net-id=net0 --key-name vm1
# Create external network
#openstack network create --share --external --provider-physical-network provider --provider-network-type flat ExtNet
#openstack subnet create --network ExtNet --gateway 10.0.10.1 --dns-nameserver 10.0.10.1 --subnet-range 10.0.10.0/24 --allocation-pool start=10.0.10.100,end=10.0.10.200 ExtSubNet
openstack router create r0
openstack router set r0 --external-gateway ExtNet
openstack router add subnet r0 subnet0
# Assign floating IP address to vm1
openstack server add floating ip vm1 $( openstack floating ip create ExtNet -c floating_ip_address -f value )
# Create security group rules to allow ICMP, SSH and WWW access
openstack security group rule create --proto icmp --dst-port 0 default
openstack security group rule create --proto tcp --dst-port 80 default
openstack security group rule create --proto tcp --dst-port 22 default
</exec>
<!-- seq create-demo-vm2: second Cirros VM on net0 with its own keypair and
     floating IP (requires create-demo-scenario to have been run first) -->
<exec seq="create-demo-vm2" type="verbatim">
. conf/admin-openrc.sh
# Create virtual machine
mkdir -p /root/keys
openstack keypair create vm2 > /root/keys/vm2
openstack server create --flavor m1.tiny --image cirros-0.3.4-x86_64-vnx vm2 --nic net-id=net0 --key-name vm2
# Assign floating IP address to vm2
#openstack ip floating add $( openstack ip floating create ExtNet -c ip -f value ) vm2
openstack server add floating ip vm2 $( openstack floating ip create ExtNet -c floating_ip_address -f value )
</exec>
<!-- seq create-demo-vm3: xenial VM (flavor m1.smaller) on net0 with keypair
     and floating IP -->
<exec seq="create-demo-vm3" type="verbatim">
. conf/admin-openrc.sh
# Create virtual machine
mkdir -p /root/keys
openstack keypair create vm3 > /root/keys/vm3
openstack server create --flavor m1.smaller --image xenial-server-cloudimg-amd64-vnx vm3 --nic net-id=net0 --key-name vm3
# Assign floating IP address to vm3
#openstack ip floating add $( openstack ip floating create ExtNet -c ip -f value ) vm3
openstack server add floating ip vm3 $( openstack floating ip create ExtNet -c floating_ip_address -f value )
</exec>
<!-- seq create-vlan-demo-scenario: two shared VLAN provider networks on
     physnet1 (VLAN IDs 1000 and 1001) with one Cirros VM on each, after
     opening ICMP/HTTP/SSH in the admin project's default security group.
     Keypairs are written to a local tmp/ directory (unlike /root/keys above). -->
<exec seq="create-vlan-demo-scenario" type="verbatim">
. conf/admin-openrc.sh
# Create security group rules to allow ICMP, SSH and WWW access
admin_project_id=$(openstack project show admin -c id -f value)
default_secgroup_id=$(openstack security group list -f value | grep $admin_project_id | cut -d " " -f1)
openstack security group rule create --proto icmp --dst-port 0 $default_secgroup_id
openstack security group rule create --proto tcp --dst-port 80 $default_secgroup_id
openstack security group rule create --proto tcp --dst-port 22 $default_secgroup_id
# Create vlan based networks and subnetworks
openstack network create --share --provider-physical-network physnet1 --provider-network-type vlan --provider-segment 1000 vlan1000
openstack network create --share --provider-physical-network physnet1 --provider-network-type vlan --provider-segment 1001 vlan1001
openstack subnet create --network vlan1000 --gateway 10.1.2.1 --dns-nameserver 8.8.8.8 --subnet-range 10.1.2.0/24 --allocation-pool start=10.1.2.2,end=10.1.2.99 subvlan1000
openstack subnet create --network vlan1001 --gateway 10.1.3.1 --dns-nameserver 8.8.8.8 --subnet-range 10.1.3.0/24 --allocation-pool start=10.1.3.2,end=10.1.3.99 subvlan1001
# Create virtual machine
mkdir -p tmp
openstack keypair create vmA1 > tmp/vmA1
openstack server create --flavor m1.tiny --image cirros-0.3.4-x86_64-vnx vmA1 --nic net-id=vlan1000 --key-name vmA1
openstack keypair create vmB1 > tmp/vmB1
openstack server create --flavor m1.tiny --image cirros-0.3.4-x86_64-vnx vmB1 --nic net-id=vlan1001 --key-name vmB1
</exec>
</host>
</vnx>