Skip to content

Commit

Permalink
Adding swarm demo
Browse files Browse the repository at this point in the history
Also merging the demo & test files for k8s.
Simplifying the vagrant configuration to:
1. Use CONTIV_NODES to specify node count
2. Use CONTIV_IP_PREFIX to allow specifying the subnet
3. Generate cfg.yaml based on the IP prefix and node count
4. Use the cfg.yaml to get the master IP, instead of using the hardcoded
192.168.2.xx addresses
  • Loading branch information
neelimamukiri committed Feb 13, 2017
1 parent f99fd02 commit f74a3b0
Show file tree
Hide file tree
Showing 18 changed files with 282 additions and 254 deletions.
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,2 +1,4 @@
# vagrant data directories
cluster/.vagrant
cluster/.etc_hosts
cluster/.cfg.yml
19 changes: 13 additions & 6 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ release:

# Brings up a demo cluster to install Contiv on - by default this is a docker, centos cluster.
# It can be configured to start a RHEL cluster by setting CONTIV_NODE_OS=rhel7.
# It can be started with k8s kubeadm install by running with VAGRANT_USE_KUBEADM=1.
# It can be started with k8s kubeadm install by running with CONTIV_KUBEADM=1.
cluster: cluster-destroy
cd cluster && vagrant up

Expand All @@ -20,20 +20,27 @@ cluster-destroy:
# demo-k8s brings up a cluster with k8s, runs the installer on it, and shows the URL
# of the demo Contiv Admin Console which was set up
demo-k8s:
@bash ./scripts/demo-k8s.sh
CONTIV_KUBEADM=1 make cluster
BUILD_VERSION=1.0.0-beta.2 make install-test-kubeadm

# demo-swarm brings up a cluster with docker swarm, runs the installer on it, and shows the URL
# of the demo Contiv Admin Console which was set up
demo-swarm:
make cluster
BUILD_VERSION=1.0.0-beta.2 make install-test-swarm

# Create a release and test the release installation on a vagrant cluster
# TODO: The vagrant part of this can be optimized by taking snapshots instead
# of creating a new set of VMs for each case
release-test-kubeadm: release
# Test kubeadm (centos by default)
VAGRANT_USE_KUBEADM=1 make cluster
VAGRANT_USE_KUBEADM=1 make install-test-kubeadm
CONTIV_KUBEADM=1 make cluster
CONTIV_KUBEADM=1 make install-test-kubeadm

release-test-swarm: release
# Test swarm (centos by default)
CLUSTER_CONFIG='cluster_defs_ansible.json' make cluster
CLUSTER_CONFIG='cluster_defs_ansible.json' make install-test-swarm
make cluster
make install-test-swarm

release-test-kubelegacy: release
# Test k8s ansible (centos by default)
Expand Down
32 changes: 32 additions & 0 deletions QUICKSTART.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# Quick Start Guide

## Pre-requisites

* [Install Virtual Box 5.1.14 or later]( https://www.virtualbox.org/wiki/Downloads )
* [Install Vagrant 1.9.1 or later]( https://www.vagrantup.com/downloads.html )
* [Install Docker 1.12 or later]( https://docs.docker.com/engine/installation/ )
* Clone the Contiv install repository <br>
`git clone https://github.com/contiv/install`

## Setup the cluster with Contiv for Kubernetes
`make demo-k8s`

## Setup the cluster with Contiv for Docker with Swarm
`make demo-swarm`

## Customizing the setup

* The default configuration creates a 2 node cluster. To increase the number of nodes set the environment variable `CONTIV_NODES=<n>`

## Quick Start Guide for CentOS 7.x hosts

* Setup the pre-requisites as follows and follow the demo instructions above
```
wget https://releases.hashicorp.com/vagrant/1.9.1/vagrant_1.9.1_x86_64.rpm
wget http://download.virtualbox.org/virtualbox/5.1.14/VirtualBox-5.1-5.1.14_112924_el7-1.x86_64.rpm
sudo yum install VirtualBox-5.1-5.1.14_112924_el7-1.x86_64.rpm -y
sudo yum install vagrant_1.9.1_x86_64.rpm -y
sudo yum install docker -y
sudo systemctl start docker
git clone https://github.com/contiv/install
```
169 changes: 95 additions & 74 deletions cluster/Vagrantfile
Original file line number Diff line number Diff line change
@@ -1,116 +1,137 @@
# -*- mode: ruby -*-
# -*- mode: ruby -*-
# vi: set ft=ruby :
require 'rubygems'
require 'json'
require 'fileutils'
require 'yaml'

token = 'd900e1.8a392798f13b33a4'

# method to create an etc_hosts file based on the cluster info
def create_etc_hosts(cluster)
master_ip = '192.168.2.10'
def create_etc_hosts(num_nodes, base_ip, start)
master_ip = base_ip + start.to_s
hosts = "127.0.0.1 localhost\n"
cluster.each do |role, member_list|
hosts = member_list.inject(hosts) { |acc, elem| acc << "#{elem['contiv_control_ip']} #{elem['name']}\n" }
if role == 'master' && member_list[0]
hosts << "#{member_list[0]['contiv_control_ip']} netmaster\n"
master_ip = member_list[0]['contiv_control_ip']
end
end
hosts << "#{master_ip} netmaster\n"
hosts = (0..num_nodes).inject(hosts) { |acc, elem| acc << base_ip + "#{elem + start} contiv-node#{elem + 1} \n" }

etc_file = (ENV['VAGRANT_CWD'] || '.') + '/export/.etc_hosts'
etc_file = (ENV['VAGRANT_CWD'] || '.') + '/.etc_hosts'
File.write(etc_file, hosts)
master_ip
end

# Writes a .cfg.yml connection-info file describing the cluster nodes.
# The first node (index 0) is marked with the 'master' role; every node
# records its control/data interface names, overridable via the
# CONTIV_CONTROL_IF and CONTIV_DATA_IF env vars (defaults: eth1/eth2).
# Honors VAGRANT_CWD for the output directory, falling back to '.'.
def create_cfg_info(num_nodes, node_ips)
  connections = (0...num_nodes).each_with_object({}) do |idx, acc|
    entry = idx.zero? ? { 'role' => 'master' } : {}
    entry['control'] = ENV['CONTIV_CONTROL_IF'] || 'eth1'
    entry['data'] = ENV['CONTIV_DATA_IF'] || 'eth2'
    acc[node_ips[idx]] = entry
  end
  out_path = (ENV['VAGRANT_CWD'] || '.') + '/.cfg.yml'
  File.write(out_path, { 'CONNECTION_INFO' => connections }.to_yaml)
end

provision_node = <<SCRIPT
echo "export https_proxy='$2'" >> /etc/profile.d/envvar.sh
echo "export http_proxy='$1'" >> ~/.profile
echo "export https_proxy='$2'" >> ~/.profile
source /etc/profile.d/envvar.sh
sudo yum install -y net-tools
echo $3 > /etc/hosts
ifup eth1
SCRIPT

# begin execution here
# read the cluster configuration and create /etc/hosts file
config_file = ENV['CLUSTER_CONFIG'] || 'cluster_defs.json'
cluster = JSON.parse(File.read((ENV['VAGRANT_CWD'] || '.') + '/' + config_file))
master_ip = create_etc_hosts(cluster)
num_nodes = 2
if ENV['CONTIV_NODES'] && ENV['CONTIV_NODES'] != ''
num_nodes = ENV['CONTIV_NODES'].to_i
end
base_ip = '192.168.2.'
if ENV['CONTIV_IP_PREFIX'] && ENV['CONTIV_IP_PREFIX'] != ''
base_ip = ENV['CONTIV_IP_PREFIX']
end
start = ENV['CONTIV_KUBEADM'] ? 50 : 50 + num_nodes
name_start = ENV['CONTIV_KUBEADM'] ? 1 : 1 + num_nodes
node_ips = Array.new(num_nodes) { |n| base_ip + (n + start).to_s }
node_names = Array.new(num_nodes) { |n| "contiv-node#{n + name_start}" }

master_ip = create_etc_hosts(num_nodes, base_ip, start)
create_cfg_info(num_nodes, node_ips)

VAGRANTFILE_API_VERSION = '2'.freeze
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.synced_folder './export', '/shared'
config.vm.provider 'virtualbox' do |v|
v.linked_clone = true if Vagrant::VERSION >= '1.8'
end
config.vm.box_check_update = false
config.vbguest.auto_update = false if Vagrant.has_plugin?('vagrant-vbguest')
if ENV['CONTIV_NODE_OS'] && ENV['CONTIV_NODE_OS'] == 'rhel7'
config.registration.manager = 'subscription_manager'
config.registration.username = ENV['CONTIV_RHEL_USER']
config.registration.password = ENV['CONTIV_RHEL_PASSWD']
end
# config.ssh.password = 'vagrant'
config.ssh.insert_key = false
config.ssh.private_key_path = './export/insecure_private_key'

cluster.each do |role, member_list|
member_list.each do |member_info|
config.vm.define vm_name = member_info['name'] do |c|
if ENV['CONTIV_NODE_OS'] && ENV['CONTIV_NODE_OS'] == 'rhel7'
# Download rhel7.2 box from https://access.redhat.com/downloads/content/293/ver=2/rhel---7/2.0.0/x86_64/product-software
# Add it as rhel7 vagrant box add rhel-cdk-kubernetes-7.2-29.x86_64.vagrant-virtualbox.box --name=rhel7
c.vm.box = 'rhel7'
else
c.vm.box = 'centos/7'
end
c.vm.provision 'shell' do |s|
s.inline = provision_node
s.args = [ENV['http_proxy'] || '', ENV['https_proxy'] || '']
end
config.ssh.insert_key = false
num_nodes.times do |n|
node_name = node_names[n]
node_addr = node_ips[n]
config.vm.define vm_name = node_name do |c|
if ENV['CONTIV_NODE_OS'] && ENV['CONTIV_NODE_OS'] == 'rhel7'
# Download rhel7.2 box from https://access.redhat.com/downloads/content/293/ver=2/rhel---7/2.0.0/x86_64/product-software
# Add it as rhel7 vagrant box add rhel-cdk-kubernetes-7.2-29.x86_64.vagrant-virtualbox.box --name=rhel7
c.vm.box = 'rhel7'
else
c.vm.box = 'centos/7'
end
c.vm.provision 'shell' do |s|
s.inline = provision_node
etc_file = (ENV['VAGRANT_CWD'] || '.') + '/.etc_hosts'
hosts = File.read(etc_file)
s.args = [ENV['http_proxy'] || '', ENV['https_proxy'] || '', hosts]
end

# configure ip address etc
c.vm.hostname = vm_name
c.vm.network :private_network, ip: member_info['contiv_control_ip']
c.vm.network :private_network, ip: member_info['contiv_network_ip'], virtualbox__intnet: 'true', auto_config: false
c.vm.provider 'virtualbox' do |v|
v.memory = vm_name == 'contiv-master' ? 2048 : 1024
# make all nics 'virtio' to take benefit of builtin vlan tag
# support, which otherwise needs to be enabled in Intel drivers,
# which are used by default by virtualbox
v.customize ['modifyvm', :id, '--nictype1', 'virtio']
v.customize ['modifyvm', :id, '--nictype2', 'virtio']
v.customize ['modifyvm', :id, '--nictype3', 'virtio']
v.customize ['modifyvm', :id, '--nicpromisc2', 'allow-all']
v.customize ['modifyvm', :id, '--nicpromisc3', 'allow-all']
v.customize ['modifyvm', :id, '--paravirtprovider', 'kvm']
end # v
# configure ip address etc
c.vm.hostname = vm_name
c.vm.network :private_network, ip: node_addr
c.vm.network :private_network, ip: node_addr, virtualbox__intnet: 'true', auto_config: false
if n.zero?
c.vm.network 'forwarded_port', guest: 10_000, host: 10_005 + node_addr.split('.')[3].to_i
end
c.vm.provider 'virtualbox' do |v|
# kube api server node is super slow with just 1GB RAM, so give it more
v.memory = ENV['CONTIV_KUBEADM'] && n.zero? ? 2048 : 1024
# make all nics 'virtio' to take benefit of builtin vlan tag
# support, which otherwise needs to be enabled in Intel drivers,
# which are used by default by virtualbox
v.customize ['modifyvm', :id, '--nictype1', 'virtio']
v.customize ['modifyvm', :id, '--nictype2', 'virtio']
v.customize ['modifyvm', :id, '--nictype3', 'virtio']
v.customize ['modifyvm', :id, '--nicpromisc2', 'allow-all']
v.customize ['modifyvm', :id, '--nicpromisc3', 'allow-all']
v.customize ['modifyvm', :id, '--paravirtprovider', 'kvm']
end # v

if ENV['CONTIV_KUBEADM']
c.vm.provision 'shell', inline: <<-EOS
#copy the etc_hosts file we created
sudo cp /shared/.etc_hosts /etc/hosts
EOS
if ENV['VAGRANT_USE_KUBEADM']
c.vm.provision 'shell', inline: <<-EOS
sudo setenforce 0
sudo systemctl stop firewalld
sudo /etc/init.d/network restart
#copy the etc_hosts file we created
sudo cp /shared/.etc_hosts /etc/hosts
EOS
c.vm.provision :shell, path: 'bootstrap_centos.sh'
if role == 'master'
# Install kubernetes on master
ks8_ver = ENV['CONTIV_K8s_VERSION'] || 'v1.4.7'
c.vm.provision :shell, path: 'k8smaster_centos.sh', args: [token, member_info['contiv_control_ip'], ks8_ver]
else
# Install kubernetes on nodes
c.vm.provision :shell, path: 'k8sworker_centos.sh', args: [token, master_ip]
end # if
c.vm.provision :shell, path: 'bootstrap_centos.sh'
if n.zero?
# Install kubernetes on master
ks8_ver = ENV['CONTIV_K8s_VERSION'] || 'v1.4.7'
c.vm.provision :shell, path: 'k8smaster_centos.sh', args: [token, node_addr, ks8_ver]
else
c.vm.provision :shell, inline: 'yum install policycoreutils-python -y'
end
end # c
end # member_info
# Install kubernetes on nodes
c.vm.provision :shell, path: 'k8sworker_centos.sh', args: [token, master_ip]
end # if
else
c.vm.provision :shell, inline: 'yum install policycoreutils-python -y'
end
end # c
end # role
end # config
#
3 changes: 0 additions & 3 deletions cluster/cluster_defs.json

This file was deleted.

3 changes: 0 additions & 3 deletions cluster/cluster_defs_ansible.json

This file was deleted.

10 changes: 0 additions & 10 deletions cluster/export/docker-tcp.socket

This file was deleted.

17 changes: 0 additions & 17 deletions cluster/export/docker.service

This file was deleted.

27 changes: 0 additions & 27 deletions cluster/export/insecure_private_key

This file was deleted.

4 changes: 2 additions & 2 deletions install/ansible/cfg.yml
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
CONNECTION_INFO:
192.168.2.12:
192.168.2.52:
role: master
control: eth1
data: eth2
192.168.2.13:
192.168.2.53:
control: eth1
data: eth2
8 changes: 4 additions & 4 deletions install/ansible/install_swarm.sh
Original file line number Diff line number Diff line change
Expand Up @@ -144,9 +144,9 @@ fi

echo "Starting the installer container"
image_name="contiv/install:__CONTIV_INSTALL_VERSION__"
install_mount="-v $(pwd)/install:/install"
ansible_mount="-v $(pwd)/ansible:/ansible"
config_mount="-v $src_conf_path:$container_conf_path"
cache_mount="-v $(pwd)/contiv_cache:/var/contiv_cache"
install_mount="-v $(pwd)/install:/install:Z"
ansible_mount="-v $(pwd)/ansible:/ansible:Z"
config_mount="-v $src_conf_path:$container_conf_path:Z"
cache_mount="-v $(pwd)/contiv_cache:/var/contiv_cache:Z"
mounts="$install_mount $ansible_mount $cache_mount $config_mount"
docker run --rm $mounts $image_name sh -c "./install/ansible/install.sh $netmaster_param -a \"$ans_opts\" $install_scheduler -m $contiv_network_mode -d $fwd_mode $aci_param"
8 changes: 4 additions & 4 deletions install/ansible/uninstall_swarm.sh
Original file line number Diff line number Diff line change
Expand Up @@ -129,9 +129,9 @@ fi
ans_opts="$ans_opts --private-key $def_ans_key -u $ans_user"
echo "Starting the uninstaller container"
image_name="contiv/install:__CONTIV_INSTALL_VERSION__"
install_mount="-v $(pwd)/install:/install"
ansible_mount="-v $(pwd)/ansible:/ansible"
config_mount="-v $src_conf_path:$container_conf_path"
cache_mount="-v $(pwd)/contiv_cache:/var/contiv_cache"
install_mount="-v $(pwd)/install:/install:Z"
ansible_mount="-v $(pwd)/ansible:/ansible:Z"
config_mount="-v $src_conf_path:$container_conf_path:Z"
cache_mount="-v $(pwd)/contiv_cache:/var/contiv_cache:Z"
mounts="$install_mount $ansible_mount $cache_mount $config_mount"
docker run --rm $mounts $image_name sh -c "./install/ansible/uninstall.sh $netmaster_param -a \"$ans_opts\" $uninstall_scheduler -m $contiv_network_mode -d $fwd_mode $aci_param $reset_params"
3 changes: 0 additions & 3 deletions install/install.sh

This file was deleted.

Loading

0 comments on commit f74a3b0

Please sign in to comment.