Fix secret and node token
GMZwinge committed Sep 19, 2024
1 parent 2fa37ae commit 3dce681
Showing 11 changed files with 157 additions and 43 deletions.
43 changes: 17 additions & 26 deletions Build.ps1
@@ -9,7 +9,7 @@
The user to connect to the target.
.PARAMETER Pass
The password to connect to the target.
.PARAMETER Type
.PARAMETER NodeType
Whether to provision the target as a Kubernetes control plane node (ctrl) or worker node.
#>
[CmdletBinding()]
@@ -18,7 +18,7 @@ Param(
[string]$User = 'user',
[string]$Pass = 'pass',
[ValidateSet('controller', 'worker')]
[string]$Type = 'worker'
[string]$NodeType = 'worker'
)
$Script:ErrorActionPreference = 'Stop'
Set-StrictMode -Version Latest
@@ -28,32 +28,23 @@ if (-not $MyInvocation.BoundParameters.ContainsKey('Verbose')) {
}
$ScriptRoot = $PSScriptRoot
#
Write-Verbose 'Installing modules...'
# The Puppet Bolt image:
$BoltDockerImage = 'puppet/puppet-bolt:3.27.4'
# Project names can contain only lowercase letters, numbers, and underscores, and begin with a lowercase letter.
$BoltProjectName = 'test_voxpupuli_puppet_k8s'
$DockerMountDir = "/$BoltProjectName"
# Mount source must be an absolute path.
$DockerMountSource = $ScriptRoot
$Command = "docker container run --mount 'type=bind,source=$DockerMountSource,destination=$DockerMountDir'" +
" --workdir '$DockerMountDir' --rm --env 'BOLT_PROJECT=$DockerMountDir' '$BoltDockerImage'" +
" module install --force"
Write-Verbose $Command
Invoke-Expression $Command
if ($LASTEXITCODE -ne 0) {
throw "This command failed with exit code ${LASTEXITCODE}: $Command"
}
# Get shared variables.
. "$PSScriptRoot\_Shared.ps1"
#
Write-Verbose 'Running plan...'
$BaseCommand = "docker container run --mount 'type=bind,source=$PWD,destination=$DockerMountDir'" +
" --workdir '$DockerMountDir' --rm --env 'BOLT_PROJECT=$DockerMountDir' '$BoltDockerImage'" +
" --verbose plan run '${BoltProjectName}::myplan' --targets '$Target'" +
" --user '$User' --password '$Pass' --inventory 'inventory.yaml' 'type=$Type'"
#$OutputFile = "$ScriptRoot\Build-plan-run-$Target-verbose-1.log"
# TODO: could log the stdout and stderr of the bolt command inside the container;
# may need to override the entry point and run the whole bolt command itself,
# possibly through a shell, then redirect outputs using the parameters
# > bolt-plan-run-std.log 2>&1 (a sketch follows this file's diff).
$BaseCommand = "docker container run --mount 'type=bind,source=$DockerMountSource,destination=$DockerMountDestination'" +
" --workdir '$DockerMountDestination' --rm --env 'BOLT_PROJECT=$DockerMountDestination' '$BoltDockerImage'" +
" plan run '${BoltProjectName}::myplan' --targets '$Target'" +
" --verbose" +
" --user '$User' --password '$Pass' --inventory 'inventory.yaml' 'node_type=$NodeType'"
$OutputFile = "$ScriptRoot\Build-plan-run-$Target-verbose.log"
# Write the command to the output file so that it can easily be copied and pasted to run it again.
#$BaseCommand | Out-File $OutputFile -Encoding ascii
#$Command = "$BaseCommand | Out-File '$OutputFile' -Encoding ascii -Append"
$Command = $BaseCommand
'',$BaseCommand,'' | Out-File $OutputFile -Encoding ascii -Append
$Command = "$BaseCommand | Out-File '$OutputFile' -Encoding ascii -Append"
#$Command = $BaseCommand
Write-Verbose $Command
Invoke-Expression $Command
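The TODO above could be handled by overriding the image entrypoint so that a shell inside the container performs the redirection itself. A minimal sketch, assuming the puppet-bolt image provides /bin/sh and reusing the variables from _Shared.ps1; the log file name bolt-plan-run-std.log is an assumption, not part of this commit:

```powershell
# Sketch only: run bolt through /bin/sh inside the container and redirect its
# stdout and stderr to a file on the bind mount, so the log survives on the host.
$Command = "docker container run --mount 'type=bind,source=$DockerMountSource,destination=$DockerMountDestination'" +
    " --workdir '$DockerMountDestination' --rm --env 'BOLT_PROJECT=$DockerMountDestination'" +
    " --entrypoint /bin/sh '$BoltDockerImage'" +
    " -c 'bolt plan run ${BoltProjectName}::myplan --targets $Target --user $User --password $Pass" +
    " --inventory inventory.yaml node_type=$NodeType > bolt-plan-run-std.log 2>&1'"
Write-Verbose $Command
Invoke-Expression $Command
```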
29 changes: 29 additions & 0 deletions InstallModules.ps1
@@ -0,0 +1,29 @@
<#
.SYNOPSIS
Install modules.
.DESCRIPTION
Install modules.
#>
[CmdletBinding()]
Param(
)
$Script:ErrorActionPreference = 'Stop'
Set-StrictMode -Version Latest
trap { throw $Error[0] }
if (-not $MyInvocation.BoundParameters.ContainsKey('Verbose')) {
$VerbosePreference = [System.Management.Automation.ActionPreference]::Continue
}
$ScriptRoot = $PSScriptRoot
#
# Get shared variables.
. "$PSScriptRoot\_Shared.ps1"
#
Write-Verbose 'Installing modules...'
$Command = "docker container run --mount 'type=bind,source=$DockerMountSource,destination=$DockerMountDestination'" +
" --workdir '$DockerMountDestination' --rm --env 'BOLT_PROJECT=$DockerMountDestination' '$BoltDockerImage'" +
" module install --force"
Write-Verbose $Command
Invoke-Expression $Command
if ($LASTEXITCODE -ne 0) {
throw "This command failed with exit code ${LASTEXITCODE}: $Command"
}
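Usage, from the repository root (Docker must be able to pull the Bolt image):

```powershell
# Resolves and installs the modules declared in bolt-project.yaml into .modules.
.\InstallModules.ps1
```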
62 changes: 62 additions & 0 deletions README.md
@@ -1,3 +1,65 @@
# test-voxpupuli-puppet-k8s

- .gitignore: from a Bolt project created following https://www.puppet.com/docs/bolt/latest/bolt_installing_modules.html.
- Provision CentOS 9 VMs.
- Configure `local.yaml` for your environment.
- Install modules by running this command:
```
bolt module install --force
```
On Windows, you can run:
```
.\InstallModules.ps1
```
- On one of the VMs, provision the controller by running this command:
```
bolt plan run test_voxpupuli_puppet_k8s::myplan --targets <VmHostname> --user <VmUsername> --password <VmPassword> --inventory inventory.yaml node_type=controller
```
On Windows, you can run:
```
.\Build.ps1 -Target <VmHostnames> -User <VmUsername> -Pass <VmPassword> -NodeType controller
```
It should fail with this error:
```
Err: /Stage[main]/K8s::Server::Resources::Bootstrap/Kubectl_apply[puppet:cluster-info:reader Role]: Could not evaluate: Execution of '/bin/kubectl --namespace kube-system --kubeconfig /root/.kube/config get Role puppet:cluster-info:reader --output json' returned 1: error: the server doesn't have a resource type "Role"
```
Run the same command a second time, and it should succeed.
- On one of the controller VMs, copy /root/.kube/config into ~/.kube/config, e.g.:
```
mkdir -p $HOME/.kube
sudo cp -i /root/.kube/config $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
```
- Check Kubernetes namespaces by running this command:
```
kubectl get namespaces
```
It should list something like this:
```
NAME STATUS AGE
default Active 20m
kube-node-lease Active 20m
kube-public Active 20m
kube-system Active 20m
```
- On the other VMs, provision the workers by running this command:
```
bolt plan run test_voxpupuli_puppet_k8s::myplan --targets <VmHostname> --user <VmUsername> --password <VmPassword> --inventory inventory.yaml node_type=worker
```
On Windows, you can run:
```
.\Build.ps1 -Target <VmHostnames> -User <VmUsername> -Pass <VmPassword> -NodeType worker
```
- On the same controller VM as above:
- Check Kubernetes nodes by running this command:
```
kubectl get nodes
```
It should list something like this:
```
NAME STATUS ROLES AGE VERSION
<workerFqdn> Ready <none> 5h29m v1.26.1
```
8 changes: 8 additions & 0 deletions _Shared.ps1
@@ -0,0 +1,8 @@
#
# The Puppet Bolt image:
$BoltDockerImage = 'puppet/puppet-bolt:3.27.4'
# Project names can contain only lowercase letters, numbers, and underscores, and begin with a lowercase letter.
$BoltProjectName = 'test_voxpupuli_puppet_k8s'
$DockerMountDestination = "/$BoltProjectName"
# Mount source must be an absolute path.
$DockerMountSource = $PSScriptRoot
5 changes: 5 additions & 0 deletions bolt-project.yaml
@@ -6,3 +6,8 @@ modulepath:
- site
modules:
- name: puppet/k8s
# Log level in their own files. From https://www.puppet.com/docs/bolt/latest/logs.
log:
bolt-plan-run-info.log:
append: false
level: info
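With this configuration Bolt writes an info-level log to bolt-plan-run-info.log, relative to the project root, recreating it on each run (append: false). While a plan runs, the log can be followed from another terminal, for example:

```powershell
# Follow the Bolt info log as it is written (path relative to the project root).
Get-Content .\bolt-plan-run-info.log -Tail 20 -Wait
```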
18 changes: 14 additions & 4 deletions data/common.yaml
@@ -8,15 +8,25 @@ k8s::install::crictl::config:

# set token in an upper level, so that it can be used by controller and worker
# see also worker.yaml for usage
k8s::server::resources::bootstrap::secret: '0123456789abcdef'
#k8s::server::resources::bootstrap::secret: '0123456789abcdef'
# From workaround in [bootstrap token is sensitive, node_token is not](https://github.com/voxpupuli/puppet-k8s/issues/51):
lookup_options:
k8s::server::resources::bootstrap::secret:
convert_to: Sensitive
k8s::node::node_token:
convert_to: Sensitive
k8s::secret: 0123456789abcdef
k8s::server::resources::bootstrap::secret: "%{lookup('k8s::secret')}"
k8s::node::node_token: "puppet.%{lookup('k8s::secret')}"

# set data in an upper level, so that it can be used by controller and worker
k8s::container_manager: containerd # k8s-class default: crio
#k8s::container_manager: containerd # k8s-class default: crio
k8s::container_manager: crio # k8s-class default: crio
k8s::puppetdb_discovery: false # k8s-class default: false
k8s::k8s_version: 1.26.1 # k8s-class default: 1.26.1
# TODO: figure out how to get the fqdn in that url from a parameter to bolt plan run!!!!!
# TODO: figure out how to get the fqdn in that url from a parameter to bolt plan run command!!!!!
k8s::control_plane_url: https://kubernetes:6443 # k8s-class default: https://kubernetes:6443

# TODO: figure out how to get the fqdn in that url from a parameter to bolt plan run!!!!!
# TODO: figure out how to get the fqdn in that url from a parameter to bolt plan run command!!!!!
k8s::server::etcd_servers:
- https://kubernetes:2379
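Note that the derived node token, puppet.0123456789abcdef, matches the Kubernetes bootstrap-token format: a 6-character ID, a dot, and a 16-character secret, all lowercase alphanumeric. A local sanity check, as a sketch (the variable name is illustrative):

```powershell
# Bootstrap tokens must match [a-z0-9]{6}.[a-z0-9]{16}; anything else is
# rejected during TLS bootstrapping.
$NodeToken = 'puppet.0123456789abcdef'
if ($NodeToken -notmatch '^[a-z0-9]{6}\.[a-z0-9]{16}$') {
    throw "Not a valid bootstrap token: $NodeToken"
}
```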
10 changes: 8 additions & 2 deletions data/nodes/controller.yaml
@@ -13,7 +13,13 @@ k8s::server::manage_kubeadm: true
# Choose an interface which is for cluster communications.
# The apiserver will expose a port on the controller
# and all the workers need to be able to reach it.
k8s::server::apiserver::advertise_address: "%{facts.networking.interfaces.enp0s8.ip}"
#k8s::server::apiserver::advertise_address: "%{facts.networking.interfaces.enp0s8.ip}"
# This is for my VMs:
#k8s::server::apiserver::advertise_address: "%{facts.networking.interfaces.ens32.ip}"
# Why use a hardware-specific fact when there is a non-hardware-specific one?
#k8s::server::apiserver::advertise_address: "%{facts.networking.ip}"
# Why set something here when the default in .modules\k8s\manifests\server\apiserver.pp
# is the same?

### K8S::Server::Resources
k8s::server::resources::manage_flannel: false
#k8s::server::resources::manage_flannel: false
3 changes: 2 additions & 1 deletion data/nodes/worker.yaml
@@ -4,5 +4,6 @@ classes:

### K8S::Node
# see common.yaml for the origin of the data
k8s::node::node_token: "puppet.%{lookup('k8s::server::resources::bootstrap::secret')}"
#k8s::node::node_token: "puppet.%{lookup('k8s::server::resources::bootstrap::secret')}"
#k8s::node::node_token: "puppet.%{lookup('k8s::secret')}"
k8s::node::manage_crictl: true
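To see what a target actually resolves for the token, Bolt's lookup subcommand can be run through the same container wrapper; a sketch, assuming bolt lookup is available in the 3.27.4 image and reusing the shared variables from _Shared.ps1. Outside a plan, %{node_type} does not interpolate, so the value comes from the common.yaml layer:

```powershell
# Sketch: resolve k8s::node::node_token for a target through the Bolt container.
$Command = "docker container run --mount 'type=bind,source=$DockerMountSource,destination=$DockerMountDestination'" +
    " --workdir '$DockerMountDestination' --rm --env 'BOLT_PROJECT=$DockerMountDestination' '$BoltDockerImage'" +
    " lookup 'k8s::node::node_token' --targets '$Target' --user '$User' --password '$Pass'" +
    " --inventory 'inventory.yaml'"
Write-Verbose $Command
Invoke-Expression $Command
```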
14 changes: 8 additions & 6 deletions hiera.yaml
@@ -1,8 +1,10 @@
---
version: 5
#hierarchy:
# - name: Common
# path: common.yaml
#defaults:
# data_hash: yaml_data
# datadir: data
hierarchy:
- name: Per node type data
path: nodes/%{node_type}.yaml
- name: Common
path: common.yaml
defaults:
data_hash: yaml_data
datadir: data
4 changes: 2 additions & 2 deletions manifests/my_manifest.pp
@@ -1,8 +1,8 @@
class test_voxpupuli_puppet_k8s::my_manifest (
Enum['controller', 'worker'] $type,
Enum['controller', 'worker'] $node_type,
) {
notify { 'Start test_voxpupuli_puppet_k8s::my_manifest': }
if $type == 'controller' {
if $node_type == 'controller' {
include profile::k8s::controller
} else {
include profile::k8s::worker
4 changes: 2 additions & 2 deletions plans/myplan.pp
@@ -1,6 +1,6 @@
plan test_voxpupuli_puppet_k8s::myplan (
TargetSpec $targets,
Enum['controller', 'worker'] $type,
Enum['controller', 'worker'] $node_type,
) {
out::message('Start test_voxpupuli_puppet_k8s::myplan.')
#
@@ -13,7 +13,7 @@
$apply_results = apply($targets) {
notify { "Start apply(${trusted['hostname']}).": }
class { 'test_voxpupuli_puppet_k8s::my_manifest':
type => $type,
node_type => $node_type,
}
notify { "End apply(${trusted['hostname']}).":}
}
