Skip to content

Commit

Permalink
Add cluster management plans
Browse files Browse the repository at this point in the history
  • Loading branch information
jay7x committed Feb 9, 2023
1 parent c46cc4f commit 13a14a8
Show file tree
Hide file tree
Showing 5 changed files with 200 additions and 0 deletions.
5 changes: 5 additions & 0 deletions hiera.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -19,3 +19,8 @@ hierarchy:
- "os/%{facts.os.family}.yaml"
- name: 'common'
path: 'common.yaml'

# Define plan lookup hierarchy
# This prevents the "Interpolations are not supported in lookups outside of an
# apply block" error when a plan that uses lookup() is executed
plan_hierarchy: []
33 changes: 33 additions & 0 deletions plans/cluster/delete.pp
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# @summary Delete the cluster of Lima VMs
# @param name
#   Cluster name
# @param clusters
#   Hash of all defined clusters. Populated from Hiera usually.
# @param target
#   The host to run the limactl on
plan lima::cluster::delete (
  String[1] $name,
  Optional[Hash] $clusters = undef,
  TargetSpec $target = 'localhost',
) {
  # Resolve the named cluster definition (fails the plan if undefined).
  $cluster = run_plan('lima::clusters', 'clusters' => $clusters, 'name' => $name)

  # Extract node names. A node entry may be a bare string or a hash with a
  # 'name' key; any other shape maps to undef and is filtered out below so
  # the delete task is never invoked without a VM name.
  $defined_nodes = $cluster['nodes'].map |$node| {
    $node ? {
      Hash    => $node['name'],
      String  => $node,
      default => undef,
    }
  }.filter |$x| { $x =~ NotUndef }
  out::verbose("Nodes to delete: ${defined_nodes}")

  # Deletion of one VM is independent of the others, so fan out in parallel.
  $delete_res = parallelize($defined_nodes) |$node| {
    run_task(
      'lima::delete',
      $target,
      'name' => $node,
    )
  }

  return $delete_res
}
102 changes: 102 additions & 0 deletions plans/cluster/start.pp
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
# @summary Create/start the cluster of Lima VMs
# @param name
#   Cluster name
# @param clusters
#   Hash of all defined clusters. Populated from Hiera usually.
# @param target
#   The host to run the limactl on
plan lima::cluster::start (
  String[1] $name,
  Optional[Hash] $clusters = undef,
  TargetSpec $target = 'localhost',
) {
  # Resolve the named cluster definition (fails the plan if undefined).
  $cluster = run_plan('lima::clusters', 'clusters' => $clusters, 'name' => $name)
  $tgt = get_target($target)

  # Normalize every node entry into { 'name' => ..., <config|template|url> => ... }.
  $cluster_config = $cluster['nodes'].map |$node| {
    $n = $node ? {
      Hash    => $node,
      String  => { 'name' => $node },
      default => {},
    }

    # Use per-node configs first. Use cluster-wide configs otherwise.
    # Look for an explicit config hash first, then template, then url.
    $cfg = [
      [$n['config'], 'config'],
      [$n['template'], 'template'],
      [$n['url'], 'url'],
      [$cluster['config'], 'config'],
      [$cluster['template'], 'template'],
      [$cluster['url'], 'url'],
    ].filter |$x| { $x[0] } # Drop undefined options

    unless $cfg.count >= 1 {
      fail("Node ${n['name']} has no config/template/url defined in the cluster configuration")
    }

    # Use the first defined option ($cfg[0])
    ({ 'name' => $n['name'], $cfg[0][1] => $cfg[0][0] })
  }

  $defined_nodes = $cluster_config.map |$node| { $node['name'] }
  out::verbose("Defined nodes: ${defined_nodes}")

  # Collect the target's facts if not cached yet (needed for the CPU count).
  if empty(facts($tgt)) {
    without_default_logging() || {
      run_plan('facts', $tgt, '_catch_errors' => true)
    }
  }
  $cpus = facts($tgt).get('processors.count')
  # Assume every VM can consume up to 200% of a CPU core on start
  $start_threads = if $cpus < 4 { 1 } else { $cpus / 2 }

  # Get existing VMs
  $list_res = without_default_logging() || {
    run_task(
      'lima::list',
      $tgt,
      { 'names' => $defined_nodes },
    )
  }
  $lima_config = $list_res.find($target).value['list']

  # Create missing nodes
  $missing_nodes = $defined_nodes - $lima_config.map |$x| { $x['name'] }
  out::verbose("Nodes to create: ${missing_nodes}")

  # `limactl start` cannot create multiple images in parallel
  # See https://github.com/lima-vm/lima/issues/1354
  # So create the VMs sequentially. Each node is created from its resolved
  # entry in $cluster_config rather than the cluster-wide settings, so
  # per-node config/template/url overrides are honoured.
  $create_res = $cluster_config
    .filter |$node| { $node['name'] in $missing_nodes }
    .map |$node| {
      run_task(
        'lima::start',
        $tgt,
        "Create VM ${node['name']}",
        $node,
      )
    }

  # Start existing but non-running nodes
  $stopped_nodes = $lima_config
    .filter |$x| { $x['status'] == 'Stopped' }
    .map |$x| { $x['name'] }
  out::verbose("Nodes to start (${start_threads} nodes per batch): ${stopped_nodes}")

  # Run in batches of $start_threads VMs in parallel
  $start_res = $stopped_nodes.slice($start_threads).map |$batch| {
    $batch.parallelize |$node| {
      run_task(
        'lima::start',
        $tgt,
        "Start VM ${node}",
        'name' => $node,
      )
    }
  }

  return flatten($create_res + $start_res)
}
46 changes: 46 additions & 0 deletions plans/cluster/stop.pp
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
# @summary Stop the cluster of Lima VMs
# @param name
#   Cluster name
# @param clusters
#   Hash of all defined clusters. Populated from Hiera usually.
# @param target
#   The host to run the limactl on
plan lima::cluster::stop (
  String[1] $name,
  Optional[Hash] $clusters = undef,
  TargetSpec $target = 'localhost',
) {
  # Resolve the named cluster definition (fails the plan if undefined).
  $cluster = run_plan('lima::clusters', 'clusters' => $clusters, 'name' => $name)

  # Extract node names. A node entry may be a bare string or a hash with a
  # 'name' key; any other shape maps to undef and is filtered out below so
  # no undef name is handed to the lima::list task.
  $defined_nodes = $cluster['nodes'].map |$node| {
    $node ? {
      Hash    => $node['name'],
      String  => $node,
      default => undef,
    }
  }.filter |$x| { $x =~ NotUndef }
  out::verbose("Defined nodes: ${defined_nodes}")

  # Query limactl for the current state of the defined VMs.
  $list_res = without_default_logging() || {
    run_task(
      'lima::list',
      $target,
      'names' => $defined_nodes,
    )
  }
  $running_nodes = $list_res.find($target).value['list']
    .filter |$x| { $x['status'] == 'Running' }
    .map |$x| { $x['name'] }
  out::verbose("Nodes to stop: ${running_nodes}")

  # Stop running nodes; each stop is independent, so fan out in parallel.
  $stop_res = parallelize($running_nodes) |$node| {
    run_task(
      'lima::stop',
      $target,
      'name' => $node,
    )
  }

  return $stop_res
}
14 changes: 14 additions & 0 deletions plans/clusters.pp
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
# @summary Return the cluster definition
# @param name
#   Cluster name
# @param clusters
#   Hash of all defined clusters. Populated from Hiera usually.
# @return [Hash] Return the named cluster definition
plan lima::clusters (
  String[1] $name,
  Hash $clusters = lookup('lima::clusters', 'default_value' => {}),
) {
  $definition = $clusters[$name]

  # Fail both when the key is absent and when it maps to undef.
  if $definition =~ Undef {
    fail_plan("Cluster '${name}' is not defined", 'lima/undefined-cluster')
  }

  return $definition
}

0 comments on commit 13a14a8

Please sign in to comment.