Skip to content

Commit

Permalink
Merge pull request #107 from thebsdbox/node_label
Browse files Browse the repository at this point in the history
Node commands
  • Loading branch information
thebsdbox authored Aug 14, 2018
2 parents 1f24afa + f504ec8 commit d725a12
Show file tree
Hide file tree
Showing 3 changed files with 364 additions and 6 deletions.
183 changes: 178 additions & 5 deletions cmd/ucp_nodes.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,18 +3,50 @@ package cmd
import (
"fmt"
"os"
"strconv"
"text/tabwriter"

log "github.com/Sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/thebsdbox/diver/pkg/ucp"
)

// Used to enable or disable orchestrator type on a node; bound to the
// --kubernetes/--swarm flags of the "orchestrator" subcommand in init().
var orchestratorKube, orchestratorSwarm bool

// Set a node to a swarm availability state; bound to the --state flag
// (expected values: active/drain/pause).
var availability string

// Set a node to a specific role type; bound to the --role flag
// (expected values: manager/worker).
var role string

// Set a label on a node; bound to the --key/--value flags of the
// "label" subcommand.
var labelKey, labelValue string

func init() {

ucpNodesAvailability.Flags().StringVar(&id, "id", "", "ID of the Docker Node")
ucpNodesAvailability.Flags().StringVar(&availability, "state", "active", "Node availability [active/drain/pause]")

ucpNodesGet.Flags().StringVar(&id, "id", "", "ID of the Docker Node")

ucpNodes.AddCommand(ucpNodesList)
ucpNodesLabel.Flags().StringVar(&id, "id", "", "ID of the Docker Node")
ucpNodesLabel.Flags().StringVar(&labelKey, "key", "", "The label Key")
ucpNodesLabel.Flags().StringVar(&labelValue, "value", "", "The label Value")

ucpNodesOrchestrator.Flags().StringVar(&id, "id", "", "ID of the Docker Node")
ucpNodesOrchestrator.Flags().BoolVar(&orchestratorKube, "kubernetes", false, "Enable Kubernetes to use this node")
ucpNodesOrchestrator.Flags().BoolVar(&orchestratorSwarm, "swarm", false, "Enable Swarm to use this node")

ucpNodesRole.Flags().StringVar(&id, "id", "", "ID of the Docker Node")
ucpNodesRole.Flags().StringVar(&role, "role", "", "Node role [manager/worker]")

ucpNodes.AddCommand(ucpNodesAvailability)
ucpNodes.AddCommand(ucpNodesGet)
ucpNodes.AddCommand(ucpNodesLabel)
ucpNodes.AddCommand(ucpNodesList)
ucpNodes.AddCommand(ucpNodesOrchestrator)
ucpNodes.AddCommand(ucpNodesRole)

// Add nodes to UCP root commands
UCPRoot.AddCommand(ucpNodes)
Expand Down Expand Up @@ -51,11 +83,31 @@ var ucpNodesList = &cobra.Command{
log.Fatalf("No Nodes found")
}
w := tabwriter.NewWriter(os.Stdout, 0, 0, tabPadding, ' ', 0)
fmt.Fprintln(w, "Name\tID\tRole\tVersion\tPlatform")
fmt.Fprintln(w, "Name\tID\tRole\tVersion\tPlatform\tSwarm\tKubernetes")
for i := range nodes {
// Combine OS/Arch
platform := nodes[i].Description.Platform.OS + "/" + nodes[i].Description.Platform.Architecture
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", nodes[i].Description.Hostname, nodes[i].ID, nodes[i].Spec.Role, nodes[i].Description.Engine.EngineVersion, platform)

// Determine Orchestrator configuration
orchestratorKube, err = strconv.ParseBool(nodes[i].Spec.Labels["com.docker.ucp.orchestrator.kubernetes"])
if err != nil {
// If there is an error it means that the label isn't part of the spec, default to disabled
orchestratorKube = false
}

orchestratorSwarm, err = strconv.ParseBool(nodes[i].Spec.Labels["com.docker.ucp.orchestrator.swarm"])
if err != nil {
// If there is an error it means that the label isn't part of the spec, default to disabled
orchestratorSwarm = false
}

fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%t\t%t\n", nodes[i].Description.Hostname,
nodes[i].ID,
nodes[i].Spec.Role,
nodes[i].Description.Engine.EngineVersion,
platform,
orchestratorSwarm,
orchestratorKube)
}
w.Flush()
},
Expand All @@ -66,7 +118,10 @@ var ucpNodesGet = &cobra.Command{
Short: "Get information about a particular Docker Node",
Run: func(cmd *cobra.Command, args []string) {
log.SetLevel(log.Level(logLevel))

if id == "" {
cmd.Help()
log.Fatalln("No Node ID specified")
}
client, err := ucp.ReadToken()
if err != nil {
// Fatal error if can't read the token
Expand All @@ -78,8 +133,126 @@ var ucpNodesGet = &cobra.Command{
log.Fatalf("%v", err)
}
log.Debugf("Retrieved information about [%s]", node.Description.Hostname)

w := tabwriter.NewWriter(os.Stdout, 0, 0, tabPadding, ' ', 0)
fmt.Fprintln(w, "Label Key\tLabel Value")
for k, v := range node.Spec.Labels {
fmt.Printf("%s / %s\n", k, v)
fmt.Fprintf(w, "%s\t%s\n", k, v)
}
w.Flush()
},
}

// ucpNodesOrchestrator configures which orchestrators (Kubernetes and/or
// Swarm) may schedule workloads on a node, by writing the
// com.docker.ucp.orchestrator.* node labels through the UCP API.
var ucpNodesOrchestrator = &cobra.Command{
	Use:   "orchestrator",
	Short: "Configure which orchestrators can utilise a node",
	Run: func(cmd *cobra.Command, args []string) {
		log.SetLevel(log.Level(logLevel))
		if id == "" {
			cmd.Help()
			log.Fatalln("No Node ID specified")
		}
		client, err := ucp.ReadToken()
		if err != nil {
			// Fatal error if can't read the token
			log.Fatalf("%v", err)
		}

		// If both orchestrators are false the node can't be scheduled any
		// workload — warn the user, but apply the configuration anyway.
		if !orchestratorKube && !orchestratorSwarm {
			log.Warn("This node has no orchestrators defined and won't be scheduled any workload")
		}

		err = client.SetNodeLabel(id, "com.docker.ucp.orchestrator.kubernetes", strconv.FormatBool(orchestratorKube))
		if err != nil {
			// Fatal error if the Kubernetes orchestrator label couldn't be set
			log.Fatalf("%v", err)
		}
		err = client.SetNodeLabel(id, "com.docker.ucp.orchestrator.swarm", strconv.FormatBool(orchestratorSwarm))
		if err != nil {
			// Fatal error if the Swarm orchestrator label couldn't be set
			log.Fatalf("%v", err)
		}
		log.Infof("Configured Node [%s] to allow kubernetes=%t and swarm=%t", id, orchestratorKube, orchestratorSwarm)
	},
}

// ucpNodesAvailability sets a node's swarm availability state
// (active, drain or pause) through the UCP API.
var ucpNodesAvailability = &cobra.Command{
	Use:   "availability",
	Short: "Set the node availability [active/drain/pause]",
	Run: func(cmd *cobra.Command, args []string) {
		log.SetLevel(log.Level(logLevel))
		if id == "" {
			cmd.Help()
			log.Fatalln("No Node ID specified")
		}

		// Reject unknown states client-side rather than round-tripping an
		// invalid value to the UCP API.
		switch availability {
		case "active", "drain", "pause":
		default:
			cmd.Help()
			log.Fatalf("Unknown availability state [%s], should be one of active/drain/pause", availability)
		}

		client, err := ucp.ReadToken()
		if err != nil {
			// Fatal error if can't read the token
			log.Fatalf("%v", err)
		}

		err = client.SetNodeAvailability(id, availability)
		if err != nil {
			log.Fatalf("%v", err)
		}

		log.Infof("Successfully set node [%s] to state [%s]", id, availability)
	},
}

// ucpNodesRole sets a node's swarm role (manager or worker) through the
// UCP API.
var ucpNodesRole = &cobra.Command{
	Use:   "role",
	Short: "Set the node role [manager/worker]",
	Run: func(cmd *cobra.Command, args []string) {
		log.SetLevel(log.Level(logLevel))
		if id == "" {
			cmd.Help()
			log.Fatalln("No Node ID specified")
		}

		if role == "" {
			cmd.Help()
			log.Fatalln("No Node Role specified, should be either manager or worker")
		}

		// Reject unknown roles client-side rather than round-tripping an
		// invalid value to the UCP API.
		if role != "manager" && role != "worker" {
			cmd.Help()
			log.Fatalf("Unknown role [%s], should be either manager or worker", role)
		}

		client, err := ucp.ReadToken()
		if err != nil {
			// Fatal error if can't read the token
			log.Fatalf("%v", err)
		}
		err = client.SetNodeRole(id, role)
		if err != nil {
			log.Fatalf("%v", err)
		}

		log.Infof("Successfully set node [%s] to swarm role [%s]", id, role)
	},
}

// ucpNodesLabel applies a key/value label to a node through the UCP API.
// The value may be empty; the key is mandatory.
var ucpNodesLabel = &cobra.Command{
	Use:   "label",
	Short: "Set a label and value on a node",
	Run: func(cmd *cobra.Command, args []string) {
		log.SetLevel(log.Level(logLevel))
		if id == "" {
			cmd.Help()
			log.Fatalln("No Node ID specified")
		}
		if labelKey == "" {
			cmd.Help()
			log.Fatalln("No label key has been specified")
		}
		client, err := ucp.ReadToken()
		if err != nil {
			// Fatal error if can't read the token
			log.Fatalf("%v", err)
		}
		err = client.SetNodeLabel(id, labelKey, labelValue)
		if err != nil {
			// Fatal error if the label couldn't be applied to the node
			log.Fatalf("%v", err)
		}
		log.Infof("Successfully updated node [%s] with the label [%s=%s]", id, labelKey, labelValue)
	},
}
86 changes: 86 additions & 0 deletions docs/ucp/nodes.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
## Nodes

The `node` subcommand allows a user to interrogate and make various configuration changes to all nodes that are part of a Docker EE cluster


### List all nodes in the EE cluster

The `diver ucp nodes list` command will return all nodes across the entire cluster.

```
diver ucp nodes list
Name ID Role Version Platform Swarm Kubernetes
docker03.local d5kqmkg5elaq1ygk6nf7qzmer worker 18.03.1-ce linux/x86_64 true false
docker01.local l8h2ejtpxkuf5o2loygwk8zun manager 18.03.1-ce linux/x86_64 true true
docker02.local tlbmntgk7plu19w3ob98r2nel worker 18.03.1-ce linux/x86_64 true false
```

### Manage Orchestrators

By default in Docker EE 2+ there is the option of using multiple orchestrators to manage the Docker EE nodes, the `orchestrator` command provides the option to set which orchestrator will manage the nodes.

**NOTE** it appears that you can also set a node to have no orchestrator, effectively rendering the node unusable by UCP.

The `--swarm` and `--kubernetes` flags will enable either or both orchestrators:

```
diver ucp nodes orchestrator --swarm --id tlbmntgk7plu19w3ob98r2nel
INFO[0000] Configured Node [tlbmntgk7plu19w3ob98r2nel] to allow kubernetes=false and swarm=true
```

De-activating both orchestrators requires passing no flags:

```
diver ucp nodes orchestrator --id tlbmntgk7plu19w3ob98r2nel
WARN[0000] This node has no orchestrators defined and wont be scheduled any workload
INFO[0000] Configured Node [tlbmntgk7plu19w3ob98r2nel] to allow kubernetes=false and swarm=false
```

### Manage Availability

Nodes have three usage states:

- active - in use
- pause - won't take additional tasks
- drain - remove all running tasks

Setting the availability state:

```
diver ucp nodes availability --id d5kqmkg5elaq1ygk6nf7qzmer --state active
INFO[0000] Succesfully set node [d5kqmkg5elaq1ygk6nf7qzmer] to state [active]
```

### Manage node role

Docker nodes can either be managers (run UCP etc.) or workers, which is where most workloads will run

Set the node role:

```
diver ucp nodes role --role worker --id tlbmntgk7plu19w3ob98r2nel
INFO[0000] Succesfully set node [tlbmntgk7plu19w3ob98r2nel] to swarm role [worker]
```

### Apply labels to a node

```
diver ucp nodes label --key labelkey --value labelvalue --id tlbmntgk7plu19w3ob98r2nel
INFO[0000] Succesfully updated node [tlbmntgk7plu19w3ob98r2nel] with the label [labelkey=labelvalue]
```

### Investigate a node's labels

```
diver ucp nodes get --id tlbmntgk7plu19w3ob98r2nel
Label Key Label Value
labelkey labelvalue
com.docker.ucp.collection system
com.docker.ucp.collection.root true
com.docker.ucp.collection.system true
com.docker.ucp.orchestrator.swarm false
com.docker.ucp.SANs 192.168.0.141,localhost,proxy.local,docker02.local,tmpp1fniumpwgvv5c1vusexcw,172.17.0.1,127.0.0.1,10.96.0.1
com.docker.ucp.access.label /System
com.docker.ucp.collection.swarm true
com.docker.ucp.orchestrator.kubernetes false
```
Loading

0 comments on commit d725a12

Please sign in to comment.