diff --git a/cmd/ucp_nodes.go b/cmd/ucp_nodes.go
index 92684e8..59d3de3 100644
--- a/cmd/ucp_nodes.go
+++ b/cmd/ucp_nodes.go
@@ -3,6 +3,7 @@ package cmd
 import (
 	"fmt"
 	"os"
+	"strconv"
 	"text/tabwriter"
 
 	log "github.com/Sirupsen/logrus"
@@ -10,11 +11,42 @@ import (
 	"github.com/thebsdbox/diver/pkg/ucp"
 )
 
+// Used to enable or disable an orchestrator type
+var orchestratorKube, orchestratorSwarm bool
+
+// Set a node to a swarm availability state
+var availability string
+
+// Set a node to a specific role type
+var role string
+
+// Set a label on a node
+var labelKey, labelValue string
+
 func init() {
+
+	ucpNodesAvailability.Flags().StringVar(&id, "id", "", "ID of the Docker Node")
+	ucpNodesAvailability.Flags().StringVar(&availability, "state", "active", "Node availability [active/drain/pause]")
+
 	ucpNodesGet.Flags().StringVar(&id, "id", "", "ID of the Docker Node")
 
-	ucpNodes.AddCommand(ucpNodesList)
+	ucpNodesLabel.Flags().StringVar(&id, "id", "", "ID of the Docker Node")
+	ucpNodesLabel.Flags().StringVar(&labelKey, "key", "", "The label Key")
+	ucpNodesLabel.Flags().StringVar(&labelValue, "value", "", "The label Value")
+
+	ucpNodesOrchestrator.Flags().StringVar(&id, "id", "", "ID of the Docker Node")
+	ucpNodesOrchestrator.Flags().BoolVar(&orchestratorKube, "kubernetes", false, "Enable Kubernetes to use this node")
+	ucpNodesOrchestrator.Flags().BoolVar(&orchestratorSwarm, "swarm", false, "Enable Swarm to use this node")
+
+	ucpNodesRole.Flags().StringVar(&id, "id", "", "ID of the Docker Node")
+	ucpNodesRole.Flags().StringVar(&role, "role", "", "Node role [manager/worker]")
+
+	ucpNodes.AddCommand(ucpNodesAvailability)
 	ucpNodes.AddCommand(ucpNodesGet)
+	ucpNodes.AddCommand(ucpNodesLabel)
+	ucpNodes.AddCommand(ucpNodesList)
+	ucpNodes.AddCommand(ucpNodesOrchestrator)
+	ucpNodes.AddCommand(ucpNodesRole)
 
 	// Add nodes to UCP root commands
 	UCPRoot.AddCommand(ucpNodes)
@@ -51,11 +83,31 @@ var ucpNodesList = &cobra.Command{
 			log.Fatalf("No Nodes found")
 		}
 		w := tabwriter.NewWriter(os.Stdout, 0, 0, tabPadding, ' ', 0)
-		fmt.Fprintln(w, "Name\tID\tRole\tVersion\tPlatform")
+		fmt.Fprintln(w, "Name\tID\tRole\tVersion\tPlatform\tSwarm\tKubernetes")
 		for i := range nodes {
 			// Combine OS/Arch
 			platform := nodes[i].Description.Platform.OS + "/" + nodes[i].Description.Platform.Architecture
-			fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", nodes[i].Description.Hostname, nodes[i].ID, nodes[i].Spec.Role, nodes[i].Description.Engine.EngineVersion, platform)
+
+			// Determine the Orchestrator configuration
+			orchestratorKube, err = strconv.ParseBool(nodes[i].Spec.Labels["com.docker.ucp.orchestrator.kubernetes"])
+			if err != nil {
+				// An error means the label isn't part of the spec, so default to disabled
+				orchestratorKube = false
+			}
+
+			orchestratorSwarm, err = strconv.ParseBool(nodes[i].Spec.Labels["com.docker.ucp.orchestrator.swarm"])
+			if err != nil {
+				// An error means the label isn't part of the spec, so default to disabled
+				orchestratorSwarm = false
+			}
+
+			fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%t\t%t\n", nodes[i].Description.Hostname,
+				nodes[i].ID,
+				nodes[i].Spec.Role,
+				nodes[i].Description.Engine.EngineVersion,
+				platform,
+				orchestratorSwarm,
+				orchestratorKube)
 		}
 		w.Flush()
 	},
@@ -66,7 +118,10 @@ var ucpNodesGet = &cobra.Command{
 	Short: "Get information about a particular Docker Node",
 	Run: func(cmd *cobra.Command, args []string) {
 		log.SetLevel(log.Level(logLevel))
-
+		if id == "" {
+			cmd.Help()
+			log.Fatalln("No Node ID specified")
+		}
 		client, err := ucp.ReadToken()
 		if err != nil {
 			// Fatal error if can't read the token
@@ -78,8 +133,126 @@ var ucpNodesGet = &cobra.Command{
 			log.Fatalf("%v", err)
 		}
 		log.Debugf("Retrieved information about [%s]", node.Description.Hostname)
+
+		w := tabwriter.NewWriter(os.Stdout, 0, 0, tabPadding, ' ', 0)
+		fmt.Fprintln(w, "Label Key\tLabel Value")
 		for k, v := range node.Spec.Labels {
-			fmt.Printf("%s / %s\n", k, v)
+			fmt.Fprintf(w, "%s\t%s\n", k, v)
 		}
+		w.Flush()
+	},
+}
+
+var ucpNodesOrchestrator = &cobra.Command{
+	Use:   "orchestrator",
+	Short: "Configure which orchestrators can utilise a node",
+	Run: func(cmd *cobra.Command, args []string) {
+		log.SetLevel(log.Level(logLevel))
+		if id == "" {
+			cmd.Help()
+			log.Fatalln("No Node ID specified")
+		}
+		client, err := ucp.ReadToken()
+		if err != nil {
+			// Fatal error if can't read the token
+			log.Fatalf("%v", err)
+		}
+
+		// If both orchestrators are disabled, then neither can schedule workloads (display a warning)
+		if !orchestratorKube && !orchestratorSwarm {
+			log.Warn("This node has no orchestrators defined and won't be scheduled any workloads")
+		}
+
+		err = client.SetNodeLabel(id, "com.docker.ucp.orchestrator.kubernetes", strconv.FormatBool(orchestratorKube))
+		if err != nil {
+			// Fatal error if the label couldn't be applied
+			log.Fatalf("%v", err)
+		}
+		err = client.SetNodeLabel(id, "com.docker.ucp.orchestrator.swarm", strconv.FormatBool(orchestratorSwarm))
+		if err != nil {
+			// Fatal error if the label couldn't be applied
+			log.Fatalf("%v", err)
+		}
+		log.Infof("Configured Node [%s] to allow kubernetes=%t and swarm=%t", id, orchestratorKube, orchestratorSwarm)
+	},
+}
+
+var ucpNodesAvailability = &cobra.Command{
+	Use:   "availability",
+	Short: "Set the node availability [active/drain/pause]",
+	Run: func(cmd *cobra.Command, args []string) {
+		log.SetLevel(log.Level(logLevel))
+		if id == "" {
+			cmd.Help()
+			log.Fatalln("No Node ID specified")
+		}
+		client, err := ucp.ReadToken()
+		if err != nil {
+			// Fatal error if can't read the token
+			log.Fatalf("%v", err)
+		}
+
+		err = client.SetNodeAvailability(id, availability)
+		if err != nil {
+			log.Fatalf("%v", err)
+		}
+
+		log.Infof("Successfully set node [%s] to state [%s]", id, availability)
+	},
+}
+
+var ucpNodesRole = &cobra.Command{
+	Use:   "role",
+	Short: "Set the node role [manager/worker]",
+	Run: func(cmd *cobra.Command, args []string) {
+		log.SetLevel(log.Level(logLevel))
+		if id == "" {
+			cmd.Help()
+			log.Fatalln("No Node ID specified")
+		}
+
+		if role == "" {
+			cmd.Help()
+			log.Fatalln("No Node Role specified, should be either manager or worker")
+		}
+
+		client, err := ucp.ReadToken()
+		if err != nil {
+			// Fatal error if can't read the token
+			log.Fatalf("%v", err)
+		}
+		err = client.SetNodeRole(id, role)
+		if err != nil {
+			log.Fatalf("%v", err)
+		}
+
+		log.Infof("Successfully set node [%s] to swarm role [%s]", id, role)
+	},
+}
+
+var ucpNodesLabel = &cobra.Command{
+	Use:   "label",
+	Short: "Set a label and value on a node",
+	Run: func(cmd *cobra.Command, args []string) {
+		log.SetLevel(log.Level(logLevel))
+		if id == "" {
+			cmd.Help()
+			log.Fatalln("No Node ID specified")
+		}
+		if labelKey == "" {
+			cmd.Help()
+			log.Fatalln("No label key has been specified")
+		}
+		client, err := ucp.ReadToken()
+		if err != nil {
+			// Fatal error if can't read the token
+			log.Fatalf("%v", err)
+		}
+		err = client.SetNodeLabel(id, labelKey, labelValue)
+		if err != nil {
+			// Fatal error if the label couldn't be applied
+			log.Fatalf("%v", err)
+		}
+		log.Infof("Successfully updated node [%s] with the label [%s=%s]", id, labelKey, labelValue)
 	},
 }
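For reference, a minimal standalone sketch (not part of the diff) of the default-to-disabled behaviour the `list` command above relies on: a node label that is missing or is not a valid boolean is treated as "orchestrator disabled", because `strconv.ParseBool` returns an error for anything it cannot parse. The `orchestratorEnabled` helper is hypothetical and exists only for illustration.

```go
package main

import (
	"fmt"
	"strconv"
)

// orchestratorEnabled mirrors the logic used by ucpNodesList above:
// a missing or malformed label simply reads as "disabled".
func orchestratorEnabled(labels map[string]string, key string) bool {
	enabled, err := strconv.ParseBool(labels[key])
	if err != nil {
		// Label absent or not a boolean - treat the orchestrator as disabled
		return false
	}
	return enabled
}

func main() {
	labels := map[string]string{"com.docker.ucp.orchestrator.swarm": "true"}
	fmt.Println(orchestratorEnabled(labels, "com.docker.ucp.orchestrator.swarm"))      // true
	fmt.Println(orchestratorEnabled(labels, "com.docker.ucp.orchestrator.kubernetes")) // false
}
```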
diff --git a/docs/ucp/nodes.md b/docs/ucp/nodes.md
new file mode 100644
index 0000000..ab25851
--- /dev/null
+++ b/docs/ucp/nodes.md
@@ -0,0 +1,86 @@
+## Nodes
+
+The `nodes` subcommand allows a user to interrogate, and make various configuration changes to, all nodes that are part of a Docker EE cluster.
+
+
+### List all nodes in the EE cluster
+
+The `diver ucp nodes list` command will return all nodes across the entire cluster.
+
+```
+diver ucp nodes list
+Name            ID                         Role     Version     Platform      Swarm  Kubernetes
+docker03.local  d5kqmkg5elaq1ygk6nf7qzmer  worker   18.03.1-ce  linux/x86_64  true   false
+docker01.local  l8h2ejtpxkuf5o2loygwk8zun  manager  18.03.1-ce  linux/x86_64  true   true
+docker02.local  tlbmntgk7plu19w3ob98r2nel  worker   18.03.1-ce  linux/x86_64  true   false
+```
+
+### Manage Orchestrators
+
+Docker EE 2+ has the option of using multiple orchestrators to manage the Docker EE nodes; the `orchestrator` command sets which orchestrator(s) are allowed to manage a node.
+
+**NOTE** It appears that you can also set a node to have no orchestrator at all, effectively rendering the node unusable by UCP.
+
+The `--swarm` and `--kubernetes` flags will enable either or both orchestrators:
+
+```
+diver ucp nodes orchestrator --swarm --id tlbmntgk7plu19w3ob98r2nel
+INFO[0000] Configured Node [tlbmntgk7plu19w3ob98r2nel] to allow kubernetes=false and swarm=true
+```
+
+Deactivating both orchestrators is done by passing neither flag:
+
+```
+diver ucp nodes orchestrator --id tlbmntgk7plu19w3ob98r2nel
+WARN[0000] This node has no orchestrators defined and won't be scheduled any workloads
+INFO[0000] Configured Node [tlbmntgk7plu19w3ob98r2nel] to allow kubernetes=false and swarm=false
+```
+
+### Manage Availability
+
+Nodes have three availability states:
+
+- active - in use
+- pause - won't take on additional tasks
+- drain - all running tasks are removed
+
+Setting the availability state:
+
+```
+diver ucp nodes availability --id d5kqmkg5elaq1ygk6nf7qzmer --state active
+INFO[0000] Successfully set node [d5kqmkg5elaq1ygk6nf7qzmer] to state [active]
+```
+
+### Manage node role
+
+Docker nodes are either managers (these run UCP itself) or workers, which is where most workloads will run.
+
+Set the node role:
+
+```
+diver ucp nodes role --role worker --id tlbmntgk7plu19w3ob98r2nel
+INFO[0000] Successfully set node [tlbmntgk7plu19w3ob98r2nel] to swarm role [worker]
+```
+
+### Apply labels to a node
+
+```
+diver ucp nodes label --key labelkey --value labelvalue --id tlbmntgk7plu19w3ob98r2nel
+INFO[0000] Successfully updated node [tlbmntgk7plu19w3ob98r2nel] with the label [labelkey=labelvalue]
+```
+
+### Investigate a node's labels
+
+```
+diver ucp nodes get --id tlbmntgk7plu19w3ob98r2nel
+Label Key                                Label Value
+labelkey                                 labelvalue
+com.docker.ucp.collection                system
+com.docker.ucp.collection.root           true
+com.docker.ucp.collection.system         true
+com.docker.ucp.orchestrator.swarm        false
+com.docker.ucp.SANs                      192.168.0.141,localhost,proxy.local,docker02.local,tmpp1fniumpwgvv5c1vusexcw,172.17.0.1,127.0.0.1,10.96.0.1
+com.docker.ucp.access.label              /System
+com.docker.ucp.collection.swarm          true
+com.docker.ucp.orchestrator.kubernetes   false
+```
diff --git a/pkg/ucp/ucpNodes.go b/pkg/ucp/ucpNodes.go
index 6668231..0d7f518 100644
--- a/pkg/ucp/ucpNodes.go
+++ b/pkg/ucp/ucpNodes.go
@@ -42,7 +42,7 @@ func (c *Client) GetNode(id string) (swarm.Node, error) {
 		return node, err
 	}
 
-	log.Debugf("Parsing all nodes")
+	log.Debugf("Parsing Node details")
 	err = json.Unmarshal(response, &node)
 	if err != nil {
 		return node, err
@@ -50,3 +50,102 @@ func (c *Client) GetNode(id string) (swarm.Node, error) {
 
 	return node, nil
 }
+
+//SetNodeLabel - Set a label (key/value) on a specific node
+func (c *Client) SetNodeLabel(id, k, v string) error {
+
+	log.Debugln("Retrieving information about existing configuration")
+	node, err := c.GetNode(id)
+	if err != nil {
+		return err
+	}
+
+	// Modify the node spec labels
+	node.Spec.Labels[k] = v
+
+	b, err := json.Marshal(node.Spec)
+	if err != nil {
+		return err
+	}
+	log.Debugf("%s", b)
+	url := fmt.Sprintf("%s/nodes/%s/update?version=%d", c.UCPURL, id, node.Version.Index)
+
+	response, err := c.postRequest(url, b)
+	if err != nil {
+		ParseUCPError(response)
+		return err
+	}
+	return nil
+}
+
+//SetNodeAvailability - Set the node availability (Active/Pause/Drain)
+func (c *Client) SetNodeAvailability(id, s string) error {
+
+	// Cast the string to a swarm.NodeAvailability type (string)
+	state := swarm.NodeAvailability(s)
+
+	// Check that the state is a known type
+	if state != swarm.NodeAvailabilityActive && state != swarm.NodeAvailabilityPause && state != swarm.NodeAvailabilityDrain {
+		return fmt.Errorf("Unknown node state [%s]", state)
+	}
+
+	log.Debugln("Retrieving information about existing configuration")
+	node, err := c.GetNode(id)
+	if err != nil {
+		return err
+	}
+	log.Debugf("Current Node state [%s], desired state [%s]", node.Spec.Availability, state)
+
+	// Update the availability
+	node.Spec.Availability = state
+
+	b, err := json.Marshal(node.Spec)
+	if err != nil {
+		return err
+	}
+	log.Debugf("%s", b)
+	url := fmt.Sprintf("%s/nodes/%s/update?version=%d", c.UCPURL, id, node.Version.Index)
+
+	response, err := c.postRequest(url, b)
+	if err != nil {
+		ParseUCPError(response)
+		return err
+	}
+	return nil
+}
+
+//SetNodeRole - Set the node role (Manager/Worker)
+func (c *Client) SetNodeRole(id, r string) error {
+
+	// Cast the string to a swarm.NodeRole type (string)
+	role := swarm.NodeRole(r)
+
+	// Check that the role is a known type
+	if role != swarm.NodeRoleManager && role != swarm.NodeRoleWorker {
+		return fmt.Errorf("Unknown node role [%s]", role)
+	}
+
+	log.Debugln("Retrieving information about existing configuration")
+	node, err := c.GetNode(id)
+	if err != nil {
+		return err
+	}
+	log.Debugf("Current Node role [%s], desired role [%s]", node.Spec.Role, role)
+
+	// Update the Node Role
+	node.Spec.Role = role
+
+	b, err := json.Marshal(node.Spec)
+	if err != nil {
+		return err
+	}
+	log.Debugf("%s", b)
+	url := fmt.Sprintf("%s/nodes/%s/update?version=%d", c.UCPURL, id, node.Version.Index)
+
+	response, err := c.postRequest(url, b)
+	if err != nil {
+		ParseUCPError(response)
+		return err
+	}
+	return nil
+}
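As a rough usage sketch of the new `pkg/ucp` helpers added above (assuming a stored UCP session that `ucp.ReadToken` can load; the node ID is just the example value from the documentation), the same operations the CLI exposes can be driven directly from Go:

```go
package main

import (
	log "github.com/Sirupsen/logrus"

	"github.com/thebsdbox/diver/pkg/ucp"
)

func main() {
	// Load the stored UCP session, exactly as the cobra commands above do
	client, err := ucp.ReadToken()
	if err != nil {
		log.Fatalf("%v", err)
	}

	id := "tlbmntgk7plu19w3ob98r2nel" // example node ID taken from the documentation above

	// Drain the node, demote it to a worker and tag it
	if err := client.SetNodeAvailability(id, "drain"); err != nil {
		log.Fatalf("%v", err)
	}
	if err := client.SetNodeRole(id, "worker"); err != nil {
		log.Fatalf("%v", err)
	}
	if err := client.SetNodeLabel(id, "labelkey", "labelvalue"); err != nil {
		log.Fatalf("%v", err)
	}
	log.Infof("Node [%s] drained, demoted and labelled", id)
}
```

Note that each helper re-reads the node spec before posting the change, so the `version` query parameter always carries the latest `node.Version.Index` and the update is not rejected as stale.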