Skip to content

Commit

Permalink
update of node pool workload metadata
Browse files Browse the repository at this point in the history
  • Loading branch information
emilymye committed May 15, 2020
1 parent 351f787 commit 22c5a0a
Show file tree
Hide file tree
Showing 4 changed files with 284 additions and 222 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -62,8 +62,38 @@ var (
"addons_config.0.config_connector_config",
<% end -%>
}

forceNewClusterNodeConfigFields = []string{
<% unless version == 'ga' -%>
"workload_metadata_config",
<% end -%>
}
)

// This uses the node pool nodeConfig schema but sets
// node-pool-only updatable fields to ForceNew
// clusterSchemaNodeConfig returns the shared node pool nodeConfig schema,
// adjusted for use on the cluster resource: fields that are only updatable
// in place through the node pool API are marked ForceNew here, so changing
// them on a cluster-embedded node_config recreates the cluster.
func clusterSchemaNodeConfig() *schema.Schema {
	sch := schemaNodeConfig()
	fields := sch.Elem.(*schema.Resource).Schema
	for _, name := range forceNewClusterNodeConfigFields {
		field, ok := fields[name]
		if !ok {
			continue
		}
		changeFieldSchemaToForceNew(field)
	}
	return sch
}

// changeFieldSchemaToForceNew marks sch as ForceNew and recurses into any
// nested resource element of a TypeList field, so every descendant field
// also forces replacement when changed.
func changeFieldSchemaToForceNew(sch *schema.Schema) {
	sch.ForceNew = true
	if sch.Type != schema.TypeList {
		return
	}
	nested, ok := sch.Elem.(*schema.Resource)
	if !ok {
		return
	}
	for _, child := range nested.Schema {
		changeFieldSchemaToForceNew(child)
	}
}

func rfc5545RecurrenceDiffSuppress(k, o, n string, d *schema.ResourceData) bool {
// This diff gets applied in the cloud console if you specify
// "FREQ=DAILY" in your config and add a maintenance exclusion.
Expand Down Expand Up @@ -663,7 +693,7 @@ func resourceContainerCluster() *schema.Resource {
},
},

"node_config": schemaNodeConfig,
"node_config": clusterSchemaNodeConfig(),

"node_pool": {
Type: schema.TypeList,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,7 @@ var schemaNodePool = map[string]*schema.Schema{
ForceNew: true,
},

"node_config": schemaNodeConfig,
"node_config": schemaNodeConfig(),

"node_count": {
Type: schema.TypeInt,
Expand Down Expand Up @@ -711,7 +711,40 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node

log.Printf("[INFO] Updated image type in Node Pool %s", d.Id())
}
<% unless version == 'ga' -%>
if d.HasChange(prefix + "node_config.0.workload_metadata_config") {
req := &containerBeta.UpdateNodePoolRequest{
NodePoolId: name,
WorkloadMetadataConfig: expandWorkloadMetadataConfig(
d.Get(prefix + "node_config.0.workload_metadata_config")),
}
if req.WorkloadMetadataConfig == nil {
req.ForceSendFields = []string{"WorkloadMetadataConfig"}
}
updateF := func() error {
op, err := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.
Update(nodePoolInfo.fullyQualifiedName(name), req).Do()
if err != nil {
return err
}

// Wait until it's updated
return containerOperationWait(config, op,
nodePoolInfo.project,
nodePoolInfo.location,
"updating GKE node pool workload_metadata_config",
timeout)
}

// Call update serially.
if err := lockedCall(lockKey, updateF); err != nil {
return err
}

log.Printf("[INFO] Updated workload_metadata_config for node pool %s", name)
}

<% end -%>
if prefix == "" {
d.SetPartial("node_config")
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -167,9 +167,10 @@ func TestAccContainerNodePool_withNodeConfig(t *testing.T) {
}

<% unless version.nil? || version == 'ga' -%>
func TestAccContainerNodePool_withWorkloadMetadataConfig(t *testing.T) {
func TestAccContainerNodePool_withWorkloadIdentityConfig(t *testing.T) {
t.Parallel()

pid := getTestProjectFromEnv()
cluster := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
np := fmt.Sprintf("tf-test-np-%s", randString(t, 10))

Expand All @@ -195,22 +196,6 @@ func TestAccContainerNodePool_withWorkloadMetadataConfig(t *testing.T) {
"node_config.0.workload_metadata_config.0.node_metadata",
},
},
},
})
}

func TestAccContainerNodePool_withWorkloadIdentityConfig(t *testing.T) {
t.Parallel()

pid := getTestProjectFromEnv()
cluster := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
np := fmt.Sprintf("tf-test-np-%s", randString(t, 10))

vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_withWorkloadMetadataConfig_gkeMetadataServer(pid, cluster, np),
Check: resource.ComposeTestCheckFunc(
Expand Down
Loading

0 comments on commit 22c5a0a

Please sign in to comment.