diff --git a/google/provider_compute_gen.go b/google/provider_compute_gen.go index 53652ff72fc..3f4814a8e5f 100644 --- a/google/provider_compute_gen.go +++ b/google/provider_compute_gen.go @@ -18,6 +18,7 @@ import "github.com/hashicorp/terraform/helper/schema" var GeneratedComputeResourcesMap = map[string]*schema.Resource{ "google_compute_backend_bucket": resourceComputeBackendBucket(), + "google_compute_disk": resourceComputeDisk(), "google_compute_global_address": resourceComputeGlobalAddress(), "google_compute_http_health_check": resourceComputeHttpHealthCheck(), "google_compute_https_health_check": resourceComputeHttpsHealthCheck(), diff --git a/google/resource_compute_disk.go b/google/resource_compute_disk.go index 9bab6c84312..da47f092ba9 100644 --- a/google/resource_compute_disk.go +++ b/google/resource_compute_disk.go @@ -1,15 +1,30 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + package google import ( "fmt" "log" "regexp" + "strconv" "strings" "time" "github.com/hashicorp/terraform/helper/customdiff" "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" + compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" ) @@ -21,106 +36,384 @@ var ( computeDiskUserRegex = regexp.MustCompile(computeDiskUserRegexString) ) +// Is the new disk size smaller than the old one? +func isDiskShrinkage(old, new, _ interface{}) bool { + // It's okay to remove size entirely. 
+ if old == nil || new == nil { + return false + } + return new.(int) < old.(int) +} + +// We cannot suppress the diff for the case when family name is not part of the image name since we can't +// make a network call in a DiffSuppressFunc. +func diskImageDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + // 'old' is read from the API. + // It always has the format 'https://www.googleapis.com/compute/v1/projects/(%s)/global/images/(%s)' + matches := resolveImageLink.FindStringSubmatch(old) + if matches == nil { + // Image read from the API doesn't have the expected format. In practice, it should never happen + return false + } + oldProject := matches[1] + oldName := matches[2] + + // Partial or full self link family + if resolveImageProjectFamily.MatchString(new) { + // Value matches pattern "projects/{project}/global/images/family/{family-name}$" + matches := resolveImageProjectFamily.FindStringSubmatch(new) + newProject := matches[1] + newFamilyName := matches[2] + + return diskImageProjectNameEquals(oldProject, newProject) && diskImageFamilyEquals(oldName, newFamilyName) + } + + // Partial or full self link image + if resolveImageProjectImage.MatchString(new) { + // Value matches pattern "projects/{project}/global/images/{image-name}$" + matches := resolveImageProjectImage.FindStringSubmatch(new) + newProject := matches[1] + newImageName := matches[2] + + return diskImageProjectNameEquals(oldProject, newProject) && diskImageEquals(oldName, newImageName) + } + + // Partial link without project family + if resolveImageGlobalFamily.MatchString(new) { + // Value is "global/images/family/{family-name}" + matches := resolveImageGlobalFamily.FindStringSubmatch(new) + familyName := matches[1] + + return diskImageFamilyEquals(oldName, familyName) + } + + // Partial link without project image + if resolveImageGlobalImage.MatchString(new) { + // Value is "global/images/{image-name}" + matches := resolveImageGlobalImage.FindStringSubmatch(new) + imageName := 
matches[1] + + return diskImageEquals(oldName, imageName) + } + + // Family shorthand + if resolveImageFamilyFamily.MatchString(new) { + // Value is "family/{family-name}" + matches := resolveImageFamilyFamily.FindStringSubmatch(new) + familyName := matches[1] + + return diskImageFamilyEquals(oldName, familyName) + } + + // Shorthand for image or family + if resolveImageProjectImageShorthand.MatchString(new) { + // Value is "{project}/{image-name}" or "{project}/{family-name}" + matches := resolveImageProjectImageShorthand.FindStringSubmatch(new) + newProject := matches[1] + newName := matches[2] + + return diskImageProjectNameEquals(oldProject, newProject) && + (diskImageEquals(oldName, newName) || diskImageFamilyEquals(oldName, newName)) + } + + // Image or family only + if diskImageEquals(oldName, new) || diskImageFamilyEquals(oldName, new) { + // Value is "{image-name}" or "{family-name}" + return true + } + + return false +} + +func diskImageProjectNameEquals(project1, project2 string) bool { + // Convert short project name to full name + // For instance, centos => centos-cloud + fullProjectName, ok := imageMap[project2] + if ok { + project2 = fullProjectName + } + + return project1 == project2 +} + +func diskImageEquals(oldImageName, newImageName string) bool { + return oldImageName == newImageName +} + +func diskImageFamilyEquals(imageName, familyName string) bool { + // Handles the case when the image name includes the family name + // e.g. image name: debian-9-drawfork-v20180109, family name: debian-9 + if strings.Contains(imageName, familyName) { + return true + } + + if suppressCanonicalFamilyDiff(imageName, familyName) { + return true + } + + if suppressWindowsSqlFamilyDiff(imageName, familyName) { + return true + } + + if suppressWindowsFamilyDiff(imageName, familyName) { + return true + } + + return false +} + +// e.g. 
image: ubuntu-1404-trusty-v20180122, family: ubuntu-1404-lts +func suppressCanonicalFamilyDiff(imageName, familyName string) bool { + parts := canonicalUbuntuLtsImage.FindStringSubmatch(imageName) + if len(parts) == 2 { + f := fmt.Sprintf("ubuntu-%s-lts", parts[1]) + if f == familyName { + return true + } + } + + return false +} + +// e.g. image: sql-2017-standard-windows-2016-dc-v20180109, family: sql-std-2017-win-2016 +// e.g. image: sql-2017-express-windows-2012-r2-dc-v20180109, family: sql-exp-2017-win-2012-r2 +func suppressWindowsSqlFamilyDiff(imageName, familyName string) bool { + parts := windowsSqlImage.FindStringSubmatch(imageName) + if len(parts) == 5 { + edition := parts[2] // enterprise, standard or web. + sqlVersion := parts[1] + windowsVersion := parts[3] + + // Translate edition + switch edition { + case "enterprise": + edition = "ent" + case "standard": + edition = "std" + case "express": + edition = "exp" + } + + var f string + if revision := parts[4]; revision != "" { + // With revision + f = fmt.Sprintf("sql-%s-%s-win-%s-r%s", edition, sqlVersion, windowsVersion, revision) + } else { + // No revision + f = fmt.Sprintf("sql-%s-%s-win-%s", edition, sqlVersion, windowsVersion) + } + + if f == familyName { + return true + } + } + + return false +} + +// e.g. image: windows-server-1709-dc-core-v20180109, family: windows-1709-core +// e.g. 
image: windows-server-1709-dc-core-for-containers-v20180109, family: "windows-1709-core-for-containers +func suppressWindowsFamilyDiff(imageName, familyName string) bool { + updatedFamilyString := strings.Replace(familyName, "windows-", "windows-server-", 1) + updatedFamilyString = strings.Replace(updatedFamilyString, "-core", "-dc-core", 1) + + if strings.Contains(imageName, updatedFamilyString) { + return true + } + + return false +} + +func diskEncryptionKeyDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + if strings.HasSuffix(k, "#") { + _, ok := d.GetOk("disk_encryption_key_raw") + // If we have a disk_encryption_key_raw, we can trust that the diff will be handled there + // and we don't need to worry about it here. + if old == "1" && new == "0" && ok { + return true + } + } + return false +} + func resourceComputeDisk() *schema.Resource { return &schema.Resource{ Create: resourceComputeDiskCreate, Read: resourceComputeDiskRead, Update: resourceComputeDiskUpdate, Delete: resourceComputeDiskDelete, + Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + State: resourceComputeDiskImport, }, Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(5 * time.Minute), - Delete: schema.DefaultTimeout(5 * time.Minute), + Create: schema.DefaultTimeout(300 * time.Second), + Update: schema.DefaultTimeout(240 * time.Second), + Delete: schema.DefaultTimeout(240 * time.Second), }, + CustomizeDiff: customdiff.All( + customdiff.ForceNewIfChange("size", isDiskShrinkage)), Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, ForceNew: true, }, - - "zone": &schema.Schema{ + "zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + "description": { Type: schema.TypeString, Optional: true, - Computed: true, ForceNew: true, }, - - 
"disk_encryption_key_raw": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, - - "disk_encryption_key_sha256": &schema.Schema{ - Type: schema.TypeString, + "size": { + Type: schema.TypeInt, Computed: true, + Optional: true, }, - - "image": &schema.Schema{ + "image": { Type: schema.TypeString, Optional: true, ForceNew: true, DiffSuppressFunc: diskImageDiffSuppress, }, - - "project": &schema.Schema{ + "type": { Type: schema.TypeString, Optional: true, - Computed: true, ForceNew: true, + Default: "pd-standard", }, - - "size": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, + "disk_encryption_key": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + DiffSuppressFunc: diskEncryptionKeyDiffSuppress, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "raw_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "sha256": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, + "source_image_encryption_key": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "raw_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "sha256": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, }, - - "snapshot": &schema.Schema{ + "snapshot": { Type: schema.TypeString, Optional: true, ForceNew: true, DiffSuppressFunc: linkDiffSuppress, }, - - "type": &schema.Schema{ - Type: schema.TypeString, + "source_snapshot_encryption_key": { + Type: schema.TypeList, Optional: true, - Default: "pd-standard", ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "raw_key": { + Type: schema.TypeString, + 
Optional: true, + ForceNew: true, + }, + "sha256": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, }, - - "users": &schema.Schema{ + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "last_attach_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "last_detach_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "source_image_id": { + Type: schema.TypeString, + Computed: true, + }, + "source_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "users": { Type: schema.TypeList, Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "disk_encryption_key_raw": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + Deprecated: "Use disk_encryption_key.raw_key instead.", }, - "labels": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, + "disk_encryption_key_sha256": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Deprecated: "Use disk_encryption_key.sha256 instead.", }, "label_fingerprint": &schema.Schema{ Type: schema.TypeString, Computed: true, }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, }, - CustomizeDiff: customdiff.All( - customdiff.ForceNewIfChange("size", isDiskShrinkage)), } } @@ -132,94 +425,190 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { return err } - // Get the zone - z, err := getZone(d, config) + descriptionProp, err := expandComputeDiskDescription(d.Get("description"), d, config) + if err != nil { + return err + } + labelsProp, err := expandComputeDiskLabels(d.Get("labels"), d, config) if err != nil { return err } - log.Printf("[DEBUG] Loading zone: %s", z) - zone, err := config.clientCompute.Zones.Get( - project, z).Do() + 
nameProp, err := expandComputeDiskName(d.Get("name"), d, config) if err != nil { - return fmt.Errorf( - "Error loading zone '%s': %s", z, err) + return err + } + sizeGbProp, err := expandComputeDiskSize(d.Get("size"), d, config) + if err != nil { + return err + } + sourceImageProp, err := expandComputeDiskImage(d.Get("image"), d, config) + if err != nil { + return err + } + typeProp, err := expandComputeDiskType(d.Get("type"), d, config) + if err != nil { + return err + } + zoneProp, err := expandComputeDiskZone(d.Get("zone"), d, config) + if err != nil { + return err + } + diskEncryptionKeyProp, err := expandComputeDiskDiskEncryptionKey(d.Get("disk_encryption_key"), d, config) + if err != nil { + return err + } + sourceImageEncryptionKeyProp, err := expandComputeDiskSourceImageEncryptionKey(d.Get("source_image_encryption_key"), d, config) + if err != nil { + return err + } + sourceSnapshotProp, err := expandComputeDiskSnapshot(d.Get("snapshot"), d, config) + if err != nil { + return err + } + sourceSnapshotEncryptionKeyProp, err := expandComputeDiskSourceSnapshotEncryptionKey(d.Get("source_snapshot_encryption_key"), d, config) + if err != nil { + return err } - // Build the disk parameter - disk := &compute.Disk{ - Name: d.Get("name").(string), - SizeGb: int64(d.Get("size").(int)), + obj := map[string]interface{}{ + "description": descriptionProp, + "labels": labelsProp, + "name": nameProp, + "sizeGb": sizeGbProp, + "sourceImage": sourceImageProp, + "type": typeProp, + "zone": zoneProp, + "diskEncryptionKey": diskEncryptionKeyProp, + "sourceImageEncryptionKey": sourceImageEncryptionKeyProp, + "sourceSnapshot": sourceSnapshotProp, + "sourceSnapshotEncryptionKey": sourceSnapshotEncryptionKeyProp, + } + obj, err = resourceComputeDiskEncoder(d, meta, obj) + if err != nil { + return err } - // If we were given a source image, load that. 
- if v, ok := d.GetOk("image"); ok { - log.Printf("[DEBUG] Resolving image name: %s", v.(string)) - imageUrl, err := resolveImage(config, project, v.(string)) - if err != nil { - return fmt.Errorf( - "Error resolving image name '%s': %s", - v.(string), err) - } + url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/disks") + if err != nil { + return err + } - disk.SourceImage = imageUrl - log.Printf("[DEBUG] Image name resolved to: %s", imageUrl) + log.Printf("[DEBUG] Creating new Disk: %#v", obj) + res, err := Post(config, url, obj) + if err != nil { + return fmt.Errorf("Error creating Disk: %s", err) } - if v, ok := d.GetOk("type"); ok { - log.Printf("[DEBUG] Loading disk type: %s", v.(string)) - diskType, err := readDiskType(config, zone, project, v.(string)) - if err != nil { - return fmt.Errorf( - "Error loading disk type '%s': %s", - v.(string), err) - } + // Store the ID now + id, err := replaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) - disk.Type = diskType.SelfLink + op := &compute.Operation{} + err = Convert(res, op) + if err != nil { + return err } - if v, ok := d.GetOk("snapshot"); ok { - snapshotName := v.(string) - match, _ := regexp.MatchString("^https://www.googleapis.com/compute", snapshotName) - if match { - disk.SourceSnapshot = snapshotName - } else { - log.Printf("[DEBUG] Loading snapshot: %s", snapshotName) - snapshotData, err := config.clientCompute.Snapshots.Get( - project, snapshotName).Do() + waitErr := computeOperationWaitTime( + config.clientCompute, op, project, "Creating Disk", + int(d.Timeout(schema.TimeoutCreate).Minutes())) - if err != nil { - return fmt.Errorf( - "Error loading snapshot '%s': %s", - snapshotName, err) - } - disk.SourceSnapshot = snapshotData.SelfLink - } + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Disk: %s", 
waitErr) } - if v, ok := d.GetOk("disk_encryption_key_raw"); ok { - disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{} - disk.DiskEncryptionKey.RawKey = v.(string) - } + log.Printf("[DEBUG] Finished creating Disk %q: %#v", d.Id(), res) - if _, ok := d.GetOk("labels"); ok { - disk.Labels = expandLabels(d) + return resourceComputeDiskRead(d, meta) +} + +func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err } - op, err := config.clientCompute.Disks.Insert( - project, z, disk).Do() + url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/disks/{{name}}") if err != nil { - return fmt.Errorf("Error creating disk: %s", err) + return err } - // It probably maybe worked, so store the ID now - d.SetId(disk.Name) + res, err := Get(config, url) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("ComputeDisk %q", d.Id())) + } - err = computeOperationWaitTime(config.clientCompute, op, project, "Creating Disk", int(d.Timeout(schema.TimeoutCreate).Minutes())) + res, err = resourceComputeDiskDecoder(d, meta, res) if err != nil { - d.SetId("") return err } - return resourceComputeDiskRead(d, meta) + + if err := d.Set("creation_timestamp", flattenComputeDiskCreationTimestamp(res["creationTimestamp"])); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("description", flattenComputeDiskDescription(res["description"])); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("last_attach_timestamp", flattenComputeDiskLastAttachTimestamp(res["lastAttachTimestamp"])); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("last_detach_timestamp", flattenComputeDiskLastDetachTimestamp(res["lastDetachTimestamp"])); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := 
d.Set("labels", flattenComputeDiskLabels(res["labels"])); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("name", flattenComputeDiskName(res["name"])); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("size", flattenComputeDiskSize(res["sizeGb"])); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("image", flattenComputeDiskImage(res["sourceImage"])); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("type", flattenComputeDiskType(res["type"])); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("users", flattenComputeDiskUsers(res["users"])); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("zone", flattenComputeDiskZone(res["zone"])); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("disk_encryption_key", flattenComputeDiskDiskEncryptionKey(res["diskEncryptionKey"])); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("source_image_encryption_key", flattenComputeDiskSourceImageEncryptionKey(res["sourceImageEncryptionKey"])); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("source_image_id", flattenComputeDiskSourceImageId(res["sourceImageId"])); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("snapshot", flattenComputeDiskSnapshot(res["sourceSnapshot"])); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("source_snapshot_encryption_key", flattenComputeDiskSourceSnapshotEncryptionKey(res["sourceSnapshotEncryptionKey"])); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("source_snapshot_id", flattenComputeDiskSourceSnapshotId(res["sourceSnapshotId"])); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := 
d.Set("self_link", res["selfLink"]); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + + return nil } func resourceComputeDiskUpdate(d *schema.ResourceData, meta interface{}) error { @@ -229,104 +618,84 @@ func resourceComputeDiskUpdate(d *schema.ResourceData, meta interface{}) error { if err != nil { return err } - z, err := getZone(d, config) - if err != nil { - return err - } + + var obj map[string]interface{} + var url string + var res map[string]interface{} + op := &compute.Operation{} + d.Partial(true) - if d.HasChange("size") { - rb := &compute.DisksResizeRequest{ - SizeGb: int64(d.Get("size").(int)), - } - op, err := config.clientCompute.Disks.Resize( - project, z, d.Id(), rb).Do() - if err != nil { - return fmt.Errorf("Error resizing disk: %s", err) - } - d.SetPartial("size") - err = computeOperationWaitTime(config.clientCompute, op, project, "Resizing Disk", int(d.Timeout(schema.TimeoutUpdate).Minutes())) + if d.HasChange("labels") { + labelsProp, err := expandComputeDiskLabels(d.Get("labels"), d, config) if err != nil { return err } - } - if d.HasChange("labels") { - zslr := compute.ZoneSetLabelsRequest{ - Labels: expandLabels(d), - LabelFingerprint: d.Get("label_fingerprint").(string), + obj = map[string]interface{}{ + "labels": labelsProp, + "labelFingerprint": d.Get("label_fingerprint").(string)} + url, err = replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/disks/{{name}}/setLabels") + if err != nil { + return err } - op, err := config.clientCompute.Disks.SetLabels( - project, z, d.Id(), &zslr).Do() + res, err = sendRequest(config, "POST", url, obj) if err != nil { - return fmt.Errorf("Error when setting labels: %s", err) + return fmt.Errorf("Error updating Disk %q: %s", d.Id(), err) } - d.SetPartial("labels") - err = computeOperationWaitTime(config.clientCompute, op, project, 
"Setting labels on disk", int(d.Timeout(schema.TimeoutUpdate).Minutes())) + err = Convert(res, op) if err != nil { return err } - } - d.Partial(false) - return resourceComputeDiskRead(d, meta) -} - -func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + err = computeOperationWaitTime( + config.clientCompute, op, project, "Updating Disk", + int(d.Timeout(schema.TimeoutUpdate).Minutes())) - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } + if err != nil { + return err + } - getDisk := func(zone string) (interface{}, error) { - return config.clientCompute.Disks.Get(project, zone, d.Id()).Do() + d.SetPartial("labels") } + if d.HasChange("size") { + sizeGbProp, err := expandComputeDiskSize(d.Get("size"), d, config) + if err != nil { + return err + } - var disk *compute.Disk - if zone, _ := getZone(d, config); zone != "" { - disk, err = config.clientCompute.Disks.Get( - project, zone, d.Id()).Do() + obj = map[string]interface{}{ + "sizeGb": sizeGbProp, + } + url, err = replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/disks/{{name}}/resize") + if err != nil { + return err + } + res, err = sendRequest(config, "POST", url, obj) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Disk %q", d.Get("name").(string))) + return fmt.Errorf("Error updating Disk %q: %s", d.Id(), err) } - } else { - // If the resource was imported, the only info we have is the ID. Try to find the resource - // by searching in the region of the project. 
- var resource interface{} - resource, err = getZonalResourceFromRegion(getDisk, region, config.clientCompute, project) + err = Convert(res, op) if err != nil { return err } - disk = resource.(*compute.Disk) - } + err = computeOperationWaitTime( + config.clientCompute, op, project, "Updating Disk", + int(d.Timeout(schema.TimeoutUpdate).Minutes())) - d.Set("name", disk.Name) - d.Set("self_link", disk.SelfLink) - d.Set("type", GetResourceNameFromSelfLink(disk.Type)) - d.Set("zone", GetResourceNameFromSelfLink(disk.Zone)) - d.Set("size", disk.SizeGb) - d.Set("users", disk.Users) - if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" { - d.Set("disk_encryption_key_sha256", disk.DiskEncryptionKey.Sha256) + if err != nil { + return err + } + + d.SetPartial("size") } - d.Set("image", disk.SourceImage) - d.Set("snapshot", disk.SourceSnapshot) - d.Set("labels", disk.Labels) - d.Set("label_fingerprint", disk.LabelFingerprint) - d.Set("project", project) + d.Partial(false) - return nil + return resourceComputeDiskRead(d, meta) } func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error { @@ -336,7 +705,8 @@ func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error { if err != nil { return err } - z, err := getZone(d, config) + + url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/disks/{{name}}") if err != nil { return err } @@ -390,213 +760,377 @@ func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error { } } } + log.Printf("[DEBUG] Deleting Disk %q", d.Id()) + res, err := Delete(config, url) + if err != nil { + return fmt.Errorf("Error deleting Disk %q: %s", d.Id(), err) + } - // Delete the disk - op, err := config.clientCompute.Disks.Delete( - project, z, d.Id()).Do() + op := &compute.Operation{} + err = Convert(res, op) if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing 
Disk %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - return nil - } - return fmt.Errorf("Error deleting disk: %s", err) + return err } - err = computeOperationWaitTime(config.clientCompute, op, project, "Deleting Disk", int(d.Timeout(schema.TimeoutDelete).Minutes())) + err = computeOperationWaitTime( + config.clientCompute, op, project, "Deleting Disk", + int(d.Timeout(schema.TimeoutDelete).Minutes())) + if err != nil { return err } - d.SetId("") + log.Printf("[DEBUG] Finished deleting Disk %q: %#v", d.Id(), res) return nil } -// Is the new disk size smaller than the old one? -func isDiskShrinkage(old, new, _ interface{}) bool { - // It's okay to remove size entirely. - if old == nil || new == nil { - return false +func resourceComputeDiskImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + parseImportId([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/disks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config) + + // Replace import id for the resource id + id, err := replaceVars(d, config, "{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) } - return new.(int) < old.(int) -} + d.SetId(id) + // In the end, it's possible that someone has tried to import + // a disk using only the region. To find out what zone the + // disk is in, we need to check every zone in the region, to + // see if we can find a disk with the same name. This will + // find the first disk in the specified region with a matching + // name. There might be multiple matching disks - we're not + // considering that an error case here. We don't check for it. 
+ if zone, err := getZone(d, config); err != nil || zone == "" { + project, err := getProject(d, config) + if err != nil { + return nil, err + } + region, err := getRegion(d, config) + if err != nil { + return nil, err + } -// We cannot suppress the diff for the case when family name is not part of the image name since we can't -// make a network call in a DiffSuppressFunc. -func diskImageDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - // 'old' is read from the API. - // It always has the format 'https://www.googleapis.com/compute/v1/projects/(%s)/global/images/(%s)' - matches := resolveImageLink.FindStringSubmatch(old) - if matches == nil { - // Image read from the API doesn't have the expected format. In practice, it should never happen - return false + getDisk := func(zone string) (interface{}, error) { + return config.clientCompute.Disks.Get(project, zone, d.Id()).Do() + } + resource, err := getZonalResourceFromRegion(getDisk, region, config.clientCompute, project) + if err != nil { + return nil, err + } + d.Set("zone", resource.(*compute.Disk).Zone) } - oldProject := matches[1] - oldName := matches[2] - // Partial or full self link family - if resolveImageProjectFamily.MatchString(new) { - // Value matches pattern "projects/{project}/global/images/family/{family-name}$" - matches := resolveImageProjectFamily.FindStringSubmatch(new) - newProject := matches[1] - newFamilyName := matches[2] + return []*schema.ResourceData{d}, nil +} - return diskImageProjectNameEquals(oldProject, newProject) && diskImageFamilyEquals(oldName, newFamilyName) - } +func flattenComputeDiskCreationTimestamp(v interface{}) interface{} { + return v +} - // Partial or full self link image - if resolveImageProjectImage.MatchString(new) { - // Value matches pattern "projects/{project}/global/images/{image-name}$" - matches := resolveImageProjectImage.FindStringSubmatch(new) - newProject := matches[1] - newImageName := matches[2] +func flattenComputeDiskDescription(v 
interface{}) interface{} { + return v +} - return diskImageProjectNameEquals(oldProject, newProject) && diskImageEquals(oldName, newImageName) - } +func flattenComputeDiskLastAttachTimestamp(v interface{}) interface{} { + return v +} - // Partial link without project family - if resolveImageGlobalFamily.MatchString(new) { - // Value is "global/images/family/{family-name}" - matches := resolveImageGlobalFamily.FindStringSubmatch(new) - familyName := matches[1] +func flattenComputeDiskLastDetachTimestamp(v interface{}) interface{} { + return v +} - return diskImageFamilyEquals(oldName, familyName) - } +func flattenComputeDiskLabels(v interface{}) interface{} { + return v +} - // Partial link without project image - if resolveImageGlobalImage.MatchString(new) { - // Value is "global/images/{image-name}" - matches := resolveImageGlobalImage.FindStringSubmatch(new) - imageName := matches[1] +func flattenComputeDiskName(v interface{}) interface{} { + return v +} - return diskImageEquals(oldName, imageName) +func flattenComputeDiskSize(v interface{}) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil { + return intVal + } // let terraform core handle it if we can't convert the string to an int. 
} + return v +} - // Family shorthand - if resolveImageFamilyFamily.MatchString(new) { - // Value is "family/{family-name}" - matches := resolveImageFamilyFamily.FindStringSubmatch(new) - familyName := matches[1] +func flattenComputeDiskImage(v interface{}) interface{} { + return v +} - return diskImageFamilyEquals(oldName, familyName) - } +func flattenComputeDiskType(v interface{}) interface{} { + return NameFromSelfLinkStateFunc(v) +} - // Shorthand for image or family - if resolveImageProjectImageShorthand.MatchString(new) { - // Value is "{project}/{image-name}" or "{project}/{family-name}" - matches := resolveImageProjectImageShorthand.FindStringSubmatch(new) - newProject := matches[1] - newName := matches[2] +func flattenComputeDiskUsers(v interface{}) interface{} { + return v +} - return diskImageProjectNameEquals(oldProject, newProject) && - (diskImageEquals(oldName, newName) || diskImageFamilyEquals(oldName, newName)) +func flattenComputeDiskZone(v interface{}) interface{} { + return NameFromSelfLinkStateFunc(v) +} + +func flattenComputeDiskDiskEncryptionKey(v interface{}) interface{} { + if v == nil { + return nil } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["raw_key"] = + flattenComputeDiskDiskEncryptionKeyRawKey(original["rawKey"]) + transformed["sha256"] = + flattenComputeDiskDiskEncryptionKeySha256(original["sha256"]) + return []interface{}{transformed} +} +func flattenComputeDiskDiskEncryptionKeyRawKey(v interface{}) interface{} { + return v +} - // Image or family only - if diskImageEquals(oldName, new) || diskImageFamilyEquals(oldName, new) { - // Value is "{image-name}" or "{family-name}" - return true +func flattenComputeDiskDiskEncryptionKeySha256(v interface{}) interface{} { + return v +} + +func flattenComputeDiskSourceImageEncryptionKey(v interface{}) interface{} { + if v == nil { + return nil } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + 
transformed["raw_key"] = + flattenComputeDiskSourceImageEncryptionKeyRawKey(original["rawKey"]) + transformed["sha256"] = + flattenComputeDiskSourceImageEncryptionKeySha256(original["sha256"]) + return []interface{}{transformed} +} +func flattenComputeDiskSourceImageEncryptionKeyRawKey(v interface{}) interface{} { + return v +} - return false +func flattenComputeDiskSourceImageEncryptionKeySha256(v interface{}) interface{} { + return v } -func diskImageProjectNameEquals(project1, project2 string) bool { - // Convert short project name to full name - // For instance, centos => centos-cloud - fullProjectName, ok := imageMap[project2] - if ok { - project2 = fullProjectName +func flattenComputeDiskSourceImageId(v interface{}) interface{} { + return v +} + +func flattenComputeDiskSnapshot(v interface{}) interface{} { + return v +} + +func flattenComputeDiskSourceSnapshotEncryptionKey(v interface{}) interface{} { + if v == nil { + return nil } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["raw_key"] = + flattenComputeDiskSourceSnapshotEncryptionKeyRawKey(original["rawKey"]) + transformed["sha256"] = + flattenComputeDiskSourceSnapshotEncryptionKeySha256(original["sha256"]) + return []interface{}{transformed} +} +func flattenComputeDiskSourceSnapshotEncryptionKeyRawKey(v interface{}) interface{} { + return v +} - return project1 == project2 +func flattenComputeDiskSourceSnapshotEncryptionKeySha256(v interface{}) interface{} { + return v } -func diskImageEquals(oldImageName, newImageName string) bool { - return oldImageName == newImageName +func flattenComputeDiskSourceSnapshotId(v interface{}) interface{} { + return v } -func diskImageFamilyEquals(imageName, familyName string) bool { - // Handles the case when the image name includes the family name - // e.g. 
image name: debian-9-drawfork-v20180109, family name: debian-9 - if strings.Contains(imageName, familyName) { - return true +func expandComputeDiskDescription(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskLabels(v interface{}, d *schema.ResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) } + return m, nil +} - if suppressCanonicalFamilyDiff(imageName, familyName) { - return true +func expandComputeDiskName(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskSize(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskImage(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskType(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskZone(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for zone: %s", err) } + return f.RelativeLink(), nil +} - if suppressWindowsSqlFamilyDiff(imageName, familyName) { - return true +func expandComputeDiskDiskEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, 1) + if len(l) == 1 { + // There is a value + outMap := make(map[string]interface{}) + outMap["rawKey"] = l[0].(map[string]interface{})["raw_key"] + req = append(req, outMap) + } else { + // Check alternative setting? 
+ if altV, ok := d.GetOk("disk_encryption_key_raw"); ok { + outMap := make(map[string]interface{}) + outMap["rawKey"] = altV + req = append(req, outMap) + } } + return req, nil +} - if suppressWindowsFamilyDiff(imageName, familyName) { - return true +func expandComputeDiskSourceImageEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, 1) + if len(l) == 1 { + // There is a value + outMap := make(map[string]interface{}) + outMap["rawKey"] = l[0].(map[string]interface{})["raw_key"] + req = append(req, outMap) } + return req, nil +} - return false +func expandComputeDiskSnapshot(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil } -// e.g. image: ubuntu-1404-trusty-v20180122, family: ubuntu-1404-lts -func suppressCanonicalFamilyDiff(imageName, familyName string) bool { - parts := canonicalUbuntuLtsImage.FindStringSubmatch(imageName) - if len(parts) == 2 { - f := fmt.Sprintf("ubuntu-%s-lts", parts[1]) - if f == familyName { - return true - } +func expandComputeDiskSourceSnapshotEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, 1) + if len(l) == 1 { + // There is a value + outMap := make(map[string]interface{}) + outMap["rawKey"] = l[0].(map[string]interface{})["raw_key"] + req = append(req, outMap) } - - return false + return req, nil } -// e.g. image: sql-2017-standard-windows-2016-dc-v20180109, family: sql-std-2017-win-2016 -// e.g. image: sql-2017-express-windows-2012-r2-dc-v20180109, family: sql-exp-2017-win-2012-r2 -func suppressWindowsSqlFamilyDiff(imageName, familyName string) bool { - parts := windowsSqlImage.FindStringSubmatch(imageName) - if len(parts) == 5 { - edition := parts[2] // enterprise, standard or web. 
- sqlVersion := parts[1] - windowsVersion := parts[3] +func resourceComputeDiskEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*Config) - // Translate edition - switch edition { - case "enterprise": - edition = "ent" - case "standard": - edition = "std" - case "express": - edition = "exp" + project, err := getProject(d, config) + if err != nil { + return nil, err + } + + // Get the zone + z, err := getZone(d, config) + if err != nil { + return nil, err + } + + zone, err := config.clientCompute.Zones.Get(project, z).Do() + if err != nil { + return nil, err + } + + if v, ok := d.GetOk("image"); ok { + log.Printf("[DEBUG] Resolving image name: %s", v.(string)) + imageUrl, err := resolveImage(config, project, v.(string)) + if err != nil { + return nil, fmt.Errorf( + "Error resolving image name '%s': %s", + v.(string), err) } - var f string - if revision := parts[4]; revision != "" { - // With revision - f = fmt.Sprintf("sql-%s-%s-win-%s-r%s", edition, sqlVersion, windowsVersion, revision) - } else { - // No revision - f = fmt.Sprintf("sql-%s-%s-win-%s", edition, sqlVersion, windowsVersion) + obj["sourceImage"] = imageUrl + log.Printf("[DEBUG] Image name resolved to: %s", imageUrl) + } + + if v, ok := d.GetOk("type"); ok { + log.Printf("[DEBUG] Loading disk type: %s", v.(string)) + diskType, err := readDiskType(config, zone, project, v.(string)) + if err != nil { + return nil, fmt.Errorf( + "Error loading disk type '%s': %s", + v.(string), err) } - if f == familyName { - return true + obj["type"] = diskType.SelfLink + } + + if v, ok := d.GetOk("snapshot"); ok { + snapshotName := v.(string) + match, _ := regexp.MatchString("^https://www.googleapis.com/compute", snapshotName) + if match { + obj["sourceSnapshot"] = snapshotName + } else { + log.Printf("[DEBUG] Loading snapshot: %s", snapshotName) + snapshotData, err := config.clientCompute.Snapshots.Get( + project, snapshotName).Do() + + if 
err != nil { + return nil, fmt.Errorf( + "Error loading snapshot '%s': %s", + snapshotName, err) + } + obj["sourceSnapshot"] = snapshotData.SelfLink } } - return false + return obj, nil } -// e.g. image: windows-server-1709-dc-core-v20180109, family: windows-1709-core -// e.g. image: windows-server-1709-dc-core-for-containers-v20180109, family: "windows-1709-core-for-containers -func suppressWindowsFamilyDiff(imageName, familyName string) bool { - updatedFamilyString := strings.Replace(familyName, "windows-", "windows-server-", 1) - updatedFamilyString = strings.Replace(updatedFamilyString, "-core", "-dc-core", 1) +func resourceComputeDiskDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + if v, ok := res["diskEncryptionKey"]; ok { + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + // The raw key won't be returned, so we need to use the original. + transformed["raw_key"] = d.Get("disk_encryption_key.0.raw_key") + transformed["sha256"] = original["sha256"] + if v, ok := d.GetOk("disk_encryption_key_raw"); ok { + transformed["raw_key"] = v + } + d.Set("disk_encryption_key_sha256", original["sha256"]) + res["diskEncryptionKey"] = transformed + } - if strings.Contains(imageName, updatedFamilyString) { - return true + if v, ok := res["sourceImageEncryptionKey"]; ok { + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + // The raw key won't be returned, so we need to use the original. + transformed["raw_key"] = d.Get("source_image_encryption_key.0.raw_key") + transformed["sha256"] = original["sha256"] + res["sourceImageEncryptionKey"] = transformed } - return false + if v, ok := res["sourceSnapshotEncryptionKey"]; ok { + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + // The raw key won't be returned, so we need to use the original. 
+ transformed["raw_key"] = d.Get("source_snapshot_encryption_key.0.raw_key") + transformed["sha256"] = original["sha256"] + res["sourceSnapshotEncryptionKey"] = transformed + } + + d.Set("label_fingerprint", res["labelFingerprint"]) + + return res, nil } diff --git a/website/docs/r/compute_disk.html.markdown b/website/docs/r/compute_disk.html.markdown index 7c0b51c208e..be881390b98 100644 --- a/website/docs/r/compute_disk.html.markdown +++ b/website/docs/r/compute_disk.html.markdown @@ -1,19 +1,51 @@ --- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- layout: "google" page_title: "Google: google_compute_disk" sidebar_current: "docs-google-compute-disk" description: |- - Creates a new persistent disk within GCE, based on another disk. + Persistent disks are durable storage devices that function similarly to + the physical disks in a desktop or a server. --- # google\_compute\_disk -Creates a new persistent disk within GCE, based on another disk. For more information see -[the official documentation](https://cloud.google.com/compute/docs/disks/add-persistent-disk) -and -[API](https://cloud.google.com/compute/docs/reference/latest/disks). +Persistent disks are durable storage devices that function similarly to +the physical disks in a desktop or a server. Compute Engine manages the +hardware behind these devices to ensure data redundancy and optimize +performance for you. Persistent disks are available as either standard +hard disk drives (HDD) or solid-state drives (SSD). 
-~> **Note:** All arguments including the disk encryption key will be stored in the raw state as plain-text. +Persistent disks are located independently from your virtual machine +instances, so you can detach or move persistent disks to keep your data +even after you delete your instances. Persistent disk performance scales +automatically with size, so you can resize your existing persistent disks +or add more persistent disks to an instance to meet your performance and +storage space requirements. + +Add a persistent disk to your instance when you need reliable and +affordable storage with consistent performance characteristics. + +To get more information about Disk, see: + +* [API documentation](https://cloud.google.com/compute/docs/reference/latest/disks) +* How-to Guides + * [Adding a persistent disk](https://cloud.google.com/compute/docs/disks/add-persistent-disk) + +~> **Warning:** All arguments including the disk encryption key will be stored in the raw +state as plain-text. [Read more about sensitive data in state](/docs/state/sensitive-data.html). ## Example Usage @@ -34,69 +66,176 @@ resource "google_compute_disk" "default" { The following arguments are supported: -* `name` - (Required) A unique name for the resource, required by GCE. - Changing this forces a new resource to be created. +* `name` - + (Required) + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. +* `zone` - + (Required) + A reference to the zone where the disk resides. -* `zone` - (Required) The zone where this disk will be available. 
- - - -* `disk_encryption_key_raw` - (Optional) A 256-bit [customer-supplied encryption key] - (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption), - encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4) - to encrypt this disk. - -* `image` - (Optional) The image from which to initialize this disk. This can be - one of: the image's `self_link`, `projects/{project}/global/images/{image}`, - `projects/{project}/global/images/family/{family}`, `global/images/{image}`, - `global/images/family/{family}`, `family/{family}`, `{project}/{family}`, - `{project}/{image}`, `{family}`, or `{image}`. If referred by family, the - images names must include the family name. If they don't, use the - [google_compute_image data source](/docs/providers/google/d/datasource_compute_image.html). - For instance, the image `centos-6-v20180104` includes its family name `centos-6`. - These images can be referred by family name here. - -* `project` - (Optional) The ID of the project in which the resource belongs. If it - is not provided, the provider project is used. - -* `size` - (Optional) The size of the image in gigabytes. If not specified, it - will inherit the size of its base image. - -* `snapshot` - (Optional) Name of snapshot from which to initialize this disk. - -* `type` - (Optional) The GCE disk type. - -* `labels` - (Optional) A set of key/value label pairs to assign to the image. - +* `description` - + (Optional) + An optional description of this resource. Provide this property when + you create the resource. +* `labels` - + (Optional) + Labels to apply to this disk. A list of key->value pairs. +* `size` - + (Optional) + Size of the persistent disk, specified in GB. You can specify this + field when creating a persistent disk using the sourceImage or + sourceSnapshot parameter, or specify it alone to create an empty + persistent disk. 
+ + If you specify this field along with sourceImage or sourceSnapshot, + the value of sizeGb must not be less than the size of the sourceImage + or the size of the snapshot. +* `image` - + (Optional) + The image from which to initialize this disk. This can be + one of: the image's `self_link`, `projects/{project}/global/images/{image}`, + `projects/{project}/global/images/family/{family}`, `global/images/{image}`, + `global/images/family/{family}`, `family/{family}`, `{project}/{family}`, + `{project}/{image}`, `{family}`, or `{image}`. If referred by family, the + image names must include the family name. If they don't, use the + [google_compute_image data source](/docs/providers/google/d/datasource_compute_image.html). + For instance, the image `centos-6-v20180104` includes its family name `centos-6`. + These images can be referred by family name here. +* `type` - + (Optional) + URL of the disk type resource describing which disk type to use to + create the disk. Provide this when creating the disk. +* `disk_encryption_key` - + (Optional) + Encrypts the disk using a customer-supplied encryption key. + + After you encrypt a disk with a customer-supplied key, you must + provide the same key if you use the disk later (e.g. to create a disk + snapshot or an image, or to attach the disk to a virtual machine). + + Customer-supplied encryption keys do not protect access to metadata of + the disk. + + If you do not provide an encryption key when creating the disk, then + the disk will be encrypted using an automatically generated key and + you do not need to provide a key to use the disk later. Structure is documented below. +* `source_image_encryption_key` - + (Optional) + The customer-supplied encryption key of the source image. Required if + the source image is protected by a customer-supplied encryption key. Structure is documented below. +* `snapshot` - + (Optional) + The source snapshot used to create this disk. 
You can provide this as + a partial or full URL to the resource. For example, the following are + valid values: + + * https://www.googleapis.com/compute/v1/projects/project/global/ + snapshots/snapshot + * projects/project/global/snapshots/snapshot + * global/snapshots/snapshot + * snapshot +* `source_snapshot_encryption_key` - + (Optional) + The customer-supplied encryption key of the source snapshot. Required + if the source snapshot is protected by a customer-supplied encryption + key. Structure is documented below. +* `project` (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + +The `disk_encryption_key` block supports: +* `raw_key` - + (Optional) + Specifies a 256-bit customer-supplied encryption key, encoded in + RFC 4648 base64 to either encrypt or decrypt this resource. +* `sha256` - + The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + + +The `source_image_encryption_key` block supports: +* `raw_key` - + (Optional) + Specifies a 256-bit customer-supplied encryption key, encoded in + RFC 4648 base64 to either encrypt or decrypt this resource. +* `sha256` - + The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + + +The `source_snapshot_encryption_key` block supports: +* `raw_key` - + (Optional) + Specifies a 256-bit customer-supplied encryption key, encoded in + RFC 4648 base64 to either encrypt or decrypt this resource. +* `sha256` - + The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + + + +* (Deprecated) `disk_encryption_key_raw`: This is an alias for + `disk_encryption_key.raw_key`. It is deprecated to enhance + consistency with `source_image_encryption_key` and + `source_snapshot_encryption_key`. 
## Attributes Reference -In addition to the arguments listed above, the following computed attributes are -exported: - -* `disk_encryption_key_sha256` - The [RFC 4648 base64] - (https://tools.ietf.org/html/rfc4648#section-4) encoded SHA-256 hash of the - [customer-supplied encryption key](https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) - that protects this resource. - +In addition to the arguments listed above, the following computed attributes are exported: + +* `creation_timestamp` - + Creation timestamp in RFC3339 text format. +* `last_attach_timestamp` - + Last attach timestamp in RFC3339 text format. +* `last_detach_timestamp` - + Last detach timestamp in RFC3339 text format. +* `users` - + Links to the users of the disk (attached instances) in form: + project/zones/zone/instances/instance +* `source_image_id` - + The ID value of the image used to create this disk. This value + identifies the exact image that was used to create this persistent + disk. For example, if you created the persistent disk from an image + that was later deleted and recreated under the same name, the source + image ID would identify the exact version of the image that was used. +* `source_snapshot_id` - + The unique ID of the snapshot used to create this disk. This value + identifies the exact snapshot that was used to create this persistent + disk. For example, if you created the persistent disk from a snapshot + that was later deleted and recreated under the same name, the source + snapshot ID would identify the exact version of the snapshot that was + used. * `self_link` - The URI of the created resource. -* `users` - The Users of the created resource. - -* `label_fingerprint` - The fingerprint of the assigned labels. +* (Deprecated) `disk_encryption_key_sha256`: This is an alias for + `disk_encryption_key.sha256`. It is deprecated to enhance + consistency with `source_image_encryption_key` and + `source_snapshot_encryption_key`. 
+* `label_fingerprint`: The fingerprint of the assigned labels. Provided + when labels are updated to prevent contention (first-write-wins). ## Timeouts -`google_compute_disk` provides the following +This resource provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: -- `create` - (Default `5 minutes`) Used for creating disks. -- `update` - (Default `5 minutes`) Used for resizing a disk and setting labels on disks. -- `delete` - (Default `5 minutes`) Used for destroying disks (not including time to detach the disk from instances). +- `create` - Default is 4 minutes. +- `update` - Default is 4 minutes. +- `delete` - Default is 4 minutes. ## Import -Disks can be imported using the `name`, e.g. +Disk can be imported using any of these accepted formats: ``` -$ terraform import google_compute_disk.default test-disk +$ terraform import google_compute_disk.default projects/{{project}}/zones/{{zone}}/disks/{{name}} +$ terraform import google_compute_disk.default {{project}}/{{zone}}/{{name}} +$ terraform import google_compute_disk.default {{name}} ```