
vSphere provider - added disk to tf and vm is deleted and created - vm should not have been deleted #28

Closed
hashibot opened this issue Jun 13, 2017 · 1 comment
Labels: bug (Type: Bug)

Comments

@hashibot

This issue was originally opened by @chrislovecnm as hashicorp/terraform#7217. It was migrated here as part of the provider split. The original body of the issue is below.


Expected behavior: the disk is added to the existing VM. Actual behavior: the VM is deleted and recreated with two disks.

Testing this against: hashicorp/terraform#7031

I suspect this may be happening with other versions as well, but I am not certain.
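
For reference, the only substantive change between the two configs below is this additional disk block on the "mi-worker-nodes" resource (quoted from the updated config further down):

disk {
  use_sdrs = true
  size = "50"
  type = "thin"
  datastore = "${var.datastore}"
  name = "${var.short_name}-worker-${format("%02d", count.index+1)}-disk1"
}

Applying only this addition is what triggers the delete-and-create instead of an in-place disk add.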

Base TF:

provider "vsphere" {
  vsphere_server = "*****"
  user = "*****"
  password = "*****"
  allow_unverified_ssl = "true"
}

module "vsphere-dc" {
  source = "./terraform/vsphere"
  long_name = "es-cl-mantl-engpipeline"
  short_name = "es-cl-mantl"
  datacenter = "Datacenter"
  cluster = "cluster"
  pool = "" # format is cluster_name/Resources/pool_name
  template = "mantl-tpl-cent7"
  network_label = "vlan409"
  domain = "mydomain.net"
  dns_server1 = "8.4.4.4"
  dns_server2 = "8.8.8.8"
  datastore = "POOL01"
  control_count = 1
  worker_count = 1
  edge_count = 1
  kubeworker_count = 0
  control_volume_size = 20 # size in gigabytes
  worker_volume_size = 20
  edge_volume_size = 20
  ssh_user = "root"
  # FIXME
  ssh_key = "foo.rsa"
  consul_dc = "dc2"

  #Optional Parameters
  #folder = ""
  #control_cpu = ""
  #worker_cpu = ""
  #edge_cpu = ""
  #control_ram = ""
  #worker_ram = ""
  #edge_ram = ""
  #disk_type = "" # thin or eager_zeroed, default is thin
  #linked_clone = "" # true or false, default is false. If you use linked clones and have problems installing Mantl, revert to full clones
}

Main TF that was applied and worked:

variable "datacenter" {}
variable "cluster" {}
variable "pool" {}
variable "template" {}
variable "linked_clone" { default = false }
variable "ssh_user" {}
variable "ssh_key" {}
variable "consul_dc" {}
variable "datastore" {}
variable "disk_type" { default = "thin" }
variable "network_label" {}

variable "short_name" {default = "mantl"}
variable "long_name" {default = "mantl"}

variable "folder" {default = ""}
variable "control_count" {default = 3}
variable "worker_count" {default = 2}
variable "kubeworker_count" {default = 0}
variable "edge_count" {default = 2}
variable "control_volume_size" {default = 20}
variable "worker_volume_size" {default = 20}
variable "edge_volume_size" {default = 20}
variable "control_cpu" { default = 1 }
variable "worker_cpu" { default = 1 }
variable "edge_cpu" { default = 1 }
variable "control_ram" { default = 4096 }
variable "worker_ram" { default = 4096 }
variable "edge_ram" { default = 4096 }

variable "domain" { default = "" }
variable "dns_server1" { default = "" }
variable "dns_server2" { default = "" }

resource "vsphere_virtual_machine" "mi-control-nodes" {
  name = "${var.short_name}-control-${format("%02d", count.index+1)}"
  datacenter = "${var.datacenter}"
  folder = "${var.folder}"
  cluster = "${var.cluster}"
  resource_pool = "${var.pool}"

  vcpu = "${var.control_cpu}"
  memory = "${var.control_ram}"

  linked_clone = "${var.linked_clone}"

  disk {
    use_sdrs = true
    #size = "${var.control_volume_size}"
    template = "${var.template}"
    type = "${var.disk_type}"
    datastore = "${var.datastore}"
    #name = "${var.short_name}-control-${format("%02d", count.index+1)}-disk1"
  }

  network_interface {
    label = "${var.network_label}"
  }

  domain = "${var.domain}"
  dns_servers = ["${var.dns_server1}", "${var.dns_server2}"]

  custom_configuration_parameters = {
    role = "control"
    ssh_user = "${var.ssh_user}"
    consul_dc = "${var.consul_dc}"
  }

  connection = {
      user = "${var.ssh_user}"
      key_file = "${var.ssh_key}"
      host = "${self.network_interface.0.ipv4_address}"
  }

  provisioner "remote-exec" {
    inline = [ "sudo hostnamectl --static set-hostname ${self.name}" ]
  }

  count = "${var.control_count}"
}

resource "vsphere_virtual_machine" "mi-worker-nodes" {
  name = "${var.short_name}-worker-${format("%03d", count.index+1)}"
  datacenter = "${var.datacenter}"
  folder = "${var.folder}"
  cluster = "${var.cluster}"
  resource_pool = "${var.pool}"

  vcpu = "${var.worker_cpu}"
  memory = "${var.worker_ram}"

  linked_clone = "${var.linked_clone}"

  disk {
    use_sdrs = true
    #size = "${var.worker_volume_size}"
    template = "${var.template}"
    type = "${var.disk_type}"
    datastore = "${var.datastore}"
    #name = "${var.short_name}-worker-${format("%02d", count.index+1)}-disk1"
  }

  network_interface {
    label = "${var.network_label}"
  }

  domain = "${var.domain}"
  dns_servers = ["${var.dns_server1}", "${var.dns_server2}"]

  custom_configuration_parameters = {
    role = "worker"
    ssh_user = "${var.ssh_user}"
    consul_dc = "${var.consul_dc}"
  }

  connection = {
      user = "${var.ssh_user}"
      key_file = "${var.ssh_key}"
      host = "${self.network_interface.0.ipv4_address}"
  }

  provisioner "remote-exec" {
    inline = [ "sudo hostnamectl --static set-hostname ${self.name}" ]
  }

  count = "${var.worker_count}"
}

resource "vsphere_virtual_machine" "mi-kubeworker-nodes" {
  name = "${var.short_name}-kubeworker-${format("%03d", count.index+1)}"

  datacenter = "${var.datacenter}"
  folder = "${var.folder}"
  cluster = "${var.cluster}"
  resource_pool = "${var.pool}"

  vcpu = "${var.worker_cpu}"
  memory = "${var.worker_ram}"

  linked_clone = "${var.linked_clone}"

  disk {
    use_sdrs = true
    #size = "${var.worker_volume_size}"
    template = "${var.template}"
    type = "${var.disk_type}"
    datastore = "${var.datastore}"
    #name = "${var.short_name}-kubeworker-${format("%02d", count.index+1)}-disk1"
  }

  network_interface {
    label = "${var.network_label}"
  }

  domain = "${var.domain}"
  dns_servers = ["${var.dns_server1}", "${var.dns_server2}"]

  custom_configuration_parameters = {
    role = "kubeworker"
    ssh_user = "${var.ssh_user}"
    consul_dc = "${var.consul_dc}"
  }

  connection = {
      user = "${var.ssh_user}"
      key_file = "${var.ssh_key}"
      host = "${self.network_interface.0.ipv4_address}"
  }

  provisioner "remote-exec" {
    inline = [ "sudo hostnamectl --static set-hostname ${self.name}" ]
  }

  count = "${var.kubeworker_count}"
}

resource "vsphere_virtual_machine" "mi-edge-nodes" {
  name = "${var.short_name}-edge-${format("%02d", count.index+1)}"
  datacenter = "${var.datacenter}"
  folder = "${var.folder}"
  cluster = "${var.cluster}"
  resource_pool = "${var.pool}"

  vcpu = "${var.edge_cpu}"
  memory = "${var.edge_ram}"

  linked_clone = "${var.linked_clone}"

  disk {
    use_sdrs = true
    #size = "${var.edge_volume_size}"
    template = "${var.template}"
    type = "${var.disk_type}"
    datastore = "${var.datastore}"
    #name = "${var.short_name}-edge-${format("%02d", count.index+1)}-disk1"
  }

  network_interface {
    label = "${var.network_label}"
  }

  domain = "${var.domain}"
  dns_servers = ["${var.dns_server1}", "${var.dns_server2}"]

  custom_configuration_parameters = {
    role = "edge"
    ssh_user = "${var.ssh_user}"
    consul_dc = "${var.consul_dc}"
  }

  connection = {
    user = "${var.ssh_user}"
    key_file = "${var.ssh_key}"
    host = "${self.network_interface.0.ipv4_address}"
  }

  provisioner "remote-exec" {
    inline = [ "sudo hostnamectl --static set-hostname ${self.name}" ]
  }

  count = "${var.edge_count}"
}

output "control_ips" {
  value = "${join(\",\", vsphere_virtual_machine.mi-control-nodes.*.network_interface.0.ipv4_address)}"
}

output "worker_ips" {
  value = "${join(\",\", vsphere_virtual_machine.mi-worker-nodes.*.network_interface.0.ipv4_address)}"
}

output "kubeworker_ips" {
  value = "${join(\",\", vsphere_virtual_machine.mi-kubeworker-nodes.*.network_interface.0.ipv4_address)}"
}

output "edge_ips" {
  value = "${join(\",\", vsphere_virtual_machine.mi-edge-nodes.*.network_interface.0.ipv4_address)}"
}

The updated main TF: the "vsphere_virtual_machine" "mi-worker-nodes" resource now has a second disk block.

variable "datacenter" {}
variable "cluster" {}
variable "pool" {}
variable "template" {}
variable "linked_clone" { default = false }
variable "ssh_user" {}
variable "ssh_key" {}
variable "consul_dc" {}
variable "datastore" {}
variable "disk_type" { default = "thin" }
variable "network_label" {}

variable "short_name" {default = "mantl"}
variable "long_name" {default = "mantl"}

variable "folder" {default = ""}
variable "control_count" {default = 3}
variable "worker_count" {default = 2}
variable "kubeworker_count" {default = 0}
variable "edge_count" {default = 2}
variable "control_volume_size" {default = 20}
variable "worker_volume_size" {default = 20}
variable "edge_volume_size" {default = 20}
variable "control_cpu" { default = 1 }
variable "worker_cpu" { default = 1 }
variable "edge_cpu" { default = 1 }
variable "control_ram" { default = 4096 }
variable "worker_ram" { default = 4096 }
variable "edge_ram" { default = 4096 }

variable "domain" { default = "" }
variable "dns_server1" { default = "" }
variable "dns_server2" { default = "" }

resource "vsphere_virtual_machine" "mi-control-nodes" {
  name = "${var.short_name}-control-${format("%02d", count.index+1)}"
  datacenter = "${var.datacenter}"
  folder = "${var.folder}"
  cluster = "${var.cluster}"
  resource_pool = "${var.pool}"

  vcpu = "${var.control_cpu}"
  memory = "${var.control_ram}"

  linked_clone = "${var.linked_clone}"

  disk {
    ##TODO - make this a var and include default false, add note to vsphere-sample.tf
    use_sdrs = true
    #size = "${var.control_volume_size}"
    template = "${var.template}"
    type = "${var.disk_type}"
    datastore = "${var.datastore}"
    #name = "${var.short_name}-control-${format("%02d", count.index+1)}-disk1"
  }

  network_interface {
    label = "${var.network_label}"
  }

  domain = "${var.domain}"
  dns_servers = ["${var.dns_server1}", "${var.dns_server2}"]

  custom_configuration_parameters = {
    role = "control"
    ssh_user = "${var.ssh_user}"
    consul_dc = "${var.consul_dc}"
  }

  connection = {
      user = "${var.ssh_user}"
      key_file = "${var.ssh_key}"
      host = "${self.network_interface.0.ipv4_address}"
  }

  provisioner "remote-exec" {
    inline = [ "sudo hostnamectl --static set-hostname ${self.name}" ]
  }

  count = "${var.control_count}"
}

resource "vsphere_virtual_machine" "mi-worker-nodes" {
  name = "${var.short_name}-worker-${format("%03d", count.index+1)}"
  datacenter = "${var.datacenter}"
  folder = "${var.folder}"
  cluster = "${var.cluster}"
  resource_pool = "${var.pool}"

  vcpu = "${var.worker_cpu}"
  memory = "${var.worker_ram}"

  linked_clone = "${var.linked_clone}"

  disk {
    use_sdrs = true
    #size = "${var.worker_volume_size}"
    template = "${var.template}"
    type = "${var.disk_type}"
    datastore = "${var.datastore}"
    #name = "${var.short_name}-worker-${format("%02d", count.index+1)}-disk1"
  }
  ## TODO: add docker vol to workers
  disk {
    use_sdrs = true
    ## TODO: add docker_vol_disk_size to var file
    size = "50"

    ## TODO: add docker_vol_disk_type to var file
    ##type = "${var.disk_type}"
    type = "thin"
    datastore = "${var.datastore}"
    name = "${var.short_name}-worker-${format("%02d", count.index+1)}-disk1"
  }

  network_interface {
    label = "${var.network_label}"
  }

  domain = "${var.domain}"
  dns_servers = ["${var.dns_server1}", "${var.dns_server2}"]

  custom_configuration_parameters = {
    role = "worker"
    ssh_user = "${var.ssh_user}"
    consul_dc = "${var.consul_dc}"
  }

  connection = {
      user = "${var.ssh_user}"
      key_file = "${var.ssh_key}"
      host = "${self.network_interface.0.ipv4_address}"
  }

  provisioner "remote-exec" {
    inline = [ "sudo hostnamectl --static set-hostname ${self.name}" ]
  }

  count = "${var.worker_count}"
}

resource "vsphere_virtual_machine" "mi-kubeworker-nodes" {
  name = "${var.short_name}-kubeworker-${format("%03d", count.index+1)}"

  datacenter = "${var.datacenter}"
  folder = "${var.folder}"
  cluster = "${var.cluster}"
  resource_pool = "${var.pool}"

  vcpu = "${var.worker_cpu}"
  memory = "${var.worker_ram}"

  linked_clone = "${var.linked_clone}"

  disk {
    use_sdrs = true
    #size = "${var.worker_volume_size}"
    template = "${var.template}"
    type = "${var.disk_type}"
    datastore = "${var.datastore}"
    #name = "${var.short_name}-kubeworker-${format("%02d", count.index+1)}-disk1"
  }

  network_interface {
    label = "${var.network_label}"
  }

  domain = "${var.domain}"
  dns_servers = ["${var.dns_server1}", "${var.dns_server2}"]

  custom_configuration_parameters = {
    role = "kubeworker"
    ssh_user = "${var.ssh_user}"
    consul_dc = "${var.consul_dc}"
  }

  connection = {
      user = "${var.ssh_user}"
      key_file = "${var.ssh_key}"
      host = "${self.network_interface.0.ipv4_address}"
  }

  provisioner "remote-exec" {
    inline = [ "sudo hostnamectl --static set-hostname ${self.name}" ]
  }

  count = "${var.kubeworker_count}"
}

resource "vsphere_virtual_machine" "mi-edge-nodes" {
  name = "${var.short_name}-edge-${format("%02d", count.index+1)}"
  datacenter = "${var.datacenter}"
  folder = "${var.folder}"
  cluster = "${var.cluster}"
  resource_pool = "${var.pool}"

  vcpu = "${var.edge_cpu}"
  memory = "${var.edge_ram}"

  linked_clone = "${var.linked_clone}"

  disk {
    use_sdrs = true
    #size = "${var.edge_volume_size}"
    template = "${var.template}"
    type = "${var.disk_type}"
    datastore = "${var.datastore}"
    #name = "${var.short_name}-edge-${format("%02d", count.index+1)}-disk1"
  }

  network_interface {
    label = "${var.network_label}"
  }

  domain = "${var.domain}"
  dns_servers = ["${var.dns_server1}", "${var.dns_server2}"]

  custom_configuration_parameters = {
    role = "edge"
    ssh_user = "${var.ssh_user}"
    consul_dc = "${var.consul_dc}"
  }

  connection = {
    user = "${var.ssh_user}"
    key_file = "${var.ssh_key}"
    host = "${self.network_interface.0.ipv4_address}"
  }

  provisioner "remote-exec" {
    inline = [ "sudo hostnamectl --static set-hostname ${self.name}" ]
  }

  count = "${var.edge_count}"
}

output "control_ips" {
  value = "${join(\",\", vsphere_virtual_machine.mi-control-nodes.*.network_interface.0.ipv4_address)}"
}

output "worker_ips" {
  value = "${join(\",\", vsphere_virtual_machine.mi-worker-nodes.*.network_interface.0.ipv4_address)}"
}

output "kubeworker_ips" {
  value = "${join(\",\", vsphere_virtual_machine.mi-kubeworker-nodes.*.network_interface.0.ipv4_address)}"
}

output "edge_ips" {
  value = "${join(\",\", vsphere_virtual_machine.mi-edge-nodes.*.network_interface.0.ipv4_address)}"
}
@hashibot added the bug (Type: Bug) label on Jun 13, 2017
@vancluever (Contributor)

This issue has been resolved in the 1.0.0 release, and modifying a VM's disks no longer forces re-creation of the resource.
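
A minimal sketch of the same change against the rewritten resource (syntax as in later 1.x provider docs; the data source references, names, and sizes here are illustrative placeholders, not taken from this issue):

resource "vsphere_virtual_machine" "worker" {
  name             = "example-worker-01"
  resource_pool_id = data.vsphere_resource_pool.pool.id
  datastore_id     = data.vsphere_datastore.datastore.id

  num_cpus = 1
  memory   = 4096
  guest_id = "centos7_64Guest"

  network_interface {
    network_id = data.vsphere_network.network.id
  }

  disk {
    label = "disk0"
    size  = 20
  }

  # As of 1.0.0, adding a second disk block like this is applied
  # in place; the VM is not destroyed and recreated.
  disk {
    label       = "disk1"
    size        = 50
    unit_number = 1
  }
}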

ghost locked and limited conversation to collaborators on Apr 19, 2020