Skip to content

Commit

Permalink
Merge remote-tracking branch 'hashi_origin/master' into 2544-terrafor…
Browse files Browse the repository at this point in the history
…m-s3-policy
  • Loading branch information
m-s-austin committed May 14, 2015
2 parents 31ffdea + 8a3b75d commit 0d3d51a
Show file tree
Hide file tree
Showing 18 changed files with 236 additions and 26 deletions.
6 changes: 4 additions & 2 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,16 +7,18 @@ FEATURES:

IMPROVEMENTS:

* **New config function: `formatlist`** - Format lists in a similar way to `format`.
Useful for creating URLs from a list of IPs. [GH-1829]
* provider/aws: `aws_s3_bucket` exports `hosted_zone_id` and `region` [GH-1865]
* provider/aws: `aws_route53_record` exports `fqdn` [GH-1847]
* provider/google: `google_compute_instance` `scratch` attribute added [GH-1920]
* **New config function: `formatlist`** - Format lists in a similar way to `format`.
Useful for creating URLs from a list of IPs. [GH-1829]

BUG FIXES:

* core: fix "resource not found" for interpolation issues with modules
* core: fix unflattenable error for orphans [GH-1922]
* core: fix deadlock with create-before-destroy + modules [GH-1949]
* core: fix "no roots found" error with create-before-destroy [GH-1953]
* command/push: local vars override remote ones [GH-1881]
* provider/aws: Mark `aws_security_group` description as `ForceNew` [GH-1871]
* provider/aws: `aws_db_instance` ARN value is correct [GH-1910]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ func resourceBlockStorageVolumeV1() *schema.Resource {
Type: schema.TypeMap,
Optional: true,
ForceNew: false,
Computed: true,
},
"snapshot_id": &schema.Schema{
Type: schema.TypeString,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -209,7 +209,6 @@ func resourceComputeInstanceV2() *schema.Resource {
Schema: map[string]*schema.Schema{
"id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"volume_id": &schema.Schema{
Expand Down Expand Up @@ -955,7 +954,6 @@ func resourceComputeVolumeAttachmentHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%s-", m["volume_id"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["device"].(string)))
return hashcode.String(buf.String())
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ func TestAccComputeV2Instance_basic(t *testing.T) {
})
}

func _TestAccComputeV2Instance_volumeAttach(t *testing.T) {
func TestAccComputeV2Instance_volumeAttach(t *testing.T) {
var instance servers.Server
var volume volumes.Volume

Expand Down Expand Up @@ -229,6 +229,7 @@ var testAccComputeV2Instance_volumeAttach = fmt.Sprintf(`
resource "openstack_compute_instance_v2" "foo" {
region = "%s"
name = "terraform-test"
security_groups = ["default"]
volume {
volume_id = "${openstack_blockstorage_volume_v1.myvol.id}"
}
Expand Down
7 changes: 7 additions & 0 deletions command/push.go
Original file line number Diff line number Diff line change
Expand Up @@ -211,6 +211,13 @@ Options:
-token=<token> Access token to use to upload. If blank or unspecified,
the ATLAS_TOKEN environmental variable will be used.
-var 'foo=bar' Set a variable in the Terraform configuration. This
flag can be set multiple times.
-var-file=foo Set variables in the Terraform configuration from
a file. If "terraform.tfvars" is present, it will be
automatically loaded if this flag is not specified.
-vcs=true If true (default), push will upload only files
committed to your VCS, if detected.
Expand Down
87 changes: 87 additions & 0 deletions terraform/context_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,43 @@ func TestContext2Plan(t *testing.T) {
}
}

// TestContext2Plan_createBefore_maintainRoot plans a configuration in which
// every resource uses create_before_destroy, verifying that planning still
// succeeds and produces the expected diff (the graph must keep a single root).
func TestContext2Plan_createBefore_maintainRoot(t *testing.T) {
	mod := testModule(t, "plan-cbd-maintain-root")
	provider := testProvider("aws")
	provider.DiffFn = testDiffFn

	opts := &ContextOpts{
		Module: mod,
		Providers: map[string]ResourceProviderFactory{
			"aws": testProviderFuncFixed(provider),
		},
		Variables: map[string]string{
			"in": "a,b,c",
		},
	}

	plan, err := testContext2(t, opts).Plan()
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	expected := strings.TrimSpace(`
DIFF:
CREATE: aws_instance.bar.0
CREATE: aws_instance.bar.1
CREATE: aws_instance.foo.0
CREATE: aws_instance.foo.1
STATE:
<no state>
`)
	if actual := strings.TrimSpace(plan.String()); actual != expected {
		t.Fatalf("expected:\n%s, got:\n%s", expected, actual)
	}
}

func TestContext2Plan_emptyDiff(t *testing.T) {
m := testModule(t, "plan-empty")
p := testProvider("aws")
Expand Down Expand Up @@ -139,6 +176,56 @@ func TestContext2Plan_moduleCycle(t *testing.T) {
}
}

// TestContext2Plan_moduleDeadlock plans a module configuration that previously
// deadlocked, guarding the plan with a timeout so a regression fails fast
// instead of hanging the test run.
//
// Fixes: the original called t.Fatalf from inside the spawned goroutine, which
// the testing package forbids (FailNow/Fatal must run on the test goroutine).
// The plan result is now sent back over a channel and all assertions happen on
// the test goroutine. The hand-rolled sleep goroutine is replaced by
// time.After, which also avoids leaking a goroutine on the success path.
func TestContext2Plan_moduleDeadlock(t *testing.T) {
	m := testModule(t, "plan-module-deadlock")
	p := testProvider("aws")
	p.DiffFn = testDiffFn

	type planResult struct {
		plan *Plan
		err  error
	}

	// Buffered so the worker goroutine can exit even if we hit the timeout.
	done := make(chan planResult, 1)
	go func() {
		ctx := testContext2(t, &ContextOpts{
			Module: m,
			Providers: map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		})

		plan, err := ctx.Plan()
		done <- planResult{plan, err}
	}()

	select {
	case <-time.After(3 * time.Second):
		t.Fatalf("timed out! probably deadlock")
	case res := <-done:
		if res.err != nil {
			t.Fatalf("err: %s", res.err)
		}

		actual := strings.TrimSpace(res.plan.String())
		expected := strings.TrimSpace(`
DIFF:
module.child:
CREATE: aws_instance.foo.0
CREATE: aws_instance.foo.1
CREATE: aws_instance.foo.2
STATE:
<no state>
`)
		if actual != expected {
			t.Fatalf("expected:\n%sgot:\n%s", expected, actual)
		}
	}
}

func TestContext2Plan_moduleInput(t *testing.T) {
m := testModule(t, "plan-module-input")
p := testProvider("aws")
Expand Down
8 changes: 8 additions & 0 deletions terraform/graph_config_node_resource.go
Original file line number Diff line number Diff line change
Expand Up @@ -311,13 +311,17 @@ func (n *GraphNodeConfigResourceFlat) DestroyNode(mode GraphNodeDestroyMode) Gra
return &graphNodeResourceDestroyFlat{
graphNodeResourceDestroy: node,
PathValue: n.PathValue,
FlatCreateNode: n,
}
}

// graphNodeResourceDestroyFlat is the flattened (module-path-aware) form of
// graphNodeResourceDestroy, produced by DestroyNode on a flattened resource.
type graphNodeResourceDestroyFlat struct {
	*graphNodeResourceDestroy

	// PathValue is the module path this node belongs to.
	PathValue []string

	// Needs to be able to properly yield back a flattened create node to
	// prevent a non-flat create node from being paired with this destroy node.
	// NOTE(review): the original comment was truncated after "to prevent" —
	// presumed meaning; confirm against CreateNode's callers.
	FlatCreateNode *GraphNodeConfigResourceFlat
}

func (n *graphNodeResourceDestroyFlat) Name() string {
Expand All @@ -329,6 +333,10 @@ func (n *graphNodeResourceDestroyFlat) Path() []string {
return n.PathValue
}

// CreateNode returns the flattened create node associated with this destroy
// node (set when the destroy node was built in DestroyNode).
func (n *graphNodeResourceDestroyFlat) CreateNode() dag.Vertex {
	return n.FlatCreateNode
}

// graphNodeResourceDestroy represents the logical destruction of a
// resource. This node doesn't mean it will be destroyed for sure, but
// instead that if a destroy were to happen, it must happen at this point.
Expand Down
2 changes: 1 addition & 1 deletion terraform/graph_dot.go
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@ func graphDotFindOrigins(g *Graph) ([]dag.Vertex, error) {
}

if len(origin) == 0 {
return nil, fmt.Errorf("No DOT origin nodes found.\nGraph: %s", g)
return nil, fmt.Errorf("No DOT origin nodes found.\nGraph: %s", g.String())
}

return origin, nil
Expand Down
13 changes: 13 additions & 0 deletions terraform/test-fixtures/plan-cbd-maintain-root/main.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Fixture for TestContext2Plan_createBefore_maintainRoot: two counted
# resources, both create_before_destroy, plus a root output depending on one
# of them. Exercises root-node preservation when CBD edges are added.
resource "aws_instance" "foo" {
  count = "2"
  lifecycle { create_before_destroy = true }
}

resource "aws_instance" "bar" {
  count = "2"
  lifecycle { create_before_destroy = true }
}

output "out" {
  value = "${aws_instance.foo.0.id}"
}
4 changes: 4 additions & 0 deletions terraform/test-fixtures/plan-module-deadlock/child/main.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Child module fixture for TestContext2Plan_moduleDeadlock: a counted
# create_before_destroy resource inside a module (count is interpolated,
# yielding 3 instances from length("abc")).
resource "aws_instance" "foo" {
  count = "${length("abc")}"
  lifecycle { create_before_destroy = true }
}
3 changes: 3 additions & 0 deletions terraform/test-fixtures/plan-module-deadlock/main.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# Root fixture for TestContext2Plan_moduleDeadlock: just pulls in the child
# module that contains the create_before_destroy resources.
module "child" {
  source = "./child"
}
12 changes: 9 additions & 3 deletions terraform/transform_destroy.go
Original file line number Diff line number Diff line change
@@ -1,8 +1,6 @@
package terraform

import (
"github.com/hashicorp/terraform/dag"
)
import "github.com/hashicorp/terraform/dag"

// GraphNodeDestroyMode identifies the kind of destruction a destroyable graph
// node participates in. NOTE(review): the concrete mode constants are declared
// elsewhere in this file and are not visible in this chunk.
type GraphNodeDestroyMode byte

Expand Down Expand Up @@ -193,6 +191,14 @@ func (t *CreateBeforeDestroyTransformer) Transform(g *Graph) error {
// This ensures that.
for _, sourceRaw := range g.UpEdges(cn).List() {
source := sourceRaw.(dag.Vertex)

// If the graph has a "root" node (one added by a RootTransformer and not
// just a resource that happens to have no ancestors), we don't want to
// add any edges to it, because then it ceases to be a root.
if _, ok := source.(graphNodeRoot); ok {
continue
}

connect = append(connect, dag.BasicEdge(dn, source))
}

Expand Down
24 changes: 24 additions & 0 deletions terraform/transform_expand.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package terraform

import (
"fmt"
"log"

"github.com/hashicorp/terraform/dag"
Expand Down Expand Up @@ -59,3 +60,26 @@ func (n *GraphNodeBasicSubgraph) Name() string {
// Subgraph returns the graph wrapped by this node.
func (n *GraphNodeBasicSubgraph) Subgraph() *Graph {
	return n.Graph
}

// Flatten returns a module-path-aware wrapper around this subgraph node so it
// can be merged into a flattened root graph at path p. It never fails.
func (n *GraphNodeBasicSubgraph) Flatten(p []string) (dag.Vertex, error) {
	flat := &graphNodeBasicSubgraphFlat{
		GraphNodeBasicSubgraph: n,
		PathValue:              p,
	}
	return flat, nil
}

// Same as GraphNodeBasicSubgraph, but for flattening
// Same as GraphNodeBasicSubgraph, but for flattening: it carries the module
// path so names and paths are qualified within the flattened root graph.
type graphNodeBasicSubgraphFlat struct {
	*GraphNodeBasicSubgraph

	// PathValue is the module path this node was flattened into.
	PathValue []string
}

// Name qualifies the wrapped node's name with its module path prefix so
// flattened nodes stay unique in the root graph.
func (n *graphNodeBasicSubgraphFlat) Name() string {
	prefix := modulePrefixStr(n.PathValue)
	return prefix + "." + n.GraphNodeBasicSubgraph.Name()
}

// Path returns the module path this flattened node belongs to.
func (n *graphNodeBasicSubgraphFlat) Path() []string {
	return n.PathValue
}
53 changes: 53 additions & 0 deletions terraform/transform_provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -173,6 +173,14 @@ func (n *graphNodeDisabledProvider) EvalTree() EvalNode {
}
}

// GraphNodeFlattenable impl.
// Flatten implements GraphNodeFlattenable: it wraps this disabled provider in
// a module-path-aware node so it can live in a flattened root graph at path p.
// It never fails.
func (n *graphNodeDisabledProvider) Flatten(p []string) (dag.Vertex, error) {
	flat := &graphNodeDisabledProviderFlat{
		graphNodeDisabledProvider: n,
		PathValue:                 p,
	}
	return flat, nil
}

// Name returns the wrapped provider's vertex name tagged as disabled.
func (n *graphNodeDisabledProvider) Name() string {
	return dag.VertexName(n.GraphNodeProvider) + " (disabled)"
}
Expand Down Expand Up @@ -205,6 +213,51 @@ func (n *graphNodeDisabledProvider) ProviderConfig() *config.RawConfig {
return n.GraphNodeProvider.ProviderConfig()
}

// Same as graphNodeDisabledProvider, but for flattening
// Same as graphNodeDisabledProvider, but for flattening: it carries the module
// path so names and provider names are qualified in the flattened root graph.
type graphNodeDisabledProviderFlat struct {
	*graphNodeDisabledProvider

	// PathValue is the module path this node was flattened into.
	PathValue []string
}

// Name qualifies the wrapped disabled provider's name with its module path
// prefix.
func (n *graphNodeDisabledProviderFlat) Name() string {
	prefix := modulePrefixStr(n.PathValue)
	return prefix + "." + n.graphNodeDisabledProvider.Name()
}

// Path returns the module path this flattened node belongs to.
func (n *graphNodeDisabledProviderFlat) Path() []string {
	return n.PathValue
}

// ProviderName qualifies the wrapped provider name with this node's module
// path prefix.
func (n *graphNodeDisabledProviderFlat) ProviderName() string {
	prefix := modulePrefixStr(n.PathValue)
	return prefix + "." + n.graphNodeDisabledProvider.ProviderName()
}

// GraphNodeDependable impl.
// GraphNodeDependable impl.
// DependableName returns the names other nodes may depend on this node by:
// just its fully-qualified Name.
func (n *graphNodeDisabledProviderFlat) DependableName() []string {
	return []string{n.Name()}
}

// DependentOn declares a dependency on the same disabled provider in the
// parent module (if any), so provider configuration is resolved parent-first.
func (n *graphNodeDisabledProviderFlat) DependentOn() []string {
	// At the root (path length <= 1) there is no parent provider to wait on.
	if len(n.PathValue) <= 1 {
		return nil
	}

	parentPrefix := modulePrefixStr(n.PathValue[:len(n.PathValue)-1])
	if parentPrefix != "" {
		parentPrefix += "."
	}

	return []string{parentPrefix + n.graphNodeDisabledProvider.Name()}
}

// graphNodeMissingProvider represents a provider that is referenced in the
// configuration but has no node in the graph yet; it carries only the
// provider's name.
type graphNodeMissingProvider struct {
	ProviderNameValue string
}
Expand Down
5 changes: 4 additions & 1 deletion website/source/docs/configuration/interpolation.html.md
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,9 @@ variables, attributes of resources, call functions, etc.
You can also perform simple math in interpolations, allowing
you to write expressions such as `${count.index+1}`.

You can escape interpolation with double dollar signs: `$${foo}`
will be rendered as a literal `${foo}`.

## Available Variables

**To reference user variables**, use the `var.` prefix followed by the
Expand Down Expand Up @@ -123,7 +126,7 @@ The supported built-in functions are:
* `split(delim, string)` - Splits the string previously created by `join`
back into a list. This is useful for pushing lists through module
outputs since they currently only support string values. Depending on the
use, the string this is being performed within may need to be wrapped
use, the string this is being performed within may need to be wrapped
in brackets to indicate that the output is actually a list, e.g.
`a_resource_param = ["${split(",", var.CSV_STRING)}"]`.
Example: `split(",", module.amod.server_ids)`
Expand Down
Loading

0 comments on commit 0d3d51a

Please sign in to comment.