diff --git a/README.md b/README.md
index 1d11555e77..be2fc71674 100644
--- a/README.md
+++ b/README.md
@@ -42,10 +42,11 @@ cluster on AWS.
This provider's versions are compatible with the following versions of Cluster API:
-||Cluster API v1alpha1 (v0.1)|
-|-|-|
-|AWS Provider v1alpha1 (v0.2)|✓|
-|AWS Provider v1alpha1 (v0.3)|✓|
+||Cluster API v1alpha1 (v0.1)|Cluster API v1alpha2 (unreleased)|
+|-|-|-|
+|AWS Provider v1alpha1 (v0.2)|✓||
+|AWS Provider v1alpha1 (v0.3)|✓||
+|AWS Provider v1alpha2 (unreleased)||✓|
This provider's versions are able to install and manage the following versions of Kubernetes:
diff --git a/cmd/manager/BUILD.bazel b/cmd/manager/BUILD.bazel
index a8b73f0937..46326e686c 100644
--- a/cmd/manager/BUILD.bazel
+++ b/cmd/manager/BUILD.bazel
@@ -26,15 +26,18 @@ go_library(
deps = [
"//pkg/apis:go_default_library",
"//pkg/cloud/aws/actuators/cluster:go_default_library",
+ "//pkg/controller:go_default_library",
"//pkg/record:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
+ "//vendor/k8s.io/klog/klogr:go_default_library",
"//vendor/sigs.k8s.io/cluster-api/pkg/apis:go_default_library",
"//vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common:go_default_library",
"//vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset:go_default_library",
"//vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster:go_default_library",
"//vendor/sigs.k8s.io/controller-runtime/pkg/client/config:go_default_library",
"//vendor/sigs.k8s.io/controller-runtime/pkg/manager:go_default_library",
+ "//vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log:go_default_library",
"//vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals:go_default_library",
],
)
diff --git a/cmd/manager/main.go b/cmd/manager/main.go
index 1d55f13357..4863674d35 100644
--- a/cmd/manager/main.go
+++ b/cmd/manager/main.go
@@ -24,15 +24,18 @@ import (
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/klog"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/apis"
+ "k8s.io/klog/klogr"
+ capa "sigs.k8s.io/cluster-api-provider-aws/pkg/apis"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/actuators/cluster"
+ "sigs.k8s.io/cluster-api-provider-aws/pkg/controller"
"sigs.k8s.io/cluster-api-provider-aws/pkg/record"
- clusterapis "sigs.k8s.io/cluster-api/pkg/apis"
+ capi "sigs.k8s.io/cluster-api/pkg/apis"
"sigs.k8s.io/cluster-api/pkg/apis/cluster/common"
"sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset"
capicluster "sigs.k8s.io/cluster-api/pkg/controller/cluster"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
)
@@ -46,8 +49,9 @@ func main() {
profilerAddress := flag.String("profiler-address", "", "Bind address to expose the pprof profiler (e.g. localhost:6060)")
flag.Parse()
-
- cfg := config.GetConfigOrDie()
+ if *watchNamespace != "" {
+ klog.Infof("Watching cluster-api objects only in namespace %q for reconciliation", *watchNamespace)
+ }
if *profilerAddress != "" {
klog.Infof("Profiler listening for requests at %s", *profilerAddress)
@@ -58,16 +62,16 @@ func main() {
// Setup a Manager
syncPeriod := 10 * time.Minute
- opts := manager.Options{
- SyncPeriod: &syncPeriod,
- }
- if *watchNamespace != "" {
- opts.Namespace = *watchNamespace
- klog.Infof("Watching cluster-api objects only in namespace %q for reconciliation.", opts.Namespace)
- }
+ // Setup controller-runtime logger.
+ log.SetLogger(klogr.New())
- mgr, err := manager.New(cfg, opts)
+ // Get a config to talk to the api-server.
+ cfg := config.GetConfigOrDie()
+ mgr, err := manager.New(cfg, manager.Options{
+ SyncPeriod: &syncPeriod,
+ Namespace: *watchNamespace,
+ })
if err != nil {
klog.Fatalf("Failed to set up overall controller manager: %v", err)
}
@@ -87,24 +91,30 @@ func main() {
// Initialize cluster actuator.
clusterActuator := cluster.NewActuator(cluster.ActuatorParams{
+ Client: mgr.GetClient(),
CoreClient: coreClient,
- Client: cs.ClusterV1alpha2(),
+ ClusterClient: cs.ClusterV1alpha2(),
LoggingContext: "[cluster-actuator]",
})
// Register our cluster deployer (the interface is in clusterctl and we define the Deployer interface on the actuator)
common.RegisterClusterProvisioner("aws", clusterActuator)
- if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
+ if err := capi.AddToScheme(mgr.GetScheme()); err != nil {
klog.Fatal(err)
}
- if err := clusterapis.AddToScheme(mgr.GetScheme()); err != nil {
+ if err := capa.AddToScheme(mgr.GetScheme()); err != nil {
klog.Fatal(err)
}
capicluster.AddWithActuator(mgr, clusterActuator)
+ // Setup all Controllers.
+ if err := controller.AddToManager(mgr); err != nil {
+ klog.Fatal(err)
+ }
+
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
klog.Fatalf("Failed to run manager: %v", err)
}
diff --git a/config/crds/infrastructure.cluster.sigs.k8s.io_awsmachines.yaml b/config/crds/infrastructure.cluster.sigs.k8s.io_awsmachines.yaml
index 3511522f23..4c490f23cd 100644
--- a/config/crds/infrastructure.cluster.sigs.k8s.io_awsmachines.yaml
+++ b/config/crds/infrastructure.cluster.sigs.k8s.io_awsmachines.yaml
@@ -447,34 +447,6 @@ spec:
are additive. The actuator will ensure these tags are present, but
will not remove any other tags that may exist on the instance.
type: object
- additionalUserDataFiles:
- description: AdditionalUserDataFiles specifies extra files to be passed
- to user_data upon creation.
- items:
- description: Files defines the input for generating write_files in
- cloud-init.
- properties:
- content:
- description: Content is the actual content of the file.
- type: string
- owner:
- description: Owner specifies the ownership of the file, e.g. "root:root".
- type: string
- path:
- description: Path specifies the full path on disk where to store
- the file.
- type: string
- permissions:
- description: Permissions specifies the permissions to assign to
- the file, e.g. "0640".
- type: string
- required:
- - content
- - owner
- - path
- - permissions
- type: object
- type: array
ami:
description: AMI is the reference to the AMI from which to create the
machine instance.
@@ -527,6 +499,10 @@ spec:
keyName:
description: KeyName is the name of the SSH key to install on the instance.
type: string
+ providerID:
+ description: ProviderID is the unique identifier as specified by the
+ cloud provider.
+ type: string
publicIP:
description: 'PublicIP specifies whether the instance should get a public
IP. Precedence for this setting is as follows: 1. This field if set
@@ -572,48 +548,64 @@ spec:
status:
description: AWSMachineStatus defines the observed state of AWSMachine
properties:
- conditions:
- description: Conditions is a set of conditions associated with the Machine
- to indicate errors or other status
+ addresses:
+ description: Addresses contains the AWS instance associated addresses.
items:
- description: AWSMachineProviderCondition is a condition in a AWSMachineProviderStatus
+ description: NodeAddress contains information for the node's address.
properties:
- lastProbeTime:
- description: LastProbeTime is the last time we probed the condition.
- format: date-time
- type: string
- lastTransitionTime:
- description: LastTransitionTime is the last time the condition
- transitioned from one status to another.
- format: date-time
- type: string
- message:
- description: Message is a human-readable message indicating details
- about last transition.
- type: string
- reason:
- description: Reason is a unique, one-word, CamelCase reason for
- the condition's last transition.
- type: string
- status:
- description: Status is the status of the condition.
+ address:
+ description: The node address.
type: string
type:
- description: Type is the type of the condition.
+ description: Node address type, one of Hostname, ExternalIP or
+ InternalIP.
type: string
required:
- - status
+ - address
- type
type: object
type: array
+ errorMessage:
+ description: "ErrorMessage will be set in the event that there is a
+ terminal problem reconciling the Machine and will contain a more verbose
+ string suitable for logging and human consumption. \n This field should
+ not be set for transitive errors that a controller faces that are
+ expected to be fixed automatically over time (like service outages),
+ but instead indicate that something is fundamentally wrong with the
+ Machine's spec or the configuration of the controller, and that manual
+ intervention is required. Examples of terminal errors would be invalid
+ combinations of settings in the spec, values that are unsupported
+ by the controller, or the responsible controller itself being critically
+ misconfigured. \n Any transient errors that occur during the reconciliation
+ of Machines can be added as events to the Machine object and/or logged
+ in the controller's output."
+ type: string
+ errorReason:
+ description: "ErrorReason will be set in the event that there is a terminal
+ problem reconciling the Machine and will contain a succinct value
+ suitable for machine interpretation. \n This field should not be set
+ for transitive errors that a controller faces that are expected to
+ be fixed automatically over time (like service outages), but instead
+ indicate that something is fundamentally wrong with the Machine's
+ spec or the configuration of the controller, and that manual intervention
+ is required. Examples of terminal errors would be invalid combinations
+ of settings in the spec, values that are unsupported by the controller,
+ or the responsible controller itself being critically misconfigured.
+ \n Any transient errors that occur during the reconciliation of Machines
+ can be added as events to the Machine object and/or logged in the
+ controller's output."
+ type: string
instanceID:
description: InstanceID is the instance ID of the machine created in
- AWS
+ AWS.
type: string
instanceState:
description: InstanceState is the state of the AWS instance for this
- machine
+ machine.
type: string
+ ready:
+ description: Ready is true when the provider resource is ready.
+ type: boolean
type: object
type: object
versions:
diff --git a/config/crds/infrastructure.cluster.sigs.k8s.io_awsmachinetemplates.yaml b/config/crds/infrastructure.cluster.sigs.k8s.io_awsmachinetemplates.yaml
new file mode 100644
index 0000000000..8da87596ac
--- /dev/null
+++ b/config/crds/infrastructure.cluster.sigs.k8s.io_awsmachinetemplates.yaml
@@ -0,0 +1,689 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: awsmachinetemplates.infrastructure.cluster.sigs.k8s.io
+spec:
+ group: infrastructure.cluster.sigs.k8s.io
+ names:
+ kind: AWSMachineTemplate
+ plural: awsmachinetemplates
+ scope: ""
+ validation:
+ openAPIV3Schema:
+ description: AWSMachineTemplate is the Schema for the awsmachinetemplates API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ description: ObjectMeta is metadata that all persisted resources must have,
+ which includes all objects users must create.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: 'Annotations is an unstructured key value map stored with
+ a resource that may be set by external tools to store and retrieve
+ arbitrary metadata. They are not queryable and should be preserved
+ when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
+ type: object
+ clusterName:
+ description: The name of the cluster which the object belongs to. This
+ is used to distinguish resources with same name and namespace in different
+ clusters. This field is not set anywhere right now and apiserver is
+ going to ignore it if set in create or update request.
+ type: string
+ creationTimestamp:
+ description: "CreationTimestamp is a timestamp representing the server
+ time when this object was created. It is not guaranteed to be set
+ in happens-before order across separate operations. Clients may not
+ set this value. It is represented in RFC3339 form and is in UTC. \n
+ Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"
+ format: date-time
+ type: string
+ deletionGracePeriodSeconds:
+ description: Number of seconds allowed for this object to gracefully
+ terminate before it will be removed from the system. Only set when
+ deletionTimestamp is also set. May only be shortened. Read-only.
+ format: int64
+ type: integer
+ deletionTimestamp:
+ description: "DeletionTimestamp is RFC 3339 date and time at which this
+ resource will be deleted. This field is set by the server when a graceful
+ deletion is requested by the user, and is not directly settable by
+ a client. The resource is expected to be deleted (no longer visible
+ from resource lists, and not reachable by name) after the time in
+ this field, once the finalizers list is empty. As long as the finalizers
+ list contains items, deletion is blocked. Once the deletionTimestamp
+ is set, this value may not be unset or be set further into the future,
+ although it may be shortened or the resource may be deleted prior
+ to this time. For example, a user may request that a pod is deleted
+ in 30 seconds. The Kubelet will react by sending a graceful termination
+ signal to the containers in the pod. After that 30 seconds, the Kubelet
+ will send a hard termination signal (SIGKILL) to the container and
+ after cleanup, remove the pod from the API. In the presence of network
+ partitions, this object may still exist after this timestamp, until
+ an administrator or automated process can determine the resource is
+ fully terminated. If not set, graceful deletion of the object has
+ not been requested. \n Populated by the system when a graceful deletion
+ is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"
+ format: date-time
+ type: string
+ finalizers:
+ description: Must be empty before the object is deleted from the registry.
+ Each entry is an identifier for the responsible component that will
+ remove the entry from the list. If the deletionTimestamp of the object
+ is non-nil, entries in this list can only be removed.
+ items:
+ type: string
+ type: array
+ generateName:
+ description: "GenerateName is an optional prefix, used by the server,
+ to generate a unique name ONLY IF the Name field has not been provided.
+ If this field is used, the name returned to the client will be different
+ than the name passed. This value will also be combined with a unique
+ suffix. The provided value has the same validation rules as the Name
+ field, and may be truncated by the length of the suffix required to
+ make the value unique on the server. \n If this field is specified
+ and the generated name exists, the server will NOT return a 409 -
+ instead, it will either return 201 Created or 500 with Reason ServerTimeout
+ indicating a unique name could not be found in the time allotted,
+ and the client should retry (optionally after the time indicated in
+ the Retry-After header). \n Applied only if Name is not specified.
+ More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency"
+ type: string
+ generation:
+ description: A sequence number representing a specific generation of
+ the desired state. Populated by the system. Read-only.
+ format: int64
+ type: integer
+ initializers:
+ description: "An initializer is a controller which enforces some system
+ invariant at object creation time. This field is a list of initializers
+ that have not yet acted on this object. If nil or empty, this object
+ has been completely initialized. Otherwise, the object is considered
+ uninitialized and is hidden (in list/watch and get calls) from clients
+ that haven't explicitly asked to observe uninitialized objects. \n
+ When an object is created, the system will populate this list with
+ the current set of initializers. Only privileged users may set or
+ modify this list. Once it is empty, it may not be modified further
+ by any user. \n DEPRECATED - initializers are an alpha field and will
+ be removed in v1.15."
+ properties:
+ pending:
+ description: Pending is a list of initializers that must execute
+ in order before this object is visible. When the last pending
+ initializer is removed, and no failing result is set, the initializers
+ struct will be set to nil and the object is considered as initialized
+ and visible to all clients.
+ items:
+ description: Initializer is information about an initializer that
+ has not yet completed.
+ properties:
+ name:
+ description: name of the process that is responsible for initializing
+ this object.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ result:
+ description: If result is set with the Failure field, the object
+ will be persisted to storage and then deleted, ensuring that other
+ clients can observe the deletion.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this
+ representation of an object. Servers should convert recognized
+ schemas to the latest internal value, and may reject unrecognized
+ values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ code:
+ description: Suggested HTTP return code for this status, 0 if
+ not set.
+ format: int32
+ type: integer
+ details:
+ description: Extended data associated with the reason. Each
+ reason may define its own extended details. This field is
+ optional and the data returned is not guaranteed to conform
+ to any schema except that defined by the reason type.
+ properties:
+ causes:
+ description: The Causes array includes more details associated
+ with the StatusReason failure. Not all StatusReasons may
+ provide detailed causes.
+ items:
+ description: StatusCause provides more information about
+ an api.Status failure, including cases when multiple
+ errors are encountered.
+ properties:
+ field:
+ description: "The field of the resource that has caused
+ this error, as named by its JSON serialization.
+ May include dot and postfix notation for nested
+ attributes. Arrays are zero-indexed. Fields may
+ appear more than once in an array of causes due
+ to fields having multiple errors. Optional. \n Examples:
+ \ \"name\" - the field \"name\" on the current
+ resource \"items[0].name\" - the field \"name\"
+ on the first array entry in \"items\""
+ type: string
+ message:
+ description: A human-readable description of the cause
+ of the error. This field may be presented as-is
+ to a reader.
+ type: string
+ reason:
+ description: A machine-readable description of the
+ cause of the error. If this value is empty there
+ is no information available.
+ type: string
+ type: object
+ type: array
+ group:
+ description: The group attribute of the resource associated
+ with the status StatusReason.
+ type: string
+ kind:
+ description: 'The kind attribute of the resource associated
+ with the status StatusReason. On some operations may differ
+ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: The name attribute of the resource associated
+ with the status StatusReason (when there is a single name
+ which can be described).
+ type: string
+ retryAfterSeconds:
+ description: If specified, the time in seconds before the
+ operation should be retried. Some errors may indicate
+ the client must take an alternate action - for those errors
+ this field may indicate how long to wait before taking
+ the alternate action.
+ format: int32
+ type: integer
+ uid:
+ description: 'UID of the resource. (when there is a single
+ resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
+ type: string
+ type: object
+ kind:
+ description: 'Kind is a string value representing the REST resource
+ this object represents. Servers may infer this from the endpoint
+ the client submits requests to. Cannot be updated. In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ message:
+ description: A human-readable description of the status of this
+ operation.
+ type: string
+ metadata:
+ description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ properties:
+ continue:
+ description: continue may be set if the user set a limit
+ on the number of items returned, and indicates that the
+ server has more data available. The value is opaque and
+ may be used to issue another request to the endpoint that
+ served this list to retrieve the next set of available
+ objects. Continuing a consistent list may not be possible
+ if the server configuration has changed or more than a
+ few minutes have passed. The resourceVersion field returned
+ when using this continue value will be identical to the
+ value in the first response, unless you have received
+ this token from an error message.
+ type: string
+ resourceVersion:
+ description: 'String that identifies the server''s internal
+ version of this object that can be used by clients to
+ determine when objects have changed. Value must be treated
+ as opaque by clients and passed unmodified back to the
+ server. Populated by the system. Read-only. More info:
+ https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
+ type: string
+ selfLink:
+ description: selfLink is a URL representing this object.
+ Populated by the system. Read-only.
+ type: string
+ type: object
+ reason:
+ description: A machine-readable description of why this operation
+ is in the "Failure" status. If this value is empty there is
+ no information available. A Reason clarifies an HTTP status
+ code but does not override it.
+ type: string
+ status:
+ description: 'Status of the operation. One of: "Success" or
+ "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status'
+ type: string
+ type: object
+ required:
+ - pending
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: 'Map of string keys and values that can be used to organize
+ and categorize (scope and select) objects. May match selectors of
+ replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels'
+ type: object
+ managedFields:
+ description: "ManagedFields maps workflow-id and version to the set
+ of fields that are managed by that workflow. This is mostly for internal
+ housekeeping, and users typically shouldn't need to set or understand
+ this field. A workflow can be the user's name, a controller's name,
+ or the name of a specific apply path like \"ci-cd\". The set of fields
+ is always in the version that the workflow used when modifying the
+ object. \n This field is alpha and can be changed or removed without
+ notice."
+ items:
+ description: ManagedFieldsEntry is a workflow-id, a FieldSet and the
+ group version of the resource that the fieldset applies to.
+ properties:
+ apiVersion:
+ description: APIVersion defines the version of this resource that
+ this field set applies to. The format is "group/version" just
+ like the top-level APIVersion field. It is necessary to track
+ the version of a field set because it cannot be automatically
+ converted.
+ type: string
+ fields:
+ additionalProperties: true
+ description: Fields identifies a set of fields.
+ type: object
+ manager:
+ description: Manager is an identifier of the workflow managing
+ these fields.
+ type: string
+ operation:
+ description: Operation is the type of operation which lead to
+ this ManagedFieldsEntry being created. The only valid values
+ for this field are 'Apply' and 'Update'.
+ type: string
+ time:
+ description: Time is timestamp of when these fields were set.
+ It should always be empty if Operation is 'Apply'
+ format: date-time
+ type: string
+ type: object
+ type: array
+ name:
+ description: 'Name must be unique within a namespace. Is required when
+ creating resources, although some resources may allow a client to
+ request the generation of an appropriate name automatically. Name
+ is primarily intended for creation idempotence and configuration definition.
+ Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
+ type: string
+ namespace:
+ description: "Namespace defines the space within each name must be unique.
+ An empty namespace is equivalent to the \"default\" namespace, but
+ \"default\" is the canonical representation. Not all objects are required
+ to be scoped to a namespace - the value of this field for those objects
+ will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info:
+ http://kubernetes.io/docs/user-guide/namespaces"
+ type: string
+ ownerReferences:
+ description: List of objects depended by this object. If ALL objects
+ in the list have been deleted, this object will be garbage collected.
+ If this object is managed by a controller, then an entry in this list
+ will point to this controller, with the controller field set to true.
+ There cannot be more than one managing controller.
+ items:
+ description: OwnerReference contains enough information to let you
+ identify an owning object. An owning object must be in the same
+ namespace as the dependent, or be cluster-scoped, so there is no
+ namespace field.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ blockOwnerDeletion:
+ description: If true, AND if the owner has the "foregroundDeletion"
+ finalizer, then the owner cannot be deleted from the key-value
+ store until this reference is removed. Defaults to false. To
+ set this field, a user needs "delete" permission of the owner,
+ otherwise 422 (Unprocessable Entity) will be returned.
+ type: boolean
+ controller:
+ description: If true, this reference points to the managing controller.
+ type: boolean
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
+ type: string
+ uid:
+ description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
+ type: string
+ required:
+ - apiVersion
+ - kind
+ - name
+ - uid
+ type: object
+ type: array
+ resourceVersion:
+ description: "An opaque value that represents the internal version of
+ this object that can be used by clients to determine when objects
+ have changed. May be used for optimistic concurrency, change detection,
+ and the watch operation on a resource or set of resources. Clients
+ must treat these values as opaque and passed unmodified back to the
+ server. They may only be valid for a particular resource or set of
+ resources. \n Populated by the system. Read-only. Value must be treated
+ as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency"
+ type: string
+ selfLink:
+ description: SelfLink is a URL representing this object. Populated by
+ the system. Read-only.
+ type: string
+ uid:
+ description: "UID is the unique in time and space value for this object.
+ It is typically generated by the server on successful creation of
+ a resource and is not allowed to change on PUT operations. \n Populated
+ by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"
+ type: string
+ type: object
+ spec:
+ description: AWSMachineTemplateSpec defines the desired state of AWSMachineTemplate
+ properties:
+ template:
+ description: AWSMachineTemplateResource describes the data needed to
+        create an AWSMachine from a template
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the
+ latest internal value, and may reject unrecognized values. More
+ info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource
+ this object represents. Servers may infer this from the endpoint
+ the client submits requests to. Cannot be updated. In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ description: Standard object's metadata.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: 'Annotations is an unstructured key value map stored
+ with a resource that may be set by external tools to store
+ and retrieve arbitrary metadata. They are not queryable and
+ should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
+ type: object
+ generateName:
+ description: "GenerateName is an optional prefix, used by the
+ server, to generate a unique name ONLY IF the Name field has
+ not been provided. If this field is used, the name returned
+ to the client will be different than the name passed. This
+ value will also be combined with a unique suffix. The provided
+ value has the same validation rules as the Name field, and
+ may be truncated by the length of the suffix required to make
+ the value unique on the server. \n If this field is specified
+ and the generated name exists, the server will NOT return
+ a 409 - instead, it will either return 201 Created or 500
+ with Reason ServerTimeout indicating a unique name could not
+ be found in the time allotted, and the client should retry
+ (optionally after the time indicated in the Retry-After header).
+ \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency"
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: 'Map of string keys and values that can be used
+ to organize and categorize (scope and select) objects. May
+ match selectors of replication controllers and services. More
+ info: http://kubernetes.io/docs/user-guide/labels'
+ type: object
+ name:
+ description: 'Name must be unique within a namespace. Is required
+ when creating resources, although some resources may allow
+ a client to request the generation of an appropriate name
+ automatically. Name is primarily intended for creation idempotence
+ and configuration definition. Cannot be updated. More info:
+ http://kubernetes.io/docs/user-guide/identifiers#names'
+ type: string
+ namespace:
+ description: "Namespace defines the space within each name must
+ be unique. An empty namespace is equivalent to the \"default\"
+ namespace, but \"default\" is the canonical representation.
+ Not all objects are required to be scoped to a namespace -
+ the value of this field for those objects will be empty. \n
+ Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"
+ type: string
+ ownerReferences:
+ description: List of objects depended by this object. If ALL
+ objects in the list have been deleted, this object will be
+ garbage collected. If this object is managed by a controller,
+ then an entry in this list will point to this controller,
+ with the controller field set to true. There cannot be more
+ than one managing controller.
+ items:
+ description: OwnerReference contains enough information to
+ let you identify an owning object. An owning object must
+ be in the same namespace as the dependent, or be cluster-scoped,
+ so there is no namespace field.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ blockOwnerDeletion:
+ description: If true, AND if the owner has the "foregroundDeletion"
+ finalizer, then the owner cannot be deleted from the
+ key-value store until this reference is removed. Defaults
+ to false. To set this field, a user needs "delete" permission
+ of the owner, otherwise 422 (Unprocessable Entity) will
+ be returned.
+ type: boolean
+ controller:
+ description: If true, this reference points to the managing
+ controller.
+ type: boolean
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
+ type: string
+ uid:
+ description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
+ type: string
+ required:
+ - apiVersion
+ - kind
+ - name
+ - uid
+ type: object
+ type: array
+ type: object
+ spec:
+ description: Spec is the specification of the desired behavior of
+ the machine.
+ properties:
+ additionalSecurityGroups:
+ description: AdditionalSecurityGroups is an array of references
+ to security groups that should be applied to the instance.
+ These security groups would be set in addition to any security
+ groups defined at the cluster level or in the actuator.
+ items:
+ description: AWSResourceReference is a reference to a specific
+ AWS resource by ID, ARN, or filters. Only one of ID, ARN
+ or Filters may be specified. Specifying more than one will
+ result in a validation error.
+ properties:
+ arn:
+ description: ARN of resource
+ type: string
+ filters:
+ description: 'Filters is a set of key/value pairs used
+ to identify a resource. They are applied according to
+ the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ items:
+ description: Filter is a filter used to identify an
+ AWS resource
+ properties:
+ name:
+ description: Name of the filter. Filter names are
+ case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter
+ values. Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ type: array
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: AdditionalTags is the set of tags to add to an
+ instance, in addition to the ones added by default by the
+ actuator. These tags are additive. The actuator will ensure
+ these tags are present, but will not remove any other tags
+ that may exist on the instance.
+ type: object
+ ami:
+ description: AMI is the reference to the AMI from which to create
+ the machine instance.
+ properties:
+ arn:
+ description: ARN of resource
+ type: string
+ filters:
+ description: 'Filters is a set of key/value pairs used to
+ identify a resource. They are applied according to the
+ rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ items:
+ description: Filter is a filter used to identify an AWS
+ resource
+ properties:
+ name:
+ description: Name of the filter. Filter names are
+ case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter values.
+ Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ availabilityZone:
+ description: AvailabilityZone references the AWS availability
+ zone to use for this instance. If multiple subnets are matched
+ for the availability zone, the first one returned is picked.
+ type: string
+ iamInstanceProfile:
+ description: IAMInstanceProfile is a name of an IAM instance
+ profile to assign to the instance
+ type: string
+ imageLookupOrg:
+ description: ImageLookupOrg is the AWS Organization ID to use
+ for image lookup if AMI is not set.
+ type: string
+ instanceType:
+ description: 'InstanceType is the type of instance to create.
+ Example: m4.xlarge'
+ type: string
+ keyName:
+ description: KeyName is the name of the SSH key to install on
+ the instance.
+ type: string
+ providerID:
+ description: ProviderID is the unique identifier as specified
+ by the cloud provider.
+ type: string
+ publicIP:
+ description: 'PublicIP specifies whether the instance should
+ get a public IP. Precedence for this setting is as follows:
+ 1. This field if set 2. Cluster/flavor setting 3. Subnet default'
+ type: boolean
+ rootDeviceSize:
+ description: RootDeviceSize is the size of the root volume.
+ format: int64
+ type: integer
+ subnet:
+ description: Subnet is a reference to the subnet to use for
+ this instance. If not specified, the cluster subnet will be
+ used.
+ properties:
+ arn:
+ description: ARN of resource
+ type: string
+ filters:
+ description: 'Filters is a set of key/value pairs used to
+ identify a resource. They are applied according to the
+ rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ items:
+ description: Filter is a filter used to identify an AWS
+ resource
+ properties:
+ name:
+ description: Name of the filter. Filter names are
+ case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter values.
+ Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ type: object
+ required:
+ - spec
+ type: object
+ required:
+ - template
+ type: object
+ type: object
+ versions:
+ - name: v1alpha2
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 78ec62528f..6339a7daa5 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -7,52 +7,54 @@ metadata:
name: manager-role
rules:
- apiGroups:
- - cluster.sigs.k8s.io
+ - ""
resources:
- - clusters
- - clusters/status
+ - secrets
verbs:
- create
- - delete
- get
- list
- - patch
- - update
- watch
- apiGroups:
- - infrastructure.cluster.sigs.k8s.io
+ - ""
resources:
- - awsmachines
- - awsmachines/status
+ - configmaps
verbs:
- create
- delete
- get
- - list
- - patch
- - update
- - watch
- apiGroups:
- ""
resources:
- - secrets
+ - events
verbs:
- create
+ - delete
- get
- list
+ - patch
+ - update
- watch
- apiGroups:
- - ""
+ - awsprovider.k8s.io
resources:
- - configmaps
+ - awsclusterproviderconfigs
+ - awsclusterproviderstatuses
verbs:
- create
- delete
- get
+ - list
+ - patch
+ - update
+ - watch
- apiGroups:
- - ""
+ - cluster.sigs.k8s.io
resources:
- - events
+ - clusters
+ - clusters/status
+ - machines
+ - machines/status
verbs:
- create
- delete
@@ -62,10 +64,10 @@ rules:
- update
- watch
- apiGroups:
- - awsprovider.k8s.io
+ - infrastructure.cluster.sigs.k8s.io
resources:
- - awsclusterproviderconfigs
- - awsclusterproviderstatuses
+ - awsmachines
+ - awsmachines/status
verbs:
- create
- delete
diff --git a/config/samples/infrastructure_v1alpha2_awsmachinetemplate.yaml b/config/samples/infrastructure_v1alpha2_awsmachinetemplate.yaml
new file mode 100644
index 0000000000..654b17fdd5
--- /dev/null
+++ b/config/samples/infrastructure_v1alpha2_awsmachinetemplate.yaml
@@ -0,0 +1,9 @@
+apiVersion: infrastructure.cluster.sigs.k8s.io/v1alpha2
+kind: AWSMachineTemplate
+metadata:
+ labels:
+ controller-tools.k8s.io: "1.0"
+ name: awsmachinetemplate-sample
+spec:
+ # Add fields here
+ foo: bar
diff --git a/go.mod b/go.mod
index 06870d482f..a5ba5173f4 100644
--- a/go.mod
+++ b/go.mod
@@ -20,7 +20,8 @@ require (
k8s.io/code-generator v0.0.0-20190311093542-50b561225d70
k8s.io/klog v0.3.2
k8s.io/kubernetes v1.14.2
- sigs.k8s.io/cluster-api v0.0.0-20190711203908-5ffab93802d7
+ k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5
+ sigs.k8s.io/cluster-api v0.0.0-20190716185847-a507c44fc106
sigs.k8s.io/controller-runtime v0.2.0-beta.4
sigs.k8s.io/controller-tools v0.2.0-beta.3
sigs.k8s.io/testing_frameworks v0.1.2-0.20190130140139-57f07443c2d4
@@ -30,4 +31,5 @@ require (
replace (
k8s.io/api => k8s.io/api v0.0.0-20190704095032-f4ca3d3bdf1d
k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190704094733-8f6ac2502e51
+ sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v0.0.0-20190716185847-a507c44fc106
)
diff --git a/go.sum b/go.sum
index 77a7791557..853add71b1 100644
--- a/go.sum
+++ b/go.sum
@@ -304,8 +304,8 @@ k8s.io/kubernetes v1.14.2 h1:Gdq2hPpttbaJBoClIanCE6WSu4IZReA54yhkZtvPUOo=
k8s.io/kubernetes v1.14.2/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y=
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
-sigs.k8s.io/cluster-api v0.0.0-20190711203908-5ffab93802d7 h1:rX9YZe/JrhGzJ1GDgEPHTS/R2kP3UwKkYOv06sp0Gco=
-sigs.k8s.io/cluster-api v0.0.0-20190711203908-5ffab93802d7/go.mod h1:Ahokj7Sz+yCFXObi1j1UwZ0vU0AsWFAxln74L4BWxv8=
+sigs.k8s.io/cluster-api v0.0.0-20190716185847-a507c44fc106 h1:ojKZEQek8zRkMJlqi+v0v5I2mKrm+fxiny+VYnaAqsg=
+sigs.k8s.io/cluster-api v0.0.0-20190716185847-a507c44fc106/go.mod h1:Ahokj7Sz+yCFXObi1j1UwZ0vU0AsWFAxln74L4BWxv8=
sigs.k8s.io/controller-runtime v0.2.0-beta.4 h1:S1XVfRWR1MuIXZdkYx3jN8JDw+bbQxmWZroy0i87z/A=
sigs.k8s.io/controller-runtime v0.2.0-beta.4/go.mod h1:HweyYKQ8fBuzdu2bdaeBJvsFgAi/OqBBnrVGXcqKhME=
sigs.k8s.io/controller-tools v0.2.0-beta.3 h1:7h1Hx+vpg79dktBILuQq/aDed4GZcdxeUwuEb59G1bw=
diff --git a/pkg/apis/awsprovider/v1alpha1/types.go b/pkg/apis/awsprovider/v1alpha1/types.go
index 6df43573d2..a34cbb3a9c 100644
--- a/pkg/apis/awsprovider/v1alpha1/types.go
+++ b/pkg/apis/awsprovider/v1alpha1/types.go
@@ -25,6 +25,12 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
+const (
+ AnnotationClusterInfrastructureReady = "aws.cluster.sigs.k8s.io/infrastructure-ready"
+ AnnotationControlPlaneReady = "aws.cluster.sigs.k8s.io/control-plane-ready"
+ ValueReady = "true"
+)
+
// AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters.
// Only one of ID, ARN or Filters may be specified. Specifying more than one will result in
// a validation error.
@@ -442,9 +448,3 @@ type Instance struct {
// The tags associated with the instance.
Tags map[string]string `json:"tags,omitempty"`
}
-
-const (
- AnnotationClusterInfrastructureReady = "aws.cluster.sigs.k8s.io/infrastructure-ready"
- ValueReady = "true"
- AnnotationControlPlaneReady = "aws.cluster.sigs.k8s.io/control-plane-ready"
-)
diff --git a/pkg/apis/infrastructure/v1alpha2/BUILD.bazel b/pkg/apis/infrastructure/v1alpha2/BUILD.bazel
index 624f835aeb..2bc20f63ea 100644
--- a/pkg/apis/infrastructure/v1alpha2/BUILD.bazel
+++ b/pkg/apis/infrastructure/v1alpha2/BUILD.bazel
@@ -6,6 +6,7 @@ go_library(
"awsclusterproviderconfig_types.go",
"awsclusterproviderstatus_types.go",
"awsmachine_types.go",
+ "awsmachinetemplate_types.go",
"doc.go",
"register.go",
"tags.go",
@@ -21,6 +22,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library",
+ "//vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common:go_default_library",
"//vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2:go_default_library",
"//vendor/sigs.k8s.io/controller-runtime/pkg/runtime/scheme:go_default_library",
"//vendor/sigs.k8s.io/yaml:go_default_library",
@@ -33,6 +35,7 @@ go_test(
"awsclusterproviderconfig_types_test.go",
"awsclusterproviderstatus_types_test.go",
"awsmachine_types_test.go",
+ "awsmachinetemplate_types_test.go",
"v1alpha2_suite_test.go",
],
embed = [":go_default_library"],
diff --git a/pkg/apis/infrastructure/v1alpha2/awsmachine_types.go b/pkg/apis/infrastructure/v1alpha2/awsmachine_types.go
index 864c5ca44b..a87c50701e 100644
--- a/pkg/apis/infrastructure/v1alpha2/awsmachine_types.go
+++ b/pkg/apis/infrastructure/v1alpha2/awsmachine_types.go
@@ -17,12 +17,16 @@ limitations under the License.
package v1alpha2
import (
+ v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- userdata "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/userdata"
+ "sigs.k8s.io/cluster-api/pkg/apis/cluster/common"
)
// AWSMachineSpec defines the desired state of AWSMachine
type AWSMachineSpec struct {
+ // ProviderID is the unique identifier as specified by the cloud provider.
+ ProviderID *string `json:"providerID,omitempty"`
+
// AMI is the reference to the AMI from which to create the machine instance.
AMI AWSResourceReference `json:"ami,omitempty"`
@@ -74,26 +78,61 @@ type AWSMachineSpec struct {
// RootDeviceSize is the size of the root volume.
// +optional
RootDeviceSize int64 `json:"rootDeviceSize,omitempty"`
-
- // AdditionalUserDataFiles specifies extra files to be passed to user_data upon creation.
- // +optional
- AdditionalUserDataFiles []userdata.Files `json:"additionalUserDataFiles,omitempty"`
}
// AWSMachineStatus defines the observed state of AWSMachine
type AWSMachineStatus struct {
- // InstanceID is the instance ID of the machine created in AWS
+ // Ready is true when the provider resource is ready.
+ Ready *bool `json:"ready,omitempty"`
+
+ // Addresses contains the AWS instance associated addresses.
+ Addresses []v1.NodeAddress `json:"addresses,omitempty"`
+
+ // InstanceID is the instance ID of the machine created in AWS.
// +optional
InstanceID *string `json:"instanceID,omitempty"`
- // InstanceState is the state of the AWS instance for this machine
+ // InstanceState is the state of the AWS instance for this machine.
// +optional
InstanceState *InstanceState `json:"instanceState,omitempty"`
- // Conditions is a set of conditions associated with the Machine to indicate
- // errors or other status
+ // ErrorReason will be set in the event that there is a terminal problem
+ // reconciling the Machine and will contain a succinct value suitable
+ // for machine interpretation.
+ //
+ // This field should not be set for transitive errors that a controller
+ // faces that are expected to be fixed automatically over
+ // time (like service outages), but instead indicate that something is
+ // fundamentally wrong with the Machine's spec or the configuration of
+ // the controller, and that manual intervention is required. Examples
+ // of terminal errors would be invalid combinations of settings in the
+ // spec, values that are unsupported by the controller, or the
+ // responsible controller itself being critically misconfigured.
+ //
+ // Any transient errors that occur during the reconciliation of Machines
+ // can be added as events to the Machine object and/or logged in the
+ // controller's output.
+ // +optional
+ ErrorReason *common.MachineStatusError `json:"errorReason,omitempty"`
+
+ // ErrorMessage will be set in the event that there is a terminal problem
+ // reconciling the Machine and will contain a more verbose string suitable
+ // for logging and human consumption.
+ //
+ // This field should not be set for transitive errors that a controller
+ // faces that are expected to be fixed automatically over
+ // time (like service outages), but instead indicate that something is
+ // fundamentally wrong with the Machine's spec or the configuration of
+ // the controller, and that manual intervention is required. Examples
+ // of terminal errors would be invalid combinations of settings in the
+ // spec, values that are unsupported by the controller, or the
+ // responsible controller itself being critically misconfigured.
+ //
+ // Any transient errors that occur during the reconciliation of Machines
+ // can be added as events to the Machine object and/or logged in the
+ // controller's output.
// +optional
- Conditions []AWSMachineProviderCondition `json:"conditions,omitempty"`
+ ErrorMessage *string `json:"errorMessage,omitempty"`
}
// +genclient
diff --git a/pkg/apis/infrastructure/v1alpha2/awsmachinetemplate_types.go b/pkg/apis/infrastructure/v1alpha2/awsmachinetemplate_types.go
new file mode 100644
index 0000000000..43f6ec1179
--- /dev/null
+++ b/pkg/apis/infrastructure/v1alpha2/awsmachinetemplate_types.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha2
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2"
+)
+
+// AWSMachineTemplateSpec defines the desired state of AWSMachineTemplate
+type AWSMachineTemplateSpec struct {
+ Template AWSMachineTemplateResource `json:"template"`
+}
+
+// AWSMachineTemplateResource describes the data needed to create an AWSMachine from a template
+type AWSMachineTemplateResource struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Standard object's metadata.
+ clusterv1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec is the specification of the desired behavior of the machine.
+ Spec AWSMachineSpec `json:"spec"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AWSMachineTemplate is the Schema for the awsmachinetemplates API
+// +k8s:openapi-gen=true
+type AWSMachineTemplate struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec AWSMachineTemplateSpec `json:"spec,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AWSMachineTemplateList contains a list of AWSMachineTemplate
+type AWSMachineTemplateList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []AWSMachineTemplate `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&AWSMachineTemplate{}, &AWSMachineTemplateList{})
+}
diff --git a/pkg/apis/infrastructure/v1alpha2/awsmachinetemplate_types_test.go b/pkg/apis/infrastructure/v1alpha2/awsmachinetemplate_types_test.go
new file mode 100644
index 0000000000..b7c7f44e80
--- /dev/null
+++ b/pkg/apis/infrastructure/v1alpha2/awsmachinetemplate_types_test.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha2
+
+import (
+ "testing"
+
+ "github.com/onsi/gomega"
+ "golang.org/x/net/context"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+func TestStorageAWSMachineTemplate(t *testing.T) {
+ key := types.NamespacedName{
+ Name: "foo",
+ Namespace: "default",
+ }
+ created := &AWSMachineTemplate{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "default",
+ }}
+ g := gomega.NewGomegaWithT(t)
+
+ // Test Create
+ fetched := &AWSMachineTemplate{}
+ g.Expect(c.Create(context.TODO(), created)).To(gomega.Succeed())
+
+ g.Expect(c.Get(context.TODO(), key, fetched)).To(gomega.Succeed())
+ g.Expect(fetched).To(gomega.Equal(created))
+
+ // Test Updating the Labels
+ updated := fetched.DeepCopy()
+ updated.Labels = map[string]string{"hello": "world"}
+ g.Expect(c.Update(context.TODO(), updated)).To(gomega.Succeed())
+
+ g.Expect(c.Get(context.TODO(), key, fetched)).To(gomega.Succeed())
+ g.Expect(fetched).To(gomega.Equal(updated))
+
+ // Test Delete
+ g.Expect(c.Delete(context.TODO(), fetched)).To(gomega.Succeed())
+ g.Expect(c.Get(context.TODO(), key, fetched)).ToNot(gomega.Succeed())
+}
diff --git a/pkg/apis/infrastructure/v1alpha2/types.go b/pkg/apis/infrastructure/v1alpha2/types.go
index c54b198e22..d7c17bf06a 100644
--- a/pkg/apis/infrastructure/v1alpha2/types.go
+++ b/pkg/apis/infrastructure/v1alpha2/types.go
@@ -20,9 +20,12 @@ import (
"fmt"
"sort"
"time"
+)
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+const (
+ AnnotationClusterInfrastructureReady = "aws.infrastructure.cluster.sigs.k8s.io/infrastructure-ready"
+ AnnotationControlPlaneReady = "aws.infrastructure.cluster.sigs.k8s.io/control-plane-ready"
+ ValueReady = "true"
)
// AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters.
@@ -63,26 +66,6 @@ const (
MachineCreated AWSMachineProviderConditionType = "MachineCreated"
)
-// AWSMachineProviderCondition is a condition in a AWSMachineProviderStatus
-type AWSMachineProviderCondition struct {
- // Type is the type of the condition.
- Type AWSMachineProviderConditionType `json:"type"`
- // Status is the status of the condition.
- Status corev1.ConditionStatus `json:"status"`
- // LastProbeTime is the last time we probed the condition.
- // +optional
- LastProbeTime metav1.Time `json:"lastProbeTime"`
- // LastTransitionTime is the last time the condition transitioned from one status to another.
- // +optional
- LastTransitionTime metav1.Time `json:"lastTransitionTime"`
- // Reason is a unique, one-word, CamelCase reason for the condition's last transition.
- // +optional
- Reason string `json:"reason"`
- // Message is a human-readable message indicating details about last transition.
- // +optional
- Message string `json:"message"`
-}
-
// Network encapsulates AWS networking resources.
type Network struct {
// SecurityGroups is a map from the role/kind of the security group to its unique name, if any.
@@ -442,9 +425,3 @@ type Instance struct {
// The tags associated with the instance.
Tags map[string]string `json:"tags,omitempty"`
}
-
-const (
- AnnotationClusterInfrastructureReady = "aws.cluster.sigs.k8s.io/infrastructure-ready"
- ValueReady = "true"
- AnnotationControlPlaneReady = "aws.cluster.sigs.k8s.io/control-plane-ready"
-)
diff --git a/pkg/apis/infrastructure/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/infrastructure/v1alpha2/zz_generated.deepcopy.go
index b9650705be..1317417d7b 100644
--- a/pkg/apis/infrastructure/v1alpha2/zz_generated.deepcopy.go
+++ b/pkg/apis/infrastructure/v1alpha2/zz_generated.deepcopy.go
@@ -21,8 +21,10 @@ limitations under the License.
package v1alpha2
import (
+ v1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
userdata "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/userdata"
+ common "sigs.k8s.io/cluster-api/pkg/apis/cluster/common"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -166,27 +168,14 @@ func (in *AWSMachineList) DeepCopyObject() runtime.Object {
return nil
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSMachineProviderCondition) DeepCopyInto(out *AWSMachineProviderCondition) {
- *out = *in
- in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
- in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderCondition.
-func (in *AWSMachineProviderCondition) DeepCopy() *AWSMachineProviderCondition {
- if in == nil {
- return nil
- }
- out := new(AWSMachineProviderCondition)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSMachineSpec) DeepCopyInto(out *AWSMachineSpec) {
*out = *in
+ if in.ProviderID != nil {
+ in, out := &in.ProviderID, &out.ProviderID
+ *out = new(string)
+ **out = **in
+ }
in.AMI.DeepCopyInto(&out.AMI)
if in.AdditionalTags != nil {
in, out := &in.AdditionalTags, &out.AdditionalTags
@@ -217,11 +206,6 @@ func (in *AWSMachineSpec) DeepCopyInto(out *AWSMachineSpec) {
*out = new(AWSResourceReference)
(*in).DeepCopyInto(*out)
}
- if in.AdditionalUserDataFiles != nil {
- in, out := &in.AdditionalUserDataFiles, &out.AdditionalUserDataFiles
- *out = make([]userdata.Files, len(*in))
- copy(*out, *in)
- }
return
}
@@ -238,6 +222,16 @@ func (in *AWSMachineSpec) DeepCopy() *AWSMachineSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSMachineStatus) DeepCopyInto(out *AWSMachineStatus) {
*out = *in
+ if in.Ready != nil {
+ in, out := &in.Ready, &out.Ready
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Addresses != nil {
+ in, out := &in.Addresses, &out.Addresses
+ *out = make([]v1.NodeAddress, len(*in))
+ copy(*out, *in)
+ }
if in.InstanceID != nil {
in, out := &in.InstanceID, &out.InstanceID
*out = new(string)
@@ -248,12 +242,15 @@ func (in *AWSMachineStatus) DeepCopyInto(out *AWSMachineStatus) {
*out = new(InstanceState)
**out = **in
}
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make([]AWSMachineProviderCondition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
+ if in.ErrorReason != nil {
+ in, out := &in.ErrorReason, &out.ErrorReason
+ *out = new(common.MachineStatusError)
+ **out = **in
+ }
+ if in.ErrorMessage != nil {
+ in, out := &in.ErrorMessage, &out.ErrorMessage
+ *out = new(string)
+ **out = **in
}
return
}
@@ -268,6 +265,102 @@ func (in *AWSMachineStatus) DeepCopy() *AWSMachineStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSMachineTemplate) DeepCopyInto(out *AWSMachineTemplate) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplate.
+func (in *AWSMachineTemplate) DeepCopy() *AWSMachineTemplate {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSMachineTemplate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AWSMachineTemplate) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSMachineTemplateList) DeepCopyInto(out *AWSMachineTemplateList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]AWSMachineTemplate, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplateList.
+func (in *AWSMachineTemplateList) DeepCopy() *AWSMachineTemplateList {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSMachineTemplateList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AWSMachineTemplateList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSMachineTemplateResource) DeepCopyInto(out *AWSMachineTemplateResource) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplateResource.
+func (in *AWSMachineTemplateResource) DeepCopy() *AWSMachineTemplateResource {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSMachineTemplateResource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSMachineTemplateSpec) DeepCopyInto(out *AWSMachineTemplateSpec) {
+ *out = *in
+ in.Template.DeepCopyInto(&out.Template)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplateSpec.
+func (in *AWSMachineTemplateSpec) DeepCopy() *AWSMachineTemplateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSMachineTemplateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) {
*out = *in
diff --git a/pkg/cloud/aws/actuators/BUILD.bazel b/pkg/cloud/aws/actuators/BUILD.bazel
index 389ee88a68..42c8676be6 100644
--- a/pkg/cloud/aws/actuators/BUILD.bazel
+++ b/pkg/cloud/aws/actuators/BUILD.bazel
@@ -4,10 +4,11 @@ go_library(
name = "go_default_library",
srcs = [
"clients.go",
+ "cluster_scope.go",
"control_plane_lock.go",
"getters.go",
"machine_scope.go",
- "scope.go",
+ "session.go",
],
importpath = "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/actuators",
visibility = ["//visibility:public"],
@@ -21,10 +22,12 @@ go_library(
"//vendor/github.com/aws/aws-sdk-go/service/elb/elbiface:go_default_library",
"//vendor/github.com/go-logr/logr:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
- "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
- "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
+ "//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/klog/klogr:go_default_library",
+ "//vendor/k8s.io/utils/pointer:go_default_library",
"//vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2:go_default_library",
- "//vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha2:go_default_library",
+ "//vendor/sigs.k8s.io/cluster-api/pkg/errors:go_default_library",
+ "//vendor/sigs.k8s.io/cluster-api/pkg/util:go_default_library",
+ "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library",
],
)
diff --git a/pkg/cloud/aws/actuators/cluster/BUILD.bazel b/pkg/cloud/aws/actuators/cluster/BUILD.bazel
index 974e150ab2..371e11f364 100644
--- a/pkg/cloud/aws/actuators/cluster/BUILD.bazel
+++ b/pkg/cloud/aws/actuators/cluster/BUILD.bazel
@@ -23,6 +23,8 @@ go_library(
"//vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha2:go_default_library",
"//vendor/sigs.k8s.io/cluster-api/pkg/controller/remote:go_default_library",
"//vendor/sigs.k8s.io/cluster-api/pkg/errors:go_default_library",
+ "//vendor/sigs.k8s.io/cluster-api/pkg/util:go_default_library",
+ "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library",
],
)
diff --git a/pkg/cloud/aws/actuators/cluster/actuator.go b/pkg/cloud/aws/actuators/cluster/actuator.go
index 203b41e5d9..cd30bf16f1 100644
--- a/pkg/cloud/aws/actuators/cluster/actuator.go
+++ b/pkg/cloud/aws/actuators/cluster/actuator.go
@@ -17,6 +17,7 @@ limitations under the License.
package cluster
import (
+ "context"
"time"
"github.com/go-logr/logr"
@@ -33,9 +34,11 @@ import (
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/elb"
"sigs.k8s.io/cluster-api-provider-aws/pkg/deployer"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2"
- client "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha2"
+ clientv1 "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha2"
"sigs.k8s.io/cluster-api/pkg/controller/remote"
controllerError "sigs.k8s.io/cluster-api/pkg/errors"
+ "sigs.k8s.io/cluster-api/pkg/util"
+ "sigs.k8s.io/controller-runtime/pkg/client"
)
const waitForControlPlaneMachineDuration = 15 * time.Second //nolint
@@ -43,26 +46,28 @@ const waitForControlPlaneMachineDuration = 15 * time.Second //nolint
// Actuator is responsible for performing cluster reconciliation
type Actuator struct {
*deployer.Deployer
+ client.Client
- coreClient corev1.CoreV1Interface
- client client.ClusterV1alpha2Interface
- log logr.Logger
+ coreClient corev1.CoreV1Interface
+ clusterClient clientv1.ClusterV1alpha2Interface
+ log logr.Logger
}
// ActuatorParams holds parameter information for Actuator
type ActuatorParams struct {
+ Client client.Client
CoreClient corev1.CoreV1Interface
- Client client.ClusterV1alpha2Interface
+ ClusterClient clientv1.ClusterV1alpha2Interface
LoggingContext string
}
// NewActuator creates a new Actuator
func NewActuator(params ActuatorParams) *Actuator {
return &Actuator{
- client: params.Client,
- coreClient: params.CoreClient,
- log: klogr.New().WithName(params.LoggingContext),
- Deployer: deployer.New(deployer.Params{ScopeGetter: actuators.DefaultScopeGetter}),
+ clusterClient: params.ClusterClient,
+ coreClient: params.CoreClient,
+ log: klogr.New().WithName(params.LoggingContext),
+ Deployer: deployer.New(deployer.Params{ClusterScopeGetter: actuators.DefaultClusterScopeGetter}),
}
}
@@ -71,7 +76,10 @@ func (a *Actuator) Reconcile(cluster *clusterv1.Cluster) error {
log := a.log.WithValues("cluster-name", cluster.Name, "cluster-namespace", cluster.Namespace)
log.Info("Reconciling Cluster")
- scope, err := actuators.NewScope(actuators.ScopeParams{Cluster: cluster, Client: a.client, Logger: a.log})
+ scope, err := actuators.NewClusterScope(actuators.ClusterScopeParams{
+ Cluster: cluster,
+ Logger: a.log,
+ })
if err != nil {
return errors.Errorf("failed to create scope: %+v", err)
}
@@ -152,26 +160,25 @@ func (a *Actuator) Reconcile(cluster *clusterv1.Cluster) error {
log.Info("Cluster does not have ready annotation - checking for ready control plane machines")
- // machines, err := a.client.Machines(cluster.Namespace).List(actuators.ListOptionsForCluster(cluster.Name))
- // if err != nil {
- // return errors.Wrapf(err, "failed to list machines for cluster %q", cluster.Name)
- // }
+ machineList := &clusterv1.MachineList{}
+ if err := a.List(context.Background(), machineList, actuators.ListOptionsForCluster(cluster.Name)); err != nil {
+ return errors.Wrapf(err, "failed to retrieve machines in cluster %q", cluster.Name)
+ }
- // controlPlaneMachines := machine.GetControlPlaneMachines(machines)
+ controlPlaneMachines := util.GetControlPlaneMachinesFromList(machineList)
- // machineReady := false
- // for _, machine := range controlPlaneMachines {
- // if machine.Status.NodeRef != nil {
- // machineReady = true
- // break
- // }
- // }
+ machineReady := false
+ for _, machine := range controlPlaneMachines {
+ if machine.Status.NodeRef != nil {
+ machineReady = true
+ break
+ }
+ }
- // if !machineReady {
- // log.Info("No control plane machines are ready - requeuing cluster")
- // return &controllerError.RequeueAfterError{RequeueAfter: waitForControlPlaneMachineDuration}
- // }
- // TODO
+ if !machineReady {
+ log.Info("No control plane machines are ready - requeuing cluster")
+ return &controllerError.RequeueAfterError{RequeueAfter: waitForControlPlaneMachineDuration}
+ }
log.Info("Setting cluster ready annotation")
cluster.Annotations[v1alpha2.AnnotationControlPlaneReady] = v1alpha2.ValueReady
@@ -183,9 +190,9 @@ func (a *Actuator) Reconcile(cluster *clusterv1.Cluster) error {
func (a *Actuator) Delete(cluster *clusterv1.Cluster) error {
a.log.Info("Deleting cluster", "cluster-name", cluster.Name, "cluster-namespace", cluster.Namespace)
- scope, err := actuators.NewScope(actuators.ScopeParams{
+ scope, err := actuators.NewClusterScope(actuators.ClusterScopeParams{
Cluster: cluster,
- Client: a.client,
+ Client: a.Client,
Logger: a.log,
})
if err != nil {
diff --git a/pkg/cloud/aws/actuators/scope.go b/pkg/cloud/aws/actuators/cluster_scope.go
similarity index 57%
rename from pkg/cloud/aws/actuators/scope.go
rename to pkg/cloud/aws/actuators/cluster_scope.go
index d827fd0c2b..8a6c0f1305 100644
--- a/pkg/cloud/aws/actuators/scope.go
+++ b/pkg/cloud/aws/actuators/cluster_scope.go
@@ -17,55 +17,32 @@ limitations under the License.
package actuators
import (
- "encoding/json"
- "reflect"
- "sync"
+ "context"
+ "fmt"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elb"
"github.com/go-logr/logr"
"github.com/pkg/errors"
- "k8s.io/apimachinery/pkg/types"
"k8s.io/klog/klogr"
"sigs.k8s.io/cluster-api-provider-aws/pkg/apis/infrastructure/v1alpha2"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2"
- client "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha2"
-)
-
-var (
- sessionCache sync.Map
+ "sigs.k8s.io/controller-runtime/pkg/client"
)
const apiEndpointPort = 6443
-func sessionForRegion(region string) (*session.Session, error) {
- s, ok := sessionCache.Load(region)
- if ok {
- return s.(*session.Session), nil
- }
-
- ns, err := session.NewSession(aws.NewConfig().WithRegion(region))
- if err != nil {
- return nil, err
- }
-
- sessionCache.Store(region, ns)
- return ns, nil
-}
-
// ScopeParams defines the input parameters used to create a new Scope.
-type ScopeParams struct {
+type ClusterScopeParams struct {
AWSClients
- Cluster *clusterv1.Cluster
- Client client.ClusterV1alpha2Interface
+ Client client.Client
Logger logr.Logger
+ Cluster *clusterv1.Cluster
}
-// NewScope creates a new Scope from the supplied parameters.
+// NewClusterScope creates a new ClusterScope from the supplied parameters.
// This is meant to be called for each different actuator iteration.
-func NewScope(params ScopeParams) (*Scope, error) {
+func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) {
if params.Cluster == nil {
return nil, errors.New("failed to generate new scope from nil cluster")
}
@@ -93,111 +70,94 @@ func NewScope(params ScopeParams) (*Scope, error) {
params.AWSClients.ELB = elb.New(session)
}
- var clusterClient client.ClusterInterface
- if params.Client != nil {
- clusterClient = params.Client.Clusters(params.Cluster.Namespace)
- }
-
if params.Logger == nil {
params.Logger = klogr.New().WithName("default-logger")
}
- return &Scope{
+ return &ClusterScope{
+ client: params.Client,
+ clusterPatch: client.MergeFrom(params.Cluster),
+
AWSClients: params.AWSClients,
Cluster: params.Cluster,
- ClusterCopy: params.Cluster.DeepCopy(),
- ClusterClient: clusterClient,
ClusterConfig: clusterConfig,
ClusterStatus: clusterStatus,
- Logger: params.Logger.WithName(params.Cluster.APIVersion).WithName(params.Cluster.Namespace).WithName(params.Cluster.Name),
+ Logger: params.Logger.
+ WithName(params.Cluster.APIVersion).
+ WithName(params.Cluster.Namespace).
+ WithName(fmt.Sprintf("cluster=%s", params.Cluster.Name)),
}, nil
}
-// Scope defines the basic context for an actuator to operate upon.
-type Scope struct {
+// ClusterScope defines the basic context for an actuator to operate upon.
+type ClusterScope struct {
+ logr.Logger
+ client client.Client
+ clusterPatch client.Patch
+
AWSClients
- Cluster *clusterv1.Cluster
- // ClusterCopy is used for patch generation at the end of the scope's lifecycle.
- ClusterCopy *clusterv1.Cluster
- ClusterClient client.ClusterInterface
+ Cluster *clusterv1.Cluster
ClusterConfig *v1alpha2.AWSClusterProviderSpec
ClusterStatus *v1alpha2.AWSClusterProviderStatus
- logr.Logger
}
// Network returns the cluster network object.
-func (s *Scope) Network() *v1alpha2.Network {
+func (s *ClusterScope) Network() *v1alpha2.Network {
return &s.ClusterStatus.Network
}
// VPC returns the cluster VPC.
-func (s *Scope) VPC() *v1alpha2.VPCSpec {
+func (s *ClusterScope) VPC() *v1alpha2.VPCSpec {
return &s.ClusterConfig.NetworkSpec.VPC
}
// Subnets returns the cluster subnets.
-func (s *Scope) Subnets() v1alpha2.Subnets {
+func (s *ClusterScope) Subnets() v1alpha2.Subnets {
return s.ClusterConfig.NetworkSpec.Subnets
}
// SecurityGroups returns the cluster security groups as a map, it creates the map if empty.
-func (s *Scope) SecurityGroups() map[v1alpha2.SecurityGroupRole]v1alpha2.SecurityGroup {
+func (s *ClusterScope) SecurityGroups() map[v1alpha2.SecurityGroupRole]v1alpha2.SecurityGroup {
return s.ClusterStatus.Network.SecurityGroups
}
// Name returns the cluster name.
-func (s *Scope) Name() string {
+func (s *ClusterScope) Name() string {
return s.Cluster.Name
}
// Namespace returns the cluster namespace.
-func (s *Scope) Namespace() string {
+func (s *ClusterScope) Namespace() string {
return s.Cluster.Namespace
}
// Region returns the cluster region.
-func (s *Scope) Region() string {
+func (s *ClusterScope) Region() string {
return s.ClusterConfig.Region
}
// Close closes the current scope persisting the cluster configuration and status.
-func (s *Scope) Close() {
- if s.ClusterClient == nil {
- return
- }
+func (s *ClusterScope) Close() {
+ ctx := context.Background()
+ // Patch Cluster object.
ext, err := v1alpha2.EncodeClusterSpec(s.ClusterConfig)
if err != nil {
s.Error(err, "failed encoding cluster spec")
return
}
+ s.Cluster.Spec.ProviderSpec.Value = ext
+ if err := s.client.Patch(ctx, s.Cluster, s.clusterPatch); err != nil {
+ s.Error(err, "failed to patch object")
+ return
+ }
+
+ // Patch Cluster status.
newStatus, err := v1alpha2.EncodeClusterStatus(s.ClusterStatus)
if err != nil {
s.Error(err, "failed encoding cluster status")
return
}
-
- s.Cluster.Spec.ProviderSpec.Value = ext
-
- // Do not update Machine if nothing has changed
- var p []byte // TEMPORARY
- if len(p) != 0 {
- pb, err := json.MarshalIndent(p, "", " ")
- if err != nil {
- s.Error(err, "failed to json marshal patch")
- return
- }
- s.Logger.V(1).Info("Patching cluster")
- result, err := s.ClusterClient.Patch(s.Cluster.Name, types.JSONPatchType, pb)
- if err != nil {
- s.Error(err, "failed to patch cluster")
- return
- }
- // Keep the resource version updated so the status update can succeed
- s.Cluster.ResourceVersion = result.ResourceVersion
- }
-
- // Set the APIEndpoint.
if s.ClusterStatus.Network.APIServerELB.DNSName != "" {
s.Cluster.Status.APIEndpoints = []clusterv1.APIEndpoint{
{
@@ -207,12 +167,8 @@ func (s *Scope) Close() {
}
}
s.Cluster.Status.ProviderStatus = newStatus
-
- if !reflect.DeepEqual(s.Cluster.Status, s.ClusterCopy.Status) {
- s.Logger.V(1).Info("updating cluster status")
- if _, err := s.ClusterClient.UpdateStatus(s.Cluster); err != nil {
- s.Error(err, "failed to update cluster status")
- return
- }
+ if err := s.client.Status().Patch(ctx, s.Cluster, s.clusterPatch); err != nil {
+ s.Error(err, "failed to patch object status")
+ return
}
}
diff --git a/pkg/cloud/aws/actuators/control_plane_lock.go b/pkg/cloud/aws/actuators/control_plane_lock.go
index 7ee2132513..8cde56372d 100644
--- a/pkg/cloud/aws/actuators/control_plane_lock.go
+++ b/pkg/cloud/aws/actuators/control_plane_lock.go
@@ -19,8 +19,8 @@ package actuators
import (
"fmt"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2"
+ "sigs.k8s.io/controller-runtime/pkg/client"
)
// ControlPlaneConfigMapName returns the name of the ConfigMap used to coordinate the bootstrapping of control plane
@@ -30,8 +30,8 @@ func ControlPlaneConfigMapName(cluster *v1alpha2.Cluster) string {
}
// ListOptionsForCluster returns a ListOptions with a label selector for clusterName.
-func ListOptionsForCluster(clusterName string) metav1.ListOptions {
- return metav1.ListOptions{
- LabelSelector: fmt.Sprintf("%s=%s", v1alpha2.MachineClusterLabelName, clusterName),
- }
+func ListOptionsForCluster(clusterName string) client.ListOptionFunc {
+ return client.MatchingLabels(map[string]string{
+ v1alpha2.MachineClusterLabelName: clusterName,
+ })
}
diff --git a/pkg/cloud/aws/actuators/getters.go b/pkg/cloud/aws/actuators/getters.go
index 7ed0a910b2..d3aaa13f45 100644
--- a/pkg/cloud/aws/actuators/getters.go
+++ b/pkg/cloud/aws/actuators/getters.go
@@ -17,26 +17,26 @@ limitations under the License.
package actuators
var (
- DefaultScopeGetter ScopeGetter = ScopeGetterFunc(NewScope)
+ DefaultClusterScopeGetter ClusterScopeGetter = ClusterScopeGetterFunc(NewClusterScope)
DefaultMachineScopeGetter MachineScopeGetter = MachineScopeGetterFunc(NewMachineScope)
)
-type ScopeGetter interface {
- GetScope(params ScopeParams) (*Scope, error)
+type ClusterScopeGetter interface {
+ ClusterScope(params ClusterScopeParams) (*ClusterScope, error)
}
-type ScopeGetterFunc func(params ScopeParams) (*Scope, error)
+type ClusterScopeGetterFunc func(params ClusterScopeParams) (*ClusterScope, error)
-func (f ScopeGetterFunc) GetScope(params ScopeParams) (*Scope, error) {
+func (f ClusterScopeGetterFunc) ClusterScope(params ClusterScopeParams) (*ClusterScope, error) {
return f(params)
}
type MachineScopeGetter interface {
- GetMachineScope(params MachineScopeParams) (*MachineScope, error)
+ MachineScope(params MachineScopeParams) (*MachineScope, error)
}
type MachineScopeGetterFunc func(params MachineScopeParams) (*MachineScope, error)
-func (f MachineScopeGetterFunc) GetMachineScope(params MachineScopeParams) (*MachineScope, error) {
+func (f MachineScopeGetterFunc) MachineScope(params MachineScopeParams) (*MachineScope, error) {
return f(params)
}
diff --git a/pkg/cloud/aws/actuators/machine_scope.go b/pkg/cloud/aws/actuators/machine_scope.go
index 385385a9d0..db8e7da280 100644
--- a/pkg/cloud/aws/actuators/machine_scope.go
+++ b/pkg/cloud/aws/actuators/machine_scope.go
@@ -17,96 +17,162 @@ limitations under the License.
package actuators
import (
+ "context"
+ "fmt"
+ "time"
+
"github.com/go-logr/logr"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/infrastructure/v1alpha2"
+ "k8s.io/klog"
+ "k8s.io/utils/pointer"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/infrastructure/v1alpha2"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2"
- client "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha2"
+ capierrors "sigs.k8s.io/cluster-api/pkg/errors"
+ "sigs.k8s.io/cluster-api/pkg/util"
+ "sigs.k8s.io/controller-runtime/pkg/client"
)
// MachineScopeParams defines the input parameters used to create a new MachineScope.
type MachineScopeParams struct {
AWSClients
- Cluster *clusterv1.Cluster
- Machine *clusterv1.Machine
- AWSMachine *v1alpha2.AWSMachine
- Client client.ClusterV1alpha2Interface
- Logger logr.Logger
+ Client client.Client
+ Logger logr.Logger
+ ProviderMachine *infrav1.AWSMachine
}
// NewMachineScope creates a new MachineScope from the supplied parameters.
// This is meant to be called for each machine actuator operation.
func NewMachineScope(params MachineScopeParams) (*MachineScope, error) {
- scope, err := NewScope(ScopeParams{
+ ctx := context.Background()
+
+ // Fetch the Machine.
+ machine, err := util.GetOwnerMachine(ctx, params.Client, params.ProviderMachine.ObjectMeta)
+ if err != nil {
+ return nil, err
+ } else if machine == nil {
+ klog.Infof("Waiting for Machine Controller to set OwnerRef on AWSMachine %q/%q",
+ params.ProviderMachine.Namespace, params.ProviderMachine.Name)
+ return nil, &capierrors.RequeueAfterError{RequeueAfter: 10 * time.Second}
+ }
+
+ // Fetch the cluster.
+ cluster, err := util.GetClusterFromMetadata(ctx, params.Client, machine.ObjectMeta)
+ if err != nil {
+ return nil, err
+ }
+
+ clusterScope, err := NewClusterScope(ClusterScopeParams{
AWSClients: params.AWSClients,
Client: params.Client,
- Cluster: params.Cluster,
+ Cluster: cluster,
Logger: params.Logger,
})
if err != nil {
return nil, err
}
- var machineClient client.MachineInterface
- if params.Client != nil {
- machineClient = params.Client.Machines(params.Machine.Namespace)
- }
- scope.Logger = scope.Logger.WithName(params.Machine.Name)
return &MachineScope{
- Scope: scope,
- Machine: params.Machine,
- MachineCopy: params.Machine.DeepCopy(),
- MachineClient: machineClient,
- MachineConfig: &params.AWSMachine.Spec,
- MachineStatus: &params.AWSMachine.Status,
+ client: params.Client,
+ patch: client.MergeFrom(params.ProviderMachine),
+ Parent: clusterScope,
+ Machine: machine,
+ ProviderMachine: params.ProviderMachine,
+ Logger: clusterScope.Logger.
+ WithName(fmt.Sprintf("machine=%s", machine.Name)).
+ WithName(fmt.Sprintf("providerMachine=%s", params.ProviderMachine.Name)),
}, nil
}
// MachineScope defines a scope defined around a machine and its cluster.
type MachineScope struct {
- *Scope
+ logr.Logger
+ patch client.Patch
+ client client.Client
- Machine *clusterv1.Machine
- // MachineCopy is used to generate a patch diff at the end of the scope's lifecycle.
- MachineCopy *clusterv1.Machine
- MachineClient client.MachineInterface
- MachineConfig *v1alpha2.AWSMachineSpec
- MachineStatus *v1alpha2.AWSMachineStatus
+ Parent *ClusterScope
+ Machine *clusterv1.Machine
+ ProviderMachine *infrav1.AWSMachine
}
-// Name returns the machine name.
+// Name returns the AWSMachine name.
func (m *MachineScope) Name() string {
- return m.Machine.Name
+ return m.ProviderMachine.Name
}
-// Namespace returns the machine namespace.
+// Namespace returns the namespace name.
func (m *MachineScope) Namespace() string {
- return m.Machine.Namespace
+ return m.ProviderMachine.Namespace
+}
+
+// ClusterName returns the parent Cluster name.
+func (m *MachineScope) ClusterName() string {
+ return m.Parent.Name()
+}
+
+// IsControlPlane returns true if the machine is a control plane.
+func (m *MachineScope) IsControlPlane() bool {
+ return util.IsControlPlaneMachine(m.Machine)
}
// Role returns the machine role from the labels.
func (m *MachineScope) Role() string {
- return m.Machine.Labels["set"]
+ if util.IsControlPlaneMachine(m.Machine) {
+ return "control-plane"
+ }
+ return "node"
}
// Region returns the machine region.
func (m *MachineScope) Region() string {
- return m.Scope.Region()
+ return m.Parent.Region()
+}
+
+// GetInstanceID returns the AWSMachine instance id from the status.
+func (m *MachineScope) GetInstanceID() *string {
+ return m.ProviderMachine.Status.InstanceID
+}
+
+// SetInstanceID sets the AWSMachine instance id.
+func (m *MachineScope) SetInstanceID(v string) {
+ m.ProviderMachine.Status.InstanceID = pointer.StringPtr(v)
+}
+
+// GetProviderID returns the AWSMachine providerID from the spec.
+func (m *MachineScope) GetProviderID() *string {
+ return m.ProviderMachine.Spec.ProviderID
}
-// GetMachine returns the machine wrapped in the scope.
-func (m *MachineScope) GetMachine() *clusterv1.Machine {
- return m.Machine
+// SetProviderID sets the AWSMachine providerID in spec.
+func (m *MachineScope) SetProviderID(v string) {
+ m.ProviderMachine.Spec.ProviderID = pointer.StringPtr(v)
}
-// GetScope returns the scope that is wrapping the machine.
-func (m *MachineScope) GetScope() *Scope {
- return m.Scope
+// GetInstanceState returns the AWSMachine instance state from the status.
+func (m *MachineScope) GetInstanceState() *infrav1.InstanceState {
+ return m.ProviderMachine.Status.InstanceState
+}
+
+// SetInstanceState sets the AWSMachine instance state.
+func (m *MachineScope) SetInstanceState(v infrav1.InstanceState) {
+ m.ProviderMachine.Status.InstanceState = &v
+}
+
+// SetAnnotation sets a key value annotation on the AWSMachine.
+func (m *MachineScope) SetAnnotation(key, value string) {
+ if m.ProviderMachine.Annotations == nil {
+ m.ProviderMachine.Annotations = map[string]string{}
+ }
+ m.ProviderMachine.Annotations[key] = value
}
// Close the MachineScope by updating the machine spec, machine status.
func (m *MachineScope) Close() {
- if m.MachineClient == nil {
- return
+ ctx := context.Background()
+
+ if err := m.client.Patch(ctx, m.ProviderMachine, m.patch); err != nil {
+ m.Logger.Error(err, "error patching object")
+ }
+
+ if err := m.client.Status().Patch(ctx, m.ProviderMachine, m.patch); err != nil {
+ m.Logger.Error(err, "error patching object status")
}
- // TODO
}
diff --git a/pkg/cloud/aws/actuators/session.go b/pkg/cloud/aws/actuators/session.go
new file mode 100644
index 0000000000..74417c4626
--- /dev/null
+++ b/pkg/cloud/aws/actuators/session.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package actuators
+
+import (
+ "sync"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+)
+
+var (
+ sessionCache sync.Map
+)
+
+func sessionForRegion(region string) (*session.Session, error) {
+ s, ok := sessionCache.Load(region)
+ if ok {
+ return s.(*session.Session), nil
+ }
+
+ ns, err := session.NewSession(aws.NewConfig().WithRegion(region))
+ if err != nil {
+ return nil, err
+ }
+
+ sessionCache.Store(region, ns)
+ return ns, nil
+}
diff --git a/pkg/cloud/aws/services/certificates/service.go b/pkg/cloud/aws/services/certificates/service.go
index ebddff9d28..37c570683a 100644
--- a/pkg/cloud/aws/services/certificates/service.go
+++ b/pkg/cloud/aws/services/certificates/service.go
@@ -23,11 +23,11 @@ import (
// Service groups certificate related operations together and allows
// certificate updates to be applied to the actuator scope.
type Service struct {
- scope *actuators.Scope
+ scope *actuators.ClusterScope
}
// NewService returns a new certificates service for the given actuators scope.
-func NewService(scope *actuators.Scope) *Service {
+func NewService(scope *actuators.ClusterScope) *Service {
return &Service{
scope: scope,
}
diff --git a/pkg/cloud/aws/services/ec2/BUILD.bazel b/pkg/cloud/aws/services/ec2/BUILD.bazel
index 6bb43e1027..11aa83ba6e 100644
--- a/pkg/cloud/aws/services/ec2/BUILD.bazel
+++ b/pkg/cloud/aws/services/ec2/BUILD.bazel
@@ -52,6 +52,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
+ "//pkg/apis:go_default_library",
"//pkg/apis/infrastructure/v1alpha2:go_default_library",
"//pkg/cloud/aws/actuators:go_default_library",
"//pkg/cloud/aws/services/awserrors:go_default_library",
@@ -62,6 +63,10 @@ go_test(
"//vendor/github.com/golang/mock/gomock:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+ "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
+ "//vendor/k8s.io/utils/pointer:go_default_library",
+ "//vendor/sigs.k8s.io/cluster-api/pkg/apis:go_default_library",
"//vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2:go_default_library",
+ "//vendor/sigs.k8s.io/controller-runtime/pkg/client/fake:go_default_library",
],
)
diff --git a/pkg/cloud/aws/services/ec2/ami_test.go b/pkg/cloud/aws/services/ec2/ami_test.go
index 6318b8a7f8..345d8e7366 100644
--- a/pkg/cloud/aws/services/ec2/ami_test.go
+++ b/pkg/cloud/aws/services/ec2/ami_test.go
@@ -63,7 +63,7 @@ func TestAMIs(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
- scope, err := actuators.NewScope(actuators.ScopeParams{
+ scope, err := actuators.NewClusterScope(actuators.ClusterScopeParams{
Cluster: &clusterv1.Cluster{},
AWSClients: actuators.AWSClients{
EC2: ec2Mock,
diff --git a/pkg/cloud/aws/services/ec2/gateways_test.go b/pkg/cloud/aws/services/ec2/gateways_test.go
index 17dad17805..98255dd357 100644
--- a/pkg/cloud/aws/services/ec2/gateways_test.go
+++ b/pkg/cloud/aws/services/ec2/gateways_test.go
@@ -106,7 +106,7 @@ func TestReconcileInternetGateways(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
- scope, err := actuators.NewScope(actuators.ScopeParams{
+ scope, err := actuators.NewClusterScope(actuators.ClusterScopeParams{
Cluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
},
diff --git a/pkg/cloud/aws/services/ec2/instances.go b/pkg/cloud/aws/services/ec2/instances.go
index a6ca34efc9..7e7a2c84a1 100644
--- a/pkg/cloud/aws/services/ec2/instances.go
+++ b/pkg/cloud/aws/services/ec2/instances.go
@@ -38,14 +38,14 @@ import (
)
// InstanceByTags returns the existing instance or nothing if it doesn't exist.
-func (s *Service) InstanceByTags(machine *actuators.MachineScope) (*v1alpha2.Instance, error) {
+func (s *Service) InstanceByTags(scope *actuators.MachineScope) (*v1alpha2.Instance, error) {
s.scope.V(2).Info("Looking for existing machine instance by tags")
input := &ec2.DescribeInstancesInput{
Filters: []*ec2.Filter{
filter.EC2.VPC(s.scope.VPC().ID),
filter.EC2.ClusterOwned(s.scope.Name()),
- filter.EC2.Name(machine.Name()),
+ filter.EC2.Name(scope.Name()),
filter.EC2.InstanceStates(ec2.InstanceStateNamePending, ec2.InstanceStateNameRunning),
},
}
@@ -103,20 +103,20 @@ func (s *Service) InstanceIfExists(id *string) (*v1alpha2.Instance, error) {
}
// createInstance runs an ec2 instance.
-func (s *Service) createInstance(machine *actuators.MachineScope, bootstrapToken string) (*v1alpha2.Instance, error) {
+func (s *Service) createInstance(scope *actuators.MachineScope) (*v1alpha2.Instance, error) {
s.scope.V(2).Info("Creating an instance for a machine")
input := &v1alpha2.Instance{
- Type: machine.MachineConfig.InstanceType,
- IAMProfile: machine.MachineConfig.IAMInstanceProfile,
- RootDeviceSize: machine.MachineConfig.RootDeviceSize,
+ Type: scope.ProviderMachine.Spec.InstanceType,
+ IAMProfile: scope.ProviderMachine.Spec.IAMInstanceProfile,
+ RootDeviceSize: scope.ProviderMachine.Spec.RootDeviceSize,
}
input.Tags = v1alpha2.Build(v1alpha2.BuildParams{
ClusterName: s.scope.Name(),
Lifecycle: v1alpha2.ResourceLifecycleOwned,
- Name: aws.String(machine.Name()),
- Role: aws.String(machine.Role()),
+ Name: aws.String(scope.Name()),
+ Role: aws.String(scope.Role()),
Additional: v1alpha2.Tags{
v1alpha2.ClusterAWSCloudProviderTagKey(s.scope.Name()): string(v1alpha2.ResourceLifecycleOwned),
},
@@ -124,10 +124,10 @@ func (s *Service) createInstance(machine *actuators.MachineScope, bootstrapToken
var err error
// Pick image from the machine configuration, or use a default one.
- if machine.MachineConfig.AMI.ID != nil {
- input.ImageID = *machine.MachineConfig.AMI.ID
+ if scope.ProviderMachine.Spec.AMI.ID != nil {
+ input.ImageID = *scope.ProviderMachine.Spec.AMI.ID
} else {
- input.ImageID, err = s.defaultAMILookup(machine.MachineConfig.ImageLookupOrg, "ubuntu", "18.04", *machine.Machine.Spec.Version)
+ input.ImageID, err = s.defaultAMILookup(scope.ProviderMachine.Spec.ImageLookupOrg, "ubuntu", "18.04", *scope.Machine.Spec.Version)
if err != nil {
return nil, err
}
@@ -136,15 +136,15 @@ func (s *Service) createInstance(machine *actuators.MachineScope, bootstrapToken
// Pick subnet from the machine configuration, or based on the availability zone specified,
// or default to the first private subnet available.
// TODO(vincepri): Move subnet picking logic to its own function/method.
- if machine.MachineConfig.Subnet != nil && machine.MachineConfig.Subnet.ID != nil {
- input.SubnetID = *machine.MachineConfig.Subnet.ID
- } else if machine.MachineConfig.AvailabilityZone != nil {
- sns := s.scope.Subnets().FilterPrivate().FilterByZone(*machine.MachineConfig.AvailabilityZone)
+ if scope.ProviderMachine.Spec.Subnet != nil && scope.ProviderMachine.Spec.Subnet.ID != nil {
+ input.SubnetID = *scope.ProviderMachine.Spec.Subnet.ID
+ } else if scope.ProviderMachine.Spec.AvailabilityZone != nil {
+ sns := s.scope.Subnets().FilterPrivate().FilterByZone(*scope.ProviderMachine.Spec.AvailabilityZone)
if len(sns) == 0 {
return nil, awserrors.NewFailedDependency(
errors.Errorf("failed to run machine %q, no subnets available in availaibility zone %q",
- machine.Name(),
- *machine.MachineConfig.AvailabilityZone,
+ scope.Name(),
+ *scope.ProviderMachine.Spec.AvailabilityZone,
),
)
}
@@ -153,7 +153,7 @@ func (s *Service) createInstance(machine *actuators.MachineScope, bootstrapToken
sns := s.scope.Subnets().FilterPrivate()
if len(sns) == 0 {
return nil, awserrors.NewFailedDependency(
- errors.Errorf("failed to run machine %q, no subnets available", machine.Name()),
+ errors.Errorf("failed to run machine %q, no subnets available", scope.Name()),
)
}
input.SubnetID = sns[0].ID
@@ -171,65 +171,48 @@ func (s *Service) createInstance(machine *actuators.MachineScope, bootstrapToken
)
}
- s.scope.V(3).Info("Generating CA key pair")
- // caCertHash, err := certificates.GenerateCertificateHash(s.scope.ClusterConfig.CAKeyPair.Cert)
- // if err != nil {
- // return input, err
- // }
+ // Set userdata.
+ input.UserData = aws.String(*scope.Machine.Spec.Bootstrap.Data)
- // apiServerEndpoint := fmt.Sprintf("%s:%d", machine.Network().APIServerELB.DNSName, apiServerBindPort)
-
- // apply values based on the role of the machine
- switch machine.Role() {
- case "controlplane":
- // TODO
- case "node":
- // TODO
-
- default:
- return nil, errors.Errorf("Unknown node role %q", machine.Role())
- }
-
- ids, err := s.GetCoreSecurityGroups(machine)
+ // Set security groups.
+ ids, err := s.GetCoreSecurityGroups(scope)
if err != nil {
return nil, err
}
- input.SecurityGroupIDs = append(input.SecurityGroupIDs,
- ids...,
- )
+ input.SecurityGroupIDs = append(input.SecurityGroupIDs, ids...)
// Pick SSH key, if any.
- if machine.MachineConfig.KeyName != "" {
- input.KeyName = aws.String(machine.MachineConfig.KeyName)
+ if scope.ProviderMachine.Spec.KeyName != "" {
+ input.KeyName = aws.String(scope.ProviderMachine.Spec.KeyName)
} else {
input.KeyName = aws.String(defaultSSHKeyName)
}
- s.scope.V(2).Info("Running instance", "machine-role", machine.Role())
- out, err := s.runInstance(machine.Role(), input)
+ s.scope.V(2).Info("Running instance", "machine-role", scope.Role())
+ out, err := s.runInstance(scope.Role(), input)
if err != nil {
return nil, err
}
- record.Eventf(machine.Machine, "CreatedInstance", "Created new %s instance with id %q", machine.Role(), out.ID)
+ record.Eventf(scope.Machine, "CreatedInstance", "Created new %s instance with id %q", scope.Role(), out.ID)
return out, nil
}
// GetCoreSecurityGroups looks up the security group IDs managed by this actuator
// They are considered "core" to its proper functioning
-func (s *Service) GetCoreSecurityGroups(machine *actuators.MachineScope) ([]string, error) {
+func (s *Service) GetCoreSecurityGroups(scope *actuators.MachineScope) ([]string, error) {
// These are common across both controlplane and node machines
sgRoles := []v1alpha2.SecurityGroupRole{
v1alpha2.SecurityGroupNode,
v1alpha2.SecurityGroupLB,
}
- switch machine.Role() {
+ switch scope.Role() {
case "node":
// Just the common security groups above
case "controlplane":
sgRoles = append(sgRoles, v1alpha2.SecurityGroupControlPlane)
default:
- return nil, errors.Errorf("Unknown node role %q", machine.Role())
+ return nil, errors.Errorf("Unknown node role %q", scope.Role())
}
ids := make([]string, 0, len(sgRoles))
for _, sg := range sgRoles {
@@ -282,49 +265,52 @@ func (s *Service) TerminateInstanceAndWait(instanceID string) error {
}
// MachineExists will return whether or not a machine exists.
-func (s *Service) MachineExists(machine *actuators.MachineScope) (bool, error) {
- var err error
- var instance *v1alpha2.Instance
- if machine.MachineStatus.InstanceID != nil {
- instance, err = s.InstanceIfExists(machine.MachineStatus.InstanceID)
+func (s *Service) MachineExists(scope *actuators.MachineScope) (bool, error) {
+ var (
+ err error
+ instance *v1alpha2.Instance
+ )
+
+ if id := scope.GetInstanceID(); id != nil {
+ instance, err = s.InstanceIfExists(id)
} else {
- instance, err = s.InstanceByTags(machine)
+ instance, err = s.InstanceByTags(scope)
}
if err != nil {
if awserrors.IsNotFound(err) {
return false, nil
}
- return false, errors.Wrapf(err, "failed to lookup machine %q", machine.Name())
+ return false, errors.Wrapf(err, "failed to lookup machine %q", scope.Name())
}
return instance != nil, nil
}
// CreateOrGetMachine will either return an existing instance or create and return an instance.
-func (s *Service) CreateOrGetMachine(machine *actuators.MachineScope, bootstrapToken string) (*v1alpha2.Instance, error) {
+func (s *Service) CreateOrGetMachine(scope *actuators.MachineScope) (*v1alpha2.Instance, error) {
s.scope.V(2).Info("Attempting to create or get machine")
// instance id exists, try to get it
- if machine.MachineStatus.InstanceID != nil {
- s.scope.V(2).Info("Looking up machine by id", "instance-id", *machine.MachineStatus.InstanceID)
+ if id := scope.GetInstanceID(); id != nil {
+ s.scope.V(2).Info("Looking up machine by id", "instance-id", *id)
- instance, err := s.InstanceIfExists(machine.MachineStatus.InstanceID)
+ instance, err := s.InstanceIfExists(id)
if err != nil && !awserrors.IsNotFound(err) {
- return nil, errors.Wrapf(err, "failed to look up machine %q by id %q", machine.Name(), *machine.MachineStatus.InstanceID)
+ return nil, errors.Wrapf(err, "failed to look up machine %q by id %q", scope.Name(), *id)
} else if err == nil && instance != nil {
return instance, nil
}
}
s.scope.V(2).Info("Looking up machine by tags")
- instance, err := s.InstanceByTags(machine)
+ instance, err := s.InstanceByTags(scope)
if err != nil && !awserrors.IsNotFound(err) {
- return nil, errors.Wrapf(err, "failed to query machine %q instance by tags", machine.Name())
+ return nil, errors.Wrapf(err, "failed to query machine %q instance by tags", scope.Name())
} else if err == nil && instance != nil {
return instance, nil
}
- return s.createInstance(machine, bootstrapToken)
+ return s.createInstance(scope)
}
func (s *Service) runInstance(role string, i *v1alpha2.Instance) (*v1alpha2.Instance, error) {
diff --git a/pkg/cloud/aws/services/ec2/instances_test.go b/pkg/cloud/aws/services/ec2/instances_test.go
index 847a801f19..2ac40eef5a 100644
--- a/pkg/cloud/aws/services/ec2/instances_test.go
+++ b/pkg/cloud/aws/services/ec2/instances_test.go
@@ -24,12 +24,14 @@ import (
"github.com/golang/mock/gomock"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/utils/pointer"
"sigs.k8s.io/cluster-api-provider-aws/pkg/apis/infrastructure/v1alpha2"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/actuators"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/awserrors"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/ec2/mock_ec2iface"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/elb/mock_elbiface"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
)
func TestInstanceIfExists(t *testing.T) {
@@ -156,7 +158,7 @@ func TestInstanceIfExists(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
- scope, err := actuators.NewScope(actuators.ScopeParams{
+ scope, err := actuators.NewClusterScope(actuators.ClusterScopeParams{
Cluster: &clusterv1.Cluster{},
AWSClients: actuators.AWSClients{
EC2: ec2Mock,
@@ -234,7 +236,7 @@ func TestTerminateInstance(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
- scope, err := actuators.NewScope(actuators.ScopeParams{
+ scope, err := actuators.NewClusterScope(actuators.ClusterScopeParams{
Cluster: &clusterv1.Cluster{},
AWSClients: actuators.AWSClients{
EC2: ec2Mock,
@@ -281,13 +283,13 @@ dTga1FiyISsMchVaVKD5aX7hkxMP1/C98KdVzWQ4k12TBOhZDYUS67M4ibBtw/og
vuO9LYxDXLVY9F7W4ccyCqe27Cj1xyAvdZxwhITrib8Wg5CMqoRpqTw5V3+TpA==
-----END CERTIFICATE-----
`)
+
testcases := []struct {
name string
machine clusterv1.Machine
machineConfig *v1alpha2.AWSMachineSpec
clusterStatus *v1alpha2.AWSClusterProviderStatus
clusterConfig *v1alpha2.AWSClusterProviderSpec
- cluster clusterv1.Cluster
expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
check func(instance *v1alpha2.Instance, err error)
}{
@@ -297,6 +299,11 @@ vuO9LYxDXLVY9F7W4ccyCqe27Cj1xyAvdZxwhITrib8Wg5CMqoRpqTw5V3+TpA==
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
+ Spec: clusterv1.MachineSpec{
+ Bootstrap: clusterv1.Bootstrap{
+ Data: pointer.StringPtr("user-data"),
+ },
+ },
},
machineConfig: &v1alpha2.AWSMachineSpec{
AMI: v1alpha2.AWSResourceReference{
@@ -339,22 +346,6 @@ vuO9LYxDXLVY9F7W4ccyCqe27Cj1xyAvdZxwhITrib8Wg5CMqoRpqTw5V3+TpA==
Key: []byte("y"),
},
},
- cluster: clusterv1.Cluster{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test1",
- },
- Spec: clusterv1.ClusterSpec{
- ClusterNetwork: &clusterv1.ClusterNetworkingConfig{
- ServiceDomain: "cluster.local",
- Services: clusterv1.NetworkRanges{
- CIDRBlocks: []string{"192.168.0.0/16"},
- },
- Pods: clusterv1.NetworkRanges{
- CIDRBlocks: []string{"192.168.0.0/16"},
- },
- },
- },
- },
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.
DescribeImages(gomock.Any()).
@@ -398,6 +389,11 @@ vuO9LYxDXLVY9F7W4ccyCqe27Cj1xyAvdZxwhITrib8Wg5CMqoRpqTw5V3+TpA==
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
+ Spec: clusterv1.MachineSpec{
+ Bootstrap: clusterv1.Bootstrap{
+ Data: pointer.StringPtr("user-data"),
+ },
+ },
},
machineConfig: &v1alpha2.AWSMachineSpec{
AMI: v1alpha2.AWSResourceReference{
@@ -454,22 +450,6 @@ vuO9LYxDXLVY9F7W4ccyCqe27Cj1xyAvdZxwhITrib8Wg5CMqoRpqTw5V3+TpA==
Key: []byte("y"),
},
},
- cluster: clusterv1.Cluster{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test1",
- },
- Spec: clusterv1.ClusterSpec{
- ClusterNetwork: &clusterv1.ClusterNetworkingConfig{
- ServiceDomain: "cluster.local",
- Services: clusterv1.NetworkRanges{
- CIDRBlocks: []string{"192.168.0.0/16"},
- },
- Pods: clusterv1.NetworkRanges{
- CIDRBlocks: []string{"192.168.0.0/16"},
- },
- },
- },
- },
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.
DescribeImages(gomock.Any()).
@@ -522,33 +502,70 @@ vuO9LYxDXLVY9F7W4ccyCqe27Cj1xyAvdZxwhITrib8Wg5CMqoRpqTw5V3+TpA==
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
- scope, err := actuators.NewMachineScope(actuators.MachineScopeParams{
- Cluster: &tc.cluster,
- Machine: &clusterv1.Machine{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- "set": "node",
+ cluster := &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test1",
+ },
+ Spec: clusterv1.ClusterSpec{
+ ClusterNetwork: &clusterv1.ClusterNetworkingConfig{
+ ServiceDomain: "cluster.local",
+ Services: clusterv1.NetworkRanges{
+ CIDRBlocks: []string{"192.168.0.0/16"},
+ },
+ Pods: clusterv1.NetworkRanges{
+ CIDRBlocks: []string{"192.168.0.0/16"},
+ },
+ },
+ },
+ }
+
+ machine := &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test1",
+ Labels: map[string]string{
+ "set": "node",
+ clusterv1.MachineClusterLabelName: "test1",
+ },
+ },
+ Spec: clusterv1.MachineSpec{
+ Bootstrap: clusterv1.Bootstrap{
+ Data: pointer.StringPtr("user-data"),
+ },
+ },
+ }
+
+ awsmachine := &v1alpha2.AWSMachine{
+ ObjectMeta: metav1.ObjectMeta{
+ OwnerReferences: []metav1.OwnerReference{
+ {
+ APIVersion: clusterv1.SchemeGroupVersion.String(),
+ Kind: "Machine",
+ Name: "test1",
},
},
},
- AWSMachine: &v1alpha2.AWSMachine{},
+ }
+
+ scope, err := actuators.NewMachineScope(actuators.MachineScopeParams{
+ ProviderMachine: awsmachine,
AWSClients: actuators.AWSClients{
EC2: ec2Mock,
ELB: elbMock,
},
+ Client: fake.NewFakeClient(cluster, machine),
})
if err != nil {
t.Fatalf("Failed to create test context: %v", err)
}
- scope.Scope.ClusterConfig = tc.clusterConfig
- scope.Scope.ClusterStatus = tc.clusterStatus
- scope.MachineConfig = tc.machineConfig
+ scope.Parent.ClusterConfig = tc.clusterConfig
+ scope.Parent.ClusterStatus = tc.clusterStatus
+ scope.ProviderMachine.Spec = *tc.machineConfig
tc.expect(ec2Mock.EXPECT())
- s := NewService(scope.Scope)
- instance, err := s.createInstance(scope, "token")
+ s := NewService(scope.Parent)
+ instance, err := s.createInstance(scope)
tc.check(instance, err)
})
}
diff --git a/pkg/cloud/aws/services/ec2/natgateways_test.go b/pkg/cloud/aws/services/ec2/natgateways_test.go
index fc8a503238..867970338b 100644
--- a/pkg/cloud/aws/services/ec2/natgateways_test.go
+++ b/pkg/cloud/aws/services/ec2/natgateways_test.go
@@ -288,7 +288,7 @@ func TestReconcileNatGateways(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
- scope, err := actuators.NewScope(actuators.ScopeParams{
+ scope, err := actuators.NewClusterScope(actuators.ClusterScopeParams{
Cluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
},
diff --git a/pkg/cloud/aws/services/ec2/routetables_test.go b/pkg/cloud/aws/services/ec2/routetables_test.go
index 7dd9f686d9..aaab45f5d1 100644
--- a/pkg/cloud/aws/services/ec2/routetables_test.go
+++ b/pkg/cloud/aws/services/ec2/routetables_test.go
@@ -148,7 +148,7 @@ func TestReconcileRouteTables(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
- scope, err := actuators.NewScope(actuators.ScopeParams{
+ scope, err := actuators.NewClusterScope(actuators.ClusterScopeParams{
Cluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
},
diff --git a/pkg/cloud/aws/services/ec2/securitygroups_test.go b/pkg/cloud/aws/services/ec2/securitygroups_test.go
index 832e6f1c7c..eaa77bea0c 100644
--- a/pkg/cloud/aws/services/ec2/securitygroups_test.go
+++ b/pkg/cloud/aws/services/ec2/securitygroups_test.go
@@ -140,7 +140,7 @@ func TestReconcileSecurityGroups(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
- scope, err := actuators.NewScope(actuators.ScopeParams{
+ scope, err := actuators.NewClusterScope(actuators.ClusterScopeParams{
Cluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
},
diff --git a/pkg/cloud/aws/services/ec2/service.go b/pkg/cloud/aws/services/ec2/service.go
index 1387fb8dbe..8b82ad9f26 100644
--- a/pkg/cloud/aws/services/ec2/service.go
+++ b/pkg/cloud/aws/services/ec2/service.go
@@ -24,11 +24,11 @@ import (
// The interfaces are broken down like this to group functions together.
// One alternative is to have a large list of functions from the ec2 client.
type Service struct {
- scope *actuators.Scope
+ scope *actuators.ClusterScope
}
// NewService returns a new service given the ec2 api client.
-func NewService(scope *actuators.Scope) *Service {
+func NewService(scope *actuators.ClusterScope) *Service {
return &Service{
scope: scope,
}
diff --git a/pkg/cloud/aws/services/ec2/subnets_test.go b/pkg/cloud/aws/services/ec2/subnets_test.go
index 12c3a6a309..9cef7e045a 100644
--- a/pkg/cloud/aws/services/ec2/subnets_test.go
+++ b/pkg/cloud/aws/services/ec2/subnets_test.go
@@ -25,13 +25,21 @@ import (
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/mock/gomock"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes/scheme"
+ "sigs.k8s.io/cluster-api-provider-aws/pkg/apis"
"sigs.k8s.io/cluster-api-provider-aws/pkg/apis/infrastructure/v1alpha2"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/actuators"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/ec2/mock_ec2iface"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/elb/mock_elbiface"
+ capi "sigs.k8s.io/cluster-api/pkg/apis"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2"
)
+func init() {
+ capi.AddToScheme(scheme.Scheme)
+ apis.AddToScheme(scheme.Scheme)
+}
+
const (
subnetsVPCID = "vpc-subnets"
)
@@ -487,7 +495,7 @@ func TestReconcileSubnets(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
- scope, err := actuators.NewScope(actuators.ScopeParams{
+ scope, err := actuators.NewClusterScope(actuators.ClusterScopeParams{
Cluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
},
@@ -657,7 +665,7 @@ func TestDiscoverSubnets(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
- scope, err := actuators.NewScope(actuators.ScopeParams{
+ scope, err := actuators.NewClusterScope(actuators.ClusterScopeParams{
Cluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
},
diff --git a/pkg/cloud/aws/services/ec2/vpc_test.go b/pkg/cloud/aws/services/ec2/vpc_test.go
index 68c7569fb6..032c03ffac 100644
--- a/pkg/cloud/aws/services/ec2/vpc_test.go
+++ b/pkg/cloud/aws/services/ec2/vpc_test.go
@@ -130,7 +130,7 @@ func TestReconcileVPC(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
- scope, err := actuators.NewScope(actuators.ScopeParams{
+ scope, err := actuators.NewClusterScope(actuators.ClusterScopeParams{
Cluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
},
diff --git a/pkg/cloud/aws/services/elb/loadbalancer_test.go b/pkg/cloud/aws/services/elb/loadbalancer_test.go
index d771288073..ad13a9f75c 100644
--- a/pkg/cloud/aws/services/elb/loadbalancer_test.go
+++ b/pkg/cloud/aws/services/elb/loadbalancer_test.go
@@ -69,7 +69,7 @@ func TestDeleteLoadBalancers(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
- scope, err := actuators.NewScope(actuators.ScopeParams{
+ scope, err := actuators.NewClusterScope(actuators.ClusterScopeParams{
Cluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
},
diff --git a/pkg/cloud/aws/services/elb/service.go b/pkg/cloud/aws/services/elb/service.go
index cf84c6c52e..19a025b306 100644
--- a/pkg/cloud/aws/services/elb/service.go
+++ b/pkg/cloud/aws/services/elb/service.go
@@ -24,11 +24,11 @@ import (
// The interfaces are broken down like this to group functions together.
// One alternative is to have a large list of functions from the ec2 client.
type Service struct {
- scope *actuators.Scope
+ scope *actuators.ClusterScope
}
// NewService returns a new service given the api clients.
-func NewService(scope *actuators.Scope) *Service {
+func NewService(scope *actuators.ClusterScope) *Service {
return &Service{
scope: scope,
}
diff --git a/pkg/cloud/aws/services/elb/service_test.go b/pkg/cloud/aws/services/elb/service_test.go
index e9fdc1b2ae..9a0c8e1007 100644
--- a/pkg/cloud/aws/services/elb/service_test.go
+++ b/pkg/cloud/aws/services/elb/service_test.go
@@ -34,7 +34,7 @@ func TestNewService(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
- scope, err := actuators.NewScope(actuators.ScopeParams{
+ scope, err := actuators.NewClusterScope(actuators.ClusterScopeParams{
Cluster: &clusterv1.Cluster{},
AWSClients: actuators.AWSClients{
EC2: ec2Mock,
diff --git a/pkg/cloud/aws/services/interfaces.go b/pkg/cloud/aws/services/interfaces.go
index 0598afcbcc..07b700df86 100644
--- a/pkg/cloud/aws/services/interfaces.go
+++ b/pkg/cloud/aws/services/interfaces.go
@@ -66,7 +66,7 @@ type EC2MachineInterface interface {
TerminateInstance(id string) error
GetCoreSecurityGroups(machine *actuators.MachineScope) ([]string, error)
GetInstanceSecurityGroups(id string) (map[string][]string, error)
- CreateOrGetMachine(machine *actuators.MachineScope, token string) (*providerv1.Instance, error)
+ CreateOrGetMachine(machine *actuators.MachineScope) (*providerv1.Instance, error)
UpdateInstanceSecurityGroups(id string, securityGroups []string) error
UpdateResourceTags(resourceID *string, create map[string]string, remove map[string]string) error
}
diff --git a/pkg/cloud/aws/services/mocks/services_mock.go b/pkg/cloud/aws/services/mocks/services_mock.go
index c0e7f6819e..c9185937a9 100644
--- a/pkg/cloud/aws/services/mocks/services_mock.go
+++ b/pkg/cloud/aws/services/mocks/services_mock.go
@@ -51,18 +51,18 @@ func (m *MockEC2Interface) EXPECT() *MockEC2InterfaceMockRecorder {
}
// CreateOrGetMachine mocks base method
-func (m *MockEC2Interface) CreateOrGetMachine(arg0 *actuators.MachineScope, arg1 string) (*v1alpha2.Instance, error) {
+func (m *MockEC2Interface) CreateOrGetMachine(arg0 *actuators.MachineScope) (*v1alpha2.Instance, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "CreateOrGetMachine", arg0, arg1)
+ ret := m.ctrl.Call(m, "CreateOrGetMachine", arg0)
ret0, _ := ret[0].(*v1alpha2.Instance)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateOrGetMachine indicates an expected call of CreateOrGetMachine
-func (mr *MockEC2InterfaceMockRecorder) CreateOrGetMachine(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockEC2InterfaceMockRecorder) CreateOrGetMachine(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrGetMachine", reflect.TypeOf((*MockEC2Interface)(nil).CreateOrGetMachine), arg0, arg1)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrGetMachine", reflect.TypeOf((*MockEC2Interface)(nil).CreateOrGetMachine), arg0)
}
// DeleteBastion mocks base method
diff --git a/pkg/controller/awsmachine/BUILD.bazel b/pkg/controller/awsmachine/BUILD.bazel
index 26d09e18ba..3d97b6ecd2 100644
--- a/pkg/controller/awsmachine/BUILD.bazel
+++ b/pkg/controller/awsmachine/BUILD.bazel
@@ -2,13 +2,29 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
- srcs = ["awsmachine_controller.go"],
+ srcs = [
+ "annotations.go",
+ "awsmachine_controller.go",
+ "security_groups.go",
+ "tags.go",
+ ],
importpath = "sigs.k8s.io/cluster-api-provider-aws/pkg/controller/awsmachine",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/infrastructure/v1alpha2:go_default_library",
+ "//pkg/cloud/aws/actuators:go_default_library",
+ "//pkg/cloud/aws/services:go_default_library",
+ "//pkg/cloud/aws/services/ec2:go_default_library",
+ "//pkg/cloud/aws/services/elb:go_default_library",
+ "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
+ "//vendor/github.com/pkg/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+ "//vendor/k8s.io/klog:go_default_library",
+ "//vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2:go_default_library",
+ "//vendor/sigs.k8s.io/cluster-api/pkg/errors:go_default_library",
+ "//vendor/sigs.k8s.io/cluster-api/pkg/util:go_default_library",
"//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library",
"//vendor/sigs.k8s.io/controller-runtime/pkg/controller:go_default_library",
"//vendor/sigs.k8s.io/controller-runtime/pkg/handler:go_default_library",
@@ -32,12 +48,11 @@ go_test(
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
- "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
+ "//vendor/sigs.k8s.io/cluster-api/pkg/apis:go_default_library",
"//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library",
"//vendor/sigs.k8s.io/controller-runtime/pkg/envtest:go_default_library",
"//vendor/sigs.k8s.io/controller-runtime/pkg/manager:go_default_library",
- "//vendor/sigs.k8s.io/controller-runtime/pkg/reconcile:go_default_library",
],
)
diff --git a/pkg/controller/awsmachine/annotations.go b/pkg/controller/awsmachine/annotations.go
new file mode 100644
index 0000000000..fc35991918
--- /dev/null
+++ b/pkg/controller/awsmachine/annotations.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package awsmachine
+
+import (
+ "encoding/json"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/infrastructure/v1alpha2"
+)
+
+// updateMachineAnnotationJSON updates the `annotation` on `machine` with
+// `content`. `content` in this case should be a `map[string]interface{}`
+// suitable for turning into JSON. This `content` map will be marshalled into a
+// JSON string before being set as the given `annotation`.
+func (r *ReconcileAWSMachine) updateMachineAnnotationJSON(machine *infrav1.AWSMachine, annotation string, content map[string]interface{}) error {
+ b, err := json.Marshal(content)
+ if err != nil {
+ return err
+ }
+
+ r.updateMachineAnnotation(machine, annotation, string(b))
+ return nil
+}
+
+// updateMachineAnnotation updates the `annotation` on the given `machine` with
+// `content`.
+func (r *ReconcileAWSMachine) updateMachineAnnotation(machine *infrav1.AWSMachine, annotation string, content string) {
+ // Get the annotations
+ annotations := machine.GetAnnotations()
+
+ // Set our annotation to the given content.
+ annotations[annotation] = content
+
+ // Update the machine object with these annotations
+ machine.SetAnnotations(annotations)
+}
+
+// machineAnnotationJSON returns a map[string]interface{} from a JSON annotation.
+// This method gets the given `annotation` from the `machine` and unmarshals it
+// from a JSON string into a `map[string]interface{}`.
+func (r *ReconcileAWSMachine) machineAnnotationJSON(machine *infrav1.AWSMachine, annotation string) (map[string]interface{}, error) {
+ out := map[string]interface{}{}
+
+ jsonAnnotation := r.machineAnnotation(machine, annotation)
+ if len(jsonAnnotation) == 0 {
+ return out, nil
+ }
+
+ err := json.Unmarshal([]byte(jsonAnnotation), &out)
+ if err != nil {
+ return out, err
+ }
+
+ return out, nil
+}
+
+// machineAnnotation fetches the value of the given machine annotation.
+func (r *ReconcileAWSMachine) machineAnnotation(machine *infrav1.AWSMachine, annotation string) string {
+ return machine.GetAnnotations()[annotation]
+}
diff --git a/pkg/controller/awsmachine/awsmachine_controller.go b/pkg/controller/awsmachine/awsmachine_controller.go
index 269f391434..66ebecf7d2 100644
--- a/pkg/controller/awsmachine/awsmachine_controller.go
+++ b/pkg/controller/awsmachine/awsmachine_controller.go
@@ -18,10 +18,22 @@ package awsmachine
import (
"context"
+ "fmt"
+ "time"
- "k8s.io/apimachinery/pkg/api/errors"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/pkg/errors"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
- infrastructurev1alpha2 "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/infrastructure/v1alpha2"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/klog"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/infrastructure/v1alpha2"
+ "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/actuators"
+ "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/ec2"
+ "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/elb"
+ clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2"
+ capierrors "sigs.k8s.io/cluster-api/pkg/errors"
+ "sigs.k8s.io/cluster-api/pkg/util"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
@@ -30,6 +42,12 @@ import (
"sigs.k8s.io/controller-runtime/pkg/source"
)
+const (
+ waitForClusterInfrastructureReadyDuration = 15 * time.Second //nolint
+ waitForControlPlaneMachineExistenceDuration = 5 * time.Second //nolint
+ waitForControlPlaneReadyDuration = 5 * time.Second //nolint
+)
+
// Add creates a new AWSMachine Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
@@ -37,8 +55,11 @@ func Add(mgr manager.Manager) error {
}
// newReconciler returns a new reconcile.Reconciler
-func newReconciler(mgr manager.Manager) reconcile.Reconciler {
- return &ReconcileAWSMachine{Client: mgr.GetClient(), scheme: mgr.GetScheme()}
+func newReconciler(mgr manager.Manager) *ReconcileAWSMachine {
+ return &ReconcileAWSMachine{
+ Client: mgr.GetClient(),
+ scheme: mgr.GetScheme(),
+ }
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler
@@ -50,12 +71,24 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
}
// Watch for changes to AWSMachine
- err = c.Watch(&source.Kind{Type: &infrastructurev1alpha2.AWSMachine{}}, &handler.EnqueueRequestForObject{})
+ err = c.Watch(
+ &source.Kind{Type: &infrav1.AWSMachine{}},
+ &handler.EnqueueRequestForObject{},
+ )
if err != nil {
return err
}
- return nil
+ return c.Watch(
+ &source.Kind{Type: &clusterv1.Machine{}},
+ &handler.EnqueueRequestsFromMapFunc{
+ ToRequests: util.MachineToInfrastructureMapFunc(schema.GroupVersionKind{
+ Group: infrav1.SchemeGroupVersion.Group,
+ Version: infrav1.SchemeGroupVersion.Version,
+ Kind: "AWSMachine",
+ }),
+ },
+ )
}
var _ reconcile.Reconciler = &ReconcileAWSMachine{}
@@ -69,18 +102,263 @@ type ReconcileAWSMachine struct {
// Reconcile reads that state of the cluster for a AWSMachine object and makes changes based on the state read
// and what is in the AWSMachine.Spec
func (r *ReconcileAWSMachine) Reconcile(request reconcile.Request) (reconcile.Result, error) {
- // Fetch the AWSMachine instance
- instance := &infrastructurev1alpha2.AWSMachine{}
- err := r.Get(context.TODO(), request.NamespacedName, instance)
+ ctx := context.Background()
+
+ // Fetch the AWSMachine instance.
+ awsm := &infrav1.AWSMachine{}
+ err := r.Get(ctx, request.NamespacedName, awsm)
if err != nil {
- if errors.IsNotFound(err) {
- // Object not found, return. Created objects are automatically garbage collected.
- // For additional cleanup logic use finalizers.
+ if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
- // Error reading the object - requeue the request.
+ return reconcile.Result{}, err
+ }
+
+ // If the Machine hasn't been deleted and doesn't have a finalizer, add one.
+ if awsm.ObjectMeta.DeletionTimestamp.IsZero() {
+ if !util.Contains(awsm.Finalizers, clusterv1.MachineFinalizer) {
+ awsm.Finalizers = append(awsm.ObjectMeta.Finalizers, clusterv1.MachineFinalizer)
+ }
+ }
+
+ // Create the scope
+ scope, err := actuators.NewMachineScope(actuators.MachineScopeParams{
+ ProviderMachine: awsm,
+ Client: r.Client,
+ })
+ if err != nil {
+ if requeueErr, ok := errors.Cause(err).(capierrors.HasRequeueAfterError); ok {
+ return reconcile.Result{RequeueAfter: requeueErr.GetRequeueAfter()}, nil
+ }
+ return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err)
+ }
+ defer scope.Close()
+
+ // Make sure bootstrap data is available and populated.
+ if scope.Machine.Spec.Bootstrap.Data == nil || *scope.Machine.Spec.Bootstrap.Data == "" {
+ klog.Infof("Waiting for bootstrap data to be available on AWSMachine %q/%q", awsm.Namespace, awsm.Name)
+ return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
+ }
+
+ // Call the internal reconciler.
+ if err := r.reconcile(ctx, scope); err != nil {
+ if requeueErr, ok := errors.Cause(err).(capierrors.HasRequeueAfterError); ok {
+ klog.Infof("Reconciliation for AWSMachine %q in namespace %q asked to requeue: %v", awsm.Name, awsm.Namespace, err)
+ return reconcile.Result{Requeue: true, RequeueAfter: requeueErr.GetRequeueAfter()}, nil
+ }
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
+
+func (r *ReconcileAWSMachine) reconcile(ctx context.Context, scope *actuators.MachineScope) error {
+ exist, err := r.exists(scope)
+ if err != nil {
+ klog.Errorf("Failed to check if Machine %q infrastructure exists: %v", scope.Name(), err)
+ return err
+ }
+
+ // Reconcile ProviderID.
+ if pid := scope.GetProviderID(); pid == nil || *pid == "" {
+ scope.SetProviderID(fmt.Sprintf("aws:////%s", *scope.GetInstanceID()))
+ }
+
+ if exist {
+ scope.Info("Reconciling Machine triggers idempotent update")
+ return r.update(scope)
+ }
+
+ // Machine resource created. Machine does not yet exist.
+ scope.Info("Reconciling Machine triggers idempotent create")
+ return r.create(scope)
+}
+
+// create creates a machine and is invoked by the machine controller.
+func (r *ReconcileAWSMachine) create(scope *actuators.MachineScope) error {
+ if scope.Parent.Cluster.Annotations[infrav1.AnnotationClusterInfrastructureReady] != infrav1.ValueReady {
+ scope.Info("Cluster infrastructure is not ready yet - requeuing machine")
+ return &capierrors.RequeueAfterError{RequeueAfter: waitForClusterInfrastructureReadyDuration}
+ }
+
+ ec2svc := ec2.NewService(scope.Parent)
+
+ scope.Info("Retrieving machines for cluster")
+ machineList := &clusterv1.MachineList{}
+ if err := r.List(context.Background(), machineList, actuators.ListOptionsForCluster(scope.Parent.Name())); err != nil {
+ return errors.Wrapf(err, "failed to retrieve machines in cluster %q", scope.Parent.Name())
+ }
+
+ controlPlaneMachines := util.GetControlPlaneMachinesFromList(machineList)
+ if len(controlPlaneMachines) == 0 {
+ scope.Info("No control plane machines exist yet - requeuing")
+ return &capierrors.RequeueAfterError{RequeueAfter: waitForControlPlaneMachineExistenceDuration}
+ }
+
+ // Create the Machine.
+ i, err := ec2svc.CreateOrGetMachine(scope)
+ if err != nil {
+ return errors.Errorf("failed to create or get machine: %+v", err)
+ }
+
+ scope.SetInstanceID(i.ID)
+ scope.SetInstanceState(i.State)
+ scope.SetAnnotation("cluster-api-provider-aws", "true")
+
+ if err := r.reconcileLBAttachment(scope, i); err != nil {
+ return errors.Errorf("failed to reconcile LB attachment: %+v", err)
+ }
+
+ scope.Info("Create completed")
+ return nil
+}
+
+func (r *ReconcileAWSMachine) exists(scope *actuators.MachineScope) (bool, error) {
+ ec2svc := ec2.NewService(scope.Parent)
+
+ // TODO: worry about pointers. InstanceIfExists returns *any* instance.
+ if scope.GetInstanceID() == nil {
+ return false, nil
+ }
+
+ instance, err := ec2svc.InstanceIfExists(scope.GetInstanceID())
+ if err != nil {
+ return false, errors.Errorf("failed to retrieve instance: %+v", err)
+ }
+
+ if instance == nil {
+ return false, nil
+ }
+
+ scope.Info("Found instance for machine", "instance", instance)
+
+ switch instance.State {
+ case infrav1.InstanceStateRunning:
+ scope.Info("Machine instance is running", "instance-id", *scope.GetInstanceID())
+ case infrav1.InstanceStatePending:
+ scope.Info("Machine instance is pending", "instance-id", *scope.GetInstanceID())
+ default:
+ return false, nil
+ }
+
+ scope.SetInstanceState(instance.State)
+
+ if err := r.reconcileLBAttachment(scope, instance); err != nil {
+ return true, err
+ }
+
+ return true, nil
+}
+
+func (r *ReconcileAWSMachine) update(scope *actuators.MachineScope) error {
+ ec2svc := ec2.NewService(scope.Parent)
+
+ // Get the current instance description from AWS.
+ instanceDescription, err := ec2svc.InstanceIfExists(scope.GetInstanceID())
+ if err != nil {
+ return errors.Errorf("failed to get instance: %+v", err)
+ }
+
+ // We can now compare the various AWS state to the state we were passed.
+ // We will check immutable state first, in order to fail quickly before
+ // moving on to state that we can mutate.
+ if errs := r.isMachineOutdated(&scope.ProviderMachine.Spec, instanceDescription); len(errs) > 0 {
+ return errors.Errorf("found attempt to change immutable state for machine %q: %+q", scope.Name(), errs)
+ }
+
+ existingSecurityGroups, err := ec2svc.GetInstanceSecurityGroups(*scope.GetInstanceID())
+ if err != nil {
+ return err
+ }
+
+ // Ensure that the security groups are correct.
+ _, err = r.ensureSecurityGroups(
+ ec2svc,
+ scope,
+ scope.ProviderMachine.Spec.AdditionalSecurityGroups,
+ existingSecurityGroups,
+ )
+ if err != nil {
+ return errors.Errorf("failed to apply security groups: %+v", err)
+ }
+
+ // Ensure that the tags are correct.
+ _, err = r.ensureTags(
+ ec2svc,
+ scope.ProviderMachine,
+ scope.GetInstanceID(),
+ scope.ProviderMachine.Spec.AdditionalTags,
+ )
+ if err != nil {
+ return errors.Errorf("failed to ensure tags: %+v", err)
+ }
+
+ return nil
+}
+
+func (r *ReconcileAWSMachine) reconcileLBAttachment(scope *actuators.MachineScope, i *infrav1.Instance) error {
+ if !scope.IsControlPlane() {
+ return nil
+ }
+
+ elbsvc := elb.NewService(scope.Parent)
+ if err := elbsvc.RegisterInstanceWithAPIServerELB(i.ID); err != nil {
+ return errors.Wrapf(err, "could not register control plane instance %q with load balancer", i.ID)
+ }
+ return nil
+}
+
+// isMachineOutdated checks that no immutable fields have been updated in an
+// Update request.
+// Returns a slice of errors representing attempts to change immutable state
+func (r *ReconcileAWSMachine) isMachineOutdated(spec *infrav1.AWSMachineSpec, i *infrav1.Instance) (errs []error) {
+ // Instance Type
+ if spec.InstanceType != i.Type {
+ errs = append(errs, errors.Errorf("instance type cannot be mutated from %q to %q", i.Type, spec.InstanceType))
+ }
+
+ // IAM Profile
+ if spec.IAMInstanceProfile != i.IAMProfile {
+ errs = append(errs, errors.Errorf("instance IAM profile cannot be mutated from %q to %q", i.IAMProfile, spec.IAMInstanceProfile))
+ }
+
+ // SSH Key Name
+ if spec.KeyName != aws.StringValue(i.KeyName) {
+ errs = append(errs, errors.Errorf("SSH key name cannot be mutated from %q to %q", aws.StringValue(i.KeyName), spec.KeyName))
+ }
+
+ // Root Device Size
+ if spec.RootDeviceSize > 0 && spec.RootDeviceSize != i.RootDeviceSize {
+ errs = append(errs, errors.Errorf("Root volume size cannot be mutated from %v to %v", i.RootDeviceSize, spec.RootDeviceSize))
+ }
+
+ // Subnet ID
+ // spec.Subnet is a *AWSResourceReference and could technically be
+ // a *string, ARN or Filter. However, elsewhere in the code it is only used
+ // as a *string, so do the same here.
+ if spec.Subnet != nil {
+ if aws.StringValue(spec.Subnet.ID) != i.SubnetID {
+ errs = append(errs, errors.Errorf("machine subnet ID cannot be mutated from %q to %q",
+ i.SubnetID, aws.StringValue(spec.Subnet.ID)))
+ }
+ }
+
+ // PublicIP check is a little more complicated as the machineConfig is a
+ // simple bool indicating if the instance should have a public IP or not,
+ // while the instanceDescription contains the public IP assigned to the
+ // instance.
+ // Work out whether the instance already has a public IP or not based on
+ // the length of the PublicIP string. Anything >0 is assumed to mean it does
+ // have a public IP.
+ instanceHasPublicIP := false
+ if len(aws.StringValue(i.PublicIP)) > 0 {
+ instanceHasPublicIP = true
+ }
+
+ if aws.BoolValue(spec.PublicIP) != instanceHasPublicIP {
+ errs = append(errs, errors.Errorf(`public IP setting cannot be mutated from "%v" to "%v"`,
+ instanceHasPublicIP, aws.BoolValue(spec.PublicIP)))
+ }
+
+ return errs
+}
diff --git a/pkg/controller/awsmachine/awsmachine_controller_suite_test.go b/pkg/controller/awsmachine/awsmachine_controller_suite_test.go
index 2a6f3f1c74..1cad77411c 100644
--- a/pkg/controller/awsmachine/awsmachine_controller_suite_test.go
+++ b/pkg/controller/awsmachine/awsmachine_controller_suite_test.go
@@ -23,21 +23,25 @@ import (
"sync"
"testing"
- "github.com/onsi/gomega"
+ . "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/cluster-api-provider-aws/pkg/apis"
+ capi "sigs.k8s.io/cluster-api/pkg/apis"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/manager"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
)
var cfg *rest.Config
func TestMain(m *testing.M) {
t := &envtest.Environment{
- CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")},
+ CRDDirectoryPaths: []string{
+ filepath.Join("..", "..", "..", "vendor", "sigs.k8s.io", "cluster-api", "config", "crds"),
+ filepath.Join("..", "..", "..", "config", "crds"),
+ },
}
+ capi.AddToScheme(scheme.Scheme)
apis.AddToScheme(scheme.Scheme)
var err error
@@ -50,26 +54,14 @@ func TestMain(m *testing.M) {
os.Exit(code)
}
-// SetupTestReconcile returns a reconcile.Reconcile implementation that delegates to inner and
-// writes the request to requests after Reconcile is finished.
-func SetupTestReconcile(inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request) {
- requests := make(chan reconcile.Request)
- fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) {
- result, err := inner.Reconcile(req)
- requests <- req
- return result, err
- })
- return fn, requests
-}
-
// StartTestManager adds recFn
-func StartTestManager(mgr manager.Manager, g *gomega.GomegaWithT) (chan struct{}, *sync.WaitGroup) {
+func StartTestManager(mgr manager.Manager) (chan struct{}, *sync.WaitGroup) {
stop := make(chan struct{})
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
- g.Expect(mgr.Start(stop)).NotTo(gomega.HaveOccurred())
+ Expect(mgr.Start(stop)).NotTo(HaveOccurred())
}()
return stop, wg
}
diff --git a/pkg/controller/awsmachine/awsmachine_controller_test.go b/pkg/controller/awsmachine/awsmachine_controller_test.go
index 75d621448e..630b73481e 100644
--- a/pkg/controller/awsmachine/awsmachine_controller_test.go
+++ b/pkg/controller/awsmachine/awsmachine_controller_test.go
@@ -20,53 +20,51 @@ import (
"testing"
"time"
- "github.com/onsi/gomega"
+ . "github.com/onsi/gomega"
"golang.org/x/net/context"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
- infrastructurev1alpha2 "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/infrastructure/v1alpha2"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/infrastructure/v1alpha2"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
)
-var c client.Client
-
-var expectedRequest = reconcile.Request{NamespacedName: types.NamespacedName{Name: "foo", Namespace: "default"}}
-
const timeout = time.Second * 5
func TestReconcile(t *testing.T) {
- g := gomega.NewGomegaWithT(t)
- instance := &infrastructurev1alpha2.AWSMachine{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}}
+ RegisterTestingT(t)
+ ctx := context.Background()
+ instance := &infrav1.AWSMachine{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}}
// Setup the Manager and Controller. Wrap the Controller Reconcile function so it writes each request to a
// channel when it is finished.
mgr, err := manager.New(cfg, manager.Options{})
- g.Expect(err).NotTo(gomega.HaveOccurred())
- c = mgr.GetClient()
-
- recFn, requests := SetupTestReconcile(newReconciler(mgr))
- g.Expect(add(mgr, recFn)).To(gomega.Succeed())
-
- stopMgr, mgrStopped := StartTestManager(mgr, g)
+ Expect(err).NotTo(HaveOccurred())
+ c := mgr.GetClient()
+ Expect(add(mgr, newReconciler(mgr))).To(Succeed())
+ stopMgr, mgrStopped := StartTestManager(mgr)
defer func() {
close(stopMgr)
mgrStopped.Wait()
}()
// Create the AWSMachine object and expect the Reconcile
- err = c.Create(context.TODO(), instance)
+ err = c.Create(ctx, instance)
// The instance object may not be a valid object because it might be missing some required fields.
// Please modify the instance object by adding required fields and then remove the following if statement.
if apierrors.IsInvalid(err) {
t.Logf("failed to create object, got an invalid object error: %v", err)
return
}
- g.Expect(err).NotTo(gomega.HaveOccurred())
+ Expect(err).NotTo(HaveOccurred())
+ Eventually(func() bool {
+ key := client.ObjectKey{Name: instance.Name, Namespace: instance.Namespace}
+ if err := c.Get(ctx, key, instance); err != nil {
+ return false
+ }
+ return true
+ }, timeout).Should(BeTrue())
defer c.Delete(context.TODO(), instance)
- g.Eventually(requests, timeout).Should(gomega.Receive(gomega.Equal(expectedRequest)))
}
diff --git a/pkg/controller/awsmachine/security_groups.go b/pkg/controller/awsmachine/security_groups.go
new file mode 100644
index 0000000000..fe3f90efc3
--- /dev/null
+++ b/pkg/controller/awsmachine/security_groups.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package awsmachine
+
+import (
+ "sort"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/infrastructure/v1alpha2"
+ "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/actuators"
+ service "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services"
+)
+
+const (
+ // SecurityGroupsLastAppliedAnnotation is the key for the machine object
+ // annotation which tracks the SecurityGroups that the machine actuator is
+ // responsible for. These are the SecurityGroups that have been handled by
+ // the AdditionalSecurityGroups in the Machine Provider Config.
+ // See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+ // for annotation formatting rules.
+ SecurityGroupsLastAppliedAnnotation = "sigs.k8s.io/cluster-api-provider-aws-last-applied-security-groups"
+)
+
+// Ensures that the security groups of the machine are correct
+// Returns bool, error
+// Bool indicates if changes were made or not, allowing the caller to decide
+// if the machine should be updated.
+func (r *ReconcileAWSMachine) ensureSecurityGroups(ec2svc service.EC2MachineInterface, scope *actuators.MachineScope, additional []infrav1.AWSResourceReference, existing map[string][]string) (bool, error) {
+ annotation, err := r.machineAnnotationJSON(scope.ProviderMachine, SecurityGroupsLastAppliedAnnotation)
+ if err != nil {
+ return false, err
+ }
+
+ core, err := ec2svc.GetCoreSecurityGroups(scope)
+ if err != nil {
+ return false, err
+ }
+ changed, ids := r.securityGroupsChanged(annotation, core, additional, existing)
+ if !changed {
+ return false, nil
+ }
+
+ if err := ec2svc.UpdateInstanceSecurityGroups(*scope.GetInstanceID(), ids); err != nil {
+ return false, err
+ }
+
+ // Build and store annotation.
+ newAnnotation := make(map[string]interface{}, len(additional))
+ for _, id := range additional {
+ newAnnotation[*id.ID] = struct{}{}
+ }
+
+ if err := r.updateMachineAnnotationJSON(scope.ProviderMachine, SecurityGroupsLastAppliedAnnotation, newAnnotation); err != nil {
+ return false, err
+ }
+
+ return true, nil
+}
+
+// securityGroupsChanged determines which security groups to delete and which to add.
+func (r *ReconcileAWSMachine) securityGroupsChanged(annotation map[string]interface{}, core []string, additional []infrav1.AWSResourceReference, existing map[string][]string) (bool, []string) {
+ state := map[string]bool{}
+ for _, s := range additional {
+ state[*s.ID] = true
+ }
+
+ // Loop over `annotation`, checking the state for things that were deleted since last time.
+ // If we find something in the `annotation`, but not in the state, we flag it as `false` (not found, deleted).
+ for groupID := range annotation {
+ if _, ok := state[groupID]; !ok {
+ state[groupID] = false
+ }
+ }
+
+ // add (or add back) the core security groups
+ for _, s := range core {
+ state[s] = true
+ }
+
+ // Build the security group list.
+ res := []string{}
+ for id, keep := range state {
+ if keep {
+ res = append(res, id)
+ }
+ }
+
+ for _, actual := range existing {
+ if len(actual) != len(res) {
+ return true, res
+ }
+
+ // Length is the same, check if the ids are the same too.
+ sort.Strings(actual)
+ sort.Strings(res)
+ for i, id := range res {
+ if actual[i] != id {
+ return true, res
+ }
+ }
+ }
+
+ return false, res
+}
diff --git a/pkg/controller/awsmachine/tags.go b/pkg/controller/awsmachine/tags.go
new file mode 100644
index 0000000000..2fa8923be7
--- /dev/null
+++ b/pkg/controller/awsmachine/tags.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package awsmachine
+
+import (
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/infrastructure/v1alpha2"
+ service "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services"
+)
+
+const (
+ // TagsLastAppliedAnnotation is the key for the machine object annotation
+ // which tracks the tags that the machine actuator is responsible
+ // for. These are the tags that have been handled by the
+ // AdditionalTags in the Machine Provider Config.
+ // See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+ // for annotation formatting rules.
+ TagsLastAppliedAnnotation = "sigs.k8s.io/cluster-api-provider-aws-last-applied-tags"
+)
+
+// Ensure that the tags of the machine are correct
+// Returns bool, error
+// Bool indicates if changes were made or not, allowing the caller to decide
+// if the machine should be updated.
+func (r *ReconcileAWSMachine) ensureTags(svc service.EC2MachineInterface, machine *infrav1.AWSMachine, instanceID *string, additionalTags map[string]string) (bool, error) {
+ annotation, err := r.machineAnnotationJSON(machine, TagsLastAppliedAnnotation)
+ if err != nil {
+ return false, err
+ }
+
+ // Check if the instance tags were changed. If they were, update them.
+ // It would be possible here to only send new/updated tags, but for the
+ // moment we send everything, even if only a single tag was created or
+ // updated.
+ changed, created, deleted, newAnnotation := r.tagsChanged(annotation, additionalTags)
+ if changed {
+ err = svc.UpdateResourceTags(instanceID, created, deleted)
+ if err != nil {
+ return false, err
+ }
+
+ // We also need to update the annotation if anything changed.
+ err = r.updateMachineAnnotationJSON(machine, TagsLastAppliedAnnotation, newAnnotation)
+ if err != nil {
+ return false, err
+ }
+ }
+
+ return changed, nil
+}
+
+// tagsChanged determines which tags to delete and which to add.
+func (r *ReconcileAWSMachine) tagsChanged(annotation map[string]interface{}, src map[string]string) (bool, map[string]string, map[string]string, map[string]interface{}) {
+ // Bool tracking if we found any changed state.
+ changed := false
+
+ // Tracking for created/updated
+ created := map[string]string{}
+
+ // Tracking for tags that were deleted.
+ deleted := map[string]string{}
+
+ // The new annotation that we need to set if anything is created/updated.
+ newAnnotation := map[string]interface{}{}
+
+ // Loop over annotation, checking if entries are in src.
+ // If an entry is present in annotation but not src, it has been deleted
+ // since last time. We flag this in the deleted map.
+ for t, v := range annotation {
+ _, ok := src[t]
+
+ // Entry isn't in src, it has been deleted.
+ if !ok {
+ // Cast v to a string here. This should be fine, tags are always
+ // strings.
+ deleted[t] = v.(string)
+ changed = true
+ }
+ }
+
+ // Loop over src, checking for entries in annotation.
+ //
+ // If an entry is in src, but not annotation, it has been created since
+ // last time.
+ //
+ // If an entry is in both src and annotation, we compare their values, if
+ // the value in src differs from that in annotation, the tag has been
+ // updated since last time.
+ for t, v := range src {
+ av, ok := annotation[t]
+
+ // Entries in the src always need to be noted in the newAnnotation. We
+ // know they're going to be created or updated.
+ newAnnotation[t] = v
+
+ // Entry isn't in annotation, it's new.
+ if !ok {
+ created[t] = v
+ newAnnotation[t] = v
+ changed = true
+ continue
+ }
+
+ // Entry is in annotation, has the value changed?
+ if v != av {
+ created[t] = v
+ changed = true
+ }
+
+ // Entry existed in both src and annotation, and their values were
+ // equal. Nothing to do.
+ }
+
+ // We made it through the loop, and everything that was in src, was also
+ // in dst. Nothing changed.
+ return changed, created, deleted, newAnnotation
+}
diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go
index 595017d985..e0a5d1c5ac 100644
--- a/pkg/controller/controller.go
+++ b/pkg/controller/controller.go
@@ -24,7 +24,7 @@ import (
// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=create;get;delete
// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=awsprovider.k8s.io,resources=awsclusterproviderconfigs;awsclusterproviderstatuses,verbs=get;list;watch;create;update;patch;delete
-// +kubebuilder:rbac:groups=cluster.sigs.k8s.io,resources=clusters;clusters/status,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=cluster.sigs.k8s.io,resources=machines;machines/status;clusters;clusters/status,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.sigs.k8s.io,resources=awsmachines;awsmachines/status,verbs=get;list;watch;create;update;patch;delete
// AddToManagerFuncs is a list of functions to add all Controllers to the Manager
diff --git a/pkg/deployer/deployer.go b/pkg/deployer/deployer.go
index de89c7b126..0b47e25736 100644
--- a/pkg/deployer/deployer.go
+++ b/pkg/deployer/deployer.go
@@ -30,24 +30,24 @@ import (
// Deployer satisfies the ProviderDeployer(https://github.com/kubernetes-sigs/cluster-api/blob/master/cmd/clusterctl/clusterdeployer/clusterdeployer.go) interface.
type Deployer struct {
- scopeGetter actuators.ScopeGetter
+ scopeGetter actuators.ClusterScopeGetter
}
// Params is used to create a new deployer.
type Params struct {
- ScopeGetter actuators.ScopeGetter
+ ClusterScopeGetter actuators.ClusterScopeGetter
}
// New returns a new Deployer.
func New(params Params) *Deployer {
return &Deployer{
- scopeGetter: params.ScopeGetter,
+ scopeGetter: params.ClusterScopeGetter,
}
}
// GetIP returns the IP of a machine, but this is going away.
func (d *Deployer) GetIP(cluster *clusterv1.Cluster, _ *clusterv1.Machine) (string, error) {
- scope, err := d.scopeGetter.GetScope(actuators.ScopeParams{Cluster: cluster})
+ scope, err := d.scopeGetter.ClusterScope(actuators.ClusterScopeParams{Cluster: cluster})
if err != nil {
return "", err
}
diff --git a/pkg/deployer/deployer_test.go b/pkg/deployer/deployer_test.go
index 00a8cc4fe7..ad5530876a 100644
--- a/pkg/deployer/deployer_test.go
+++ b/pkg/deployer/deployer_test.go
@@ -36,9 +36,9 @@ type scopeGetter struct {
actuators.AWSClients
}
-func (s *scopeGetter) GetScope(params actuators.ScopeParams) (*actuators.Scope, error) {
+func (s *scopeGetter) ClusterScope(params actuators.ClusterScopeParams) (*actuators.ClusterScope, error) {
params.AWSClients = s.AWSClients
- return actuators.NewScope(params)
+ return actuators.NewClusterScope(params)
}
func TestGetIP(t *testing.T) {
@@ -142,7 +142,7 @@ func TestGetIP(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
- deployer := deployer.New(deployer.Params{ScopeGetter: &scopeGetter{
+ deployer := deployer.New(deployer.Params{ClusterScopeGetter: &scopeGetter{
actuators.AWSClients{
EC2: ec2Mock,
ELB: elbMock,
diff --git a/vendor/github.com/go-logr/zapr/.gitignore b/vendor/github.com/go-logr/zapr/.gitignore
new file mode 100644
index 0000000000..5ba77727f1
--- /dev/null
+++ b/vendor/github.com/go-logr/zapr/.gitignore
@@ -0,0 +1,3 @@
+*~
+*.swp
+/vendor
diff --git a/vendor/github.com/go-logr/zapr/BUILD.bazel b/vendor/github.com/go-logr/zapr/BUILD.bazel
new file mode 100644
index 0000000000..ba2fd1e1d9
--- /dev/null
+++ b/vendor/github.com/go-logr/zapr/BUILD.bazel
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = ["zapr.go"],
+ importmap = "sigs.k8s.io/cluster-api-provider-aws/vendor/github.com/go-logr/zapr",
+ importpath = "github.com/go-logr/zapr",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//vendor/github.com/go-logr/logr:go_default_library",
+ "//vendor/go.uber.org/zap:go_default_library",
+ "//vendor/go.uber.org/zap/zapcore:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/go-logr/zapr/Gopkg.lock b/vendor/github.com/go-logr/zapr/Gopkg.lock
new file mode 100644
index 0000000000..4e0d08ecaa
--- /dev/null
+++ b/vendor/github.com/go-logr/zapr/Gopkg.lock
@@ -0,0 +1,40 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ branch = "master"
+ name = "github.com/go-logr/logr"
+ packages = ["."]
+ revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e"
+
+[[projects]]
+ name = "go.uber.org/atomic"
+ packages = ["."]
+ revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289"
+ version = "v1.3.2"
+
+[[projects]]
+ name = "go.uber.org/multierr"
+ packages = ["."]
+ revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
+ version = "v1.1.0"
+
+[[projects]]
+ name = "go.uber.org/zap"
+ packages = [
+ ".",
+ "buffer",
+ "internal/bufferpool",
+ "internal/color",
+ "internal/exit",
+ "zapcore"
+ ]
+ revision = "eeedf312bc6c57391d84767a4cd413f02a917974"
+ version = "v1.8.0"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "9b4b2f75bc457ddc6ebb276c32fc8e30525b6133ee76886c804ba0a6b815abc2"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/go-logr/zapr/Gopkg.toml b/vendor/github.com/go-logr/zapr/Gopkg.toml
new file mode 100644
index 0000000000..78944774cd
--- /dev/null
+++ b/vendor/github.com/go-logr/zapr/Gopkg.toml
@@ -0,0 +1,38 @@
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+#
+# [prune]
+# non-go = false
+# go-tests = true
+# unused-packages = true
+
+
+[[constraint]]
+ branch = "master"
+ name = "github.com/go-logr/logr"
+
+[[constraint]]
+ name = "go.uber.org/zap"
+ version = "1.8.0"
+
+[prune]
+ go-tests = true
+ unused-packages = true
diff --git a/vendor/github.com/go-logr/zapr/LICENSE b/vendor/github.com/go-logr/zapr/LICENSE
new file mode 100644
index 0000000000..8dada3edaf
--- /dev/null
+++ b/vendor/github.com/go-logr/zapr/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-logr/zapr/README.md b/vendor/github.com/go-logr/zapr/README.md
new file mode 100644
index 0000000000..8472875fa3
--- /dev/null
+++ b/vendor/github.com/go-logr/zapr/README.md
@@ -0,0 +1,45 @@
+Zapr :zap:
+==========
+
+A [logr](https://github.com/go-logr/logr) implementation using
+[Zap](go.uber.org/zap).
+
+Usage
+-----
+
+```go
+import (
+ "fmt"
+
+ "go.uber.org/zap"
+ "github.com/go-logr/logr"
+	"github.com/go-logr/zapr"
+)
+
+func main() {
+ var log logr.Logger
+
+ zapLog, err := zap.NewDevelopment()
+ if err != nil {
+ panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
+ }
+ log = zapr.NewLogger(zapLog)
+
+ log.Info("Logr in action!", "the answer", 42)
+}
+```
+
+Implementation Details
+----------------------
+
+For the most part, concepts in Zap correspond directly with those in logr.
+
+Unlike Zap, all fields *must* be in the form of sugared fields --
+it's illegal to pass a strongly-typed Zap field in a key position to any
+of the logging methods (`Log`, `Error`).
+
+Levels in logr correspond to custom debug levels in Zap. Any given level
+in logr is represented by its inverse in Zap (`zapLevel = -1*logrLevel`).
+
+For example `V(2)` is equivalent to log level -2 in Zap, while `V(1)` is
+equivalent to Zap's `DebugLevel`.
diff --git a/vendor/github.com/go-logr/zapr/zapr.go b/vendor/github.com/go-logr/zapr/zapr.go
new file mode 100644
index 0000000000..a9a10ae2e1
--- /dev/null
+++ b/vendor/github.com/go-logr/zapr/zapr.go
@@ -0,0 +1,163 @@
+// Copyright 2018 Solly Ross
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// package zapr defines an implementation of the github.com/go-logr/logr
+// interfaces built on top of Zap (go.uber.org/zap).
+//
+// Usage
+//
+// A new logr.Logger can be constructed from an existing zap.Logger using
+// the NewLogger function:
+//
+// log := zapr.NewLogger(someZapLogger)
+//
+// Implementation Details
+//
+// For the most part, concepts in Zap correspond directly with those in
+// logr.
+//
+// Unlike Zap, all fields *must* be in the form of sugared fields --
+// it's illegal to pass a strongly-typed Zap field in a key position
+// to any of the log methods.
+//
+// Levels in logr correspond to custom debug levels in Zap. Any given level
+// in logr is represented by its inverse in zap (`zapLevel = -1*logrLevel`).
+// For example V(2) is equivalent to log level -2 in Zap, while V(1) is
+// equivalent to Zap's DebugLevel.
+package zapr
+
+import (
+ "github.com/go-logr/logr"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+// noopInfoLogger is a logr.InfoLogger that's always disabled, and does nothing.
+type noopInfoLogger struct{}
+
+func (l *noopInfoLogger) Enabled() bool { return false }
+func (l *noopInfoLogger) Info(_ string, _ ...interface{}) {}
+
+var disabledInfoLogger = &noopInfoLogger{}
+
+// NB: right now, we always use the equivalent of sugared logging.
+// This is necessary, since logr doesn't define non-sugared types,
+// and using zap-specific non-sugared types would make users tied
+// directly to Zap.
+
+// infoLogger is a logr.InfoLogger that uses Zap to log at a particular
+// level. The level has already been converted to a Zap level, which
+// is to say that `logrLevel = -1*zapLevel`.
+type infoLogger struct {
+ lvl zapcore.Level
+ l *zap.Logger
+}
+
+func (l *infoLogger) Enabled() bool { return true }
+func (l *infoLogger) Info(msg string, keysAndVals ...interface{}) {
+ if checkedEntry := l.l.Check(l.lvl, msg); checkedEntry != nil {
+ checkedEntry.Write(handleFields(l.l, keysAndVals)...)
+ }
+}
+
+// zapLogger is a logr.Logger that uses Zap to log.
+type zapLogger struct {
+ // NB: this looks very similar to zap.SugaredLogger, but
+ // deals with our desire to have multiple verbosity levels.
+ l *zap.Logger
+ infoLogger
+}
+
+// handleFields converts a bunch of arbitrary key-value pairs into Zap fields. It takes
+// additional pre-converted Zap fields, for use with automatically attached fields, like
+// `error`.
+func handleFields(l *zap.Logger, args []interface{}, additional ...zap.Field) []zap.Field {
+ // a slightly modified version of zap.SugaredLogger.sweetenFields
+ if len(args) == 0 {
+		// fast-return if we have no sugared fields.
+ return additional
+ }
+
+ // unlike Zap, we can be pretty sure users aren't passing structured
+ // fields (since logr has no concept of that), so guess that we need a
+ // little less space.
+ fields := make([]zap.Field, 0, len(args)/2+len(additional))
+ for i := 0; i < len(args); {
+ // check just in case for strongly-typed Zap fields, which is illegal (since
+ // it breaks implementation agnosticism), so we can give a better error message.
+ if _, ok := args[i].(zap.Field); ok {
+ l.DPanic("strongly-typed Zap Field passed to logr", zap.Any("zap field", args[i]))
+ break
+ }
+
+ // make sure this isn't a mismatched key
+ if i == len(args)-1 {
+ l.DPanic("odd number of arguments passed as key-value pairs for logging", zap.Any("ignored key", args[i]))
+ break
+ }
+
+ // process a key-value pair,
+ // ensuring that the key is a string
+ key, val := args[i], args[i+1]
+ keyStr, isString := key.(string)
+ if !isString {
+ // if the key isn't a string, DPanic and stop logging
+ l.DPanic("non-string key argument passed to logging, ignoring all later arguments", zap.Any("invalid key", key))
+ break
+ }
+
+ fields = append(fields, zap.Any(keyStr, val))
+ i += 2
+ }
+
+ return append(fields, additional...)
+}
+
+func (l *zapLogger) Error(err error, msg string, keysAndVals ...interface{}) {
+ if checkedEntry := l.l.Check(zap.ErrorLevel, msg); checkedEntry != nil {
+ checkedEntry.Write(handleFields(l.l, keysAndVals, zap.Error(err))...)
+ }
+}
+
+func (l *zapLogger) V(level int) logr.InfoLogger {
+ lvl := zapcore.Level(-1 * level)
+ if l.l.Core().Enabled(lvl) {
+ return &infoLogger{
+ lvl: lvl,
+ l: l.l,
+ }
+ }
+ return disabledInfoLogger
+}
+
+func (l *zapLogger) WithValues(keysAndValues ...interface{}) logr.Logger {
+ newLogger := l.l.With(handleFields(l.l, keysAndValues)...)
+ return NewLogger(newLogger)
+}
+
+func (l *zapLogger) WithName(name string) logr.Logger {
+ newLogger := l.l.Named(name)
+ return NewLogger(newLogger)
+}
+
+// NewLogger creates a new logr.Logger using the given Zap Logger to log.
+func NewLogger(l *zap.Logger) logr.Logger {
+ return &zapLogger{
+ l: l,
+ infoLogger: infoLogger{
+ l: l,
+ lvl: zap.InfoLevel,
+ },
+ }
+}
diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml
new file mode 100644
index 0000000000..6d4d1be7b5
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.codecov.yml
@@ -0,0 +1,15 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 100 # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+
diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore
new file mode 100644
index 0000000000..0a4504f110
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.gitignore
@@ -0,0 +1,11 @@
+.DS_Store
+/vendor
+/cover
+cover.out
+lint.log
+
+# Binaries
+*.test
+
+# Profiling output
+*.prof
diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml
new file mode 100644
index 0000000000..58957222a3
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.travis.yml
@@ -0,0 +1,23 @@
+sudo: false
+language: go
+go_import_path: go.uber.org/atomic
+
+go:
+ - 1.7
+ - 1.8
+ - 1.9
+
+cache:
+ directories:
+ - vendor
+
+install:
+ - make install_ci
+
+script:
+ - make test_ci
+ - scripts/test-ubergo.sh
+ - make lint
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/atomic/BUILD.bazel b/vendor/go.uber.org/atomic/BUILD.bazel
new file mode 100644
index 0000000000..9e3ca482dc
--- /dev/null
+++ b/vendor/go.uber.org/atomic/BUILD.bazel
@@ -0,0 +1,12 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "atomic.go",
+ "string.go",
+ ],
+ importmap = "sigs.k8s.io/cluster-api-provider-aws/vendor/go.uber.org/atomic",
+ importpath = "go.uber.org/atomic",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt
new file mode 100644
index 0000000000..8765c9fbc6
--- /dev/null
+++ b/vendor/go.uber.org/atomic/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2016 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile
new file mode 100644
index 0000000000..dfc63d9db4
--- /dev/null
+++ b/vendor/go.uber.org/atomic/Makefile
@@ -0,0 +1,64 @@
+PACKAGES := $(shell glide nv)
+# Many Go tools take file globs or directories as arguments instead of packages.
+PACKAGE_FILES ?= *.go
+
+
+# The linting tools evolve with each Go version, so run them only on the latest
+# stable release.
+GO_VERSION := $(shell go version | cut -d " " -f 3)
+GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION)))
+LINTABLE_MINOR_VERSIONS := 7 8
+ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),)
+SHOULD_LINT := true
+endif
+
+
+export GO15VENDOREXPERIMENT=1
+
+
+.PHONY: build
+build:
+ go build -i $(PACKAGES)
+
+
+.PHONY: install
+install:
+ glide --version || go get github.com/Masterminds/glide
+ glide install
+
+
+.PHONY: test
+test:
+ go test -cover -race $(PACKAGES)
+
+
+.PHONY: install_ci
+install_ci: install
+ go get github.com/wadey/gocovmerge
+ go get github.com/mattn/goveralls
+ go get golang.org/x/tools/cmd/cover
+ifdef SHOULD_LINT
+ go get github.com/golang/lint/golint
+endif
+
+.PHONY: lint
+lint:
+ifdef SHOULD_LINT
+ @rm -rf lint.log
+ @echo "Checking formatting..."
+ @gofmt -d -s $(PACKAGE_FILES) 2>&1 | tee lint.log
+ @echo "Checking vet..."
+ @$(foreach dir,$(PACKAGE_FILES),go tool vet $(dir) 2>&1 | tee -a lint.log;)
+ @echo "Checking lint..."
+ @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;)
+ @echo "Checking for unresolved FIXMEs..."
+ @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log
+ @[ ! -s lint.log ]
+else
+ @echo "Skipping linters on" $(GO_VERSION)
+endif
+
+
+.PHONY: test_ci
+test_ci: install_ci build
+ ./scripts/cover.sh $(shell go list $(PACKAGES))
diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md
new file mode 100644
index 0000000000..6505abf65c
--- /dev/null
+++ b/vendor/go.uber.org/atomic/README.md
@@ -0,0 +1,36 @@
+# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard]
+
+Simple wrappers for primitive types to enforce atomic access.
+
+## Installation
+`go get -u go.uber.org/atomic`
+
+## Usage
+The standard library's `sync/atomic` is powerful, but it's easy to forget which
+variables must be accessed atomically. `go.uber.org/atomic` preserves all the
+functionality of the standard library, but wraps the primitive types to
+provide a safer, more convenient API.
+
+```go
+var atom atomic.Uint32
+atom.Store(42)
+atom.Sub(2)
+atom.CAS(40, 11)
+```
+
+See the [documentation][doc] for a complete API specification.
+
+## Development Status
+Stable.
+
+
+Released under the [MIT License](LICENSE.txt).
+
+[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
+[doc]: https://godoc.org/go.uber.org/atomic
+[ci-img]: https://travis-ci.org/uber-go/atomic.svg?branch=master
+[ci]: https://travis-ci.org/uber-go/atomic
+[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/atomic
+[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
+[reportcard]: https://goreportcard.com/report/go.uber.org/atomic
diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go
new file mode 100644
index 0000000000..1db6849fca
--- /dev/null
+++ b/vendor/go.uber.org/atomic/atomic.go
@@ -0,0 +1,351 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package atomic provides simple wrappers around numerics to enforce atomic
+// access.
+package atomic
+
+import (
+ "math"
+ "sync/atomic"
+ "time"
+)
+
+// Int32 is an atomic wrapper around an int32.
+type Int32 struct{ v int32 }
+
+// NewInt32 creates an Int32.
+func NewInt32(i int32) *Int32 {
+ return &Int32{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Int32) Load() int32 {
+ return atomic.LoadInt32(&i.v)
+}
+
+// Add atomically adds to the wrapped int32 and returns the new value.
+func (i *Int32) Add(n int32) int32 {
+ return atomic.AddInt32(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped int32 and returns the new value.
+func (i *Int32) Sub(n int32) int32 {
+ return atomic.AddInt32(&i.v, -n)
+}
+
+// Inc atomically increments the wrapped int32 and returns the new value.
+func (i *Int32) Inc() int32 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped int32 and returns the new value.
+func (i *Int32) Dec() int32 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Int32) CAS(old, new int32) bool {
+ return atomic.CompareAndSwapInt32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Int32) Store(n int32) {
+ atomic.StoreInt32(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped int32 and returns the old value.
+func (i *Int32) Swap(n int32) int32 {
+ return atomic.SwapInt32(&i.v, n)
+}
+
+// Int64 is an atomic wrapper around an int64.
+type Int64 struct{ v int64 }
+
+// NewInt64 creates an Int64.
+func NewInt64(i int64) *Int64 {
+ return &Int64{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Int64) Load() int64 {
+ return atomic.LoadInt64(&i.v)
+}
+
+// Add atomically adds to the wrapped int64 and returns the new value.
+func (i *Int64) Add(n int64) int64 {
+ return atomic.AddInt64(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped int64 and returns the new value.
+func (i *Int64) Sub(n int64) int64 {
+ return atomic.AddInt64(&i.v, -n)
+}
+
+// Inc atomically increments the wrapped int64 and returns the new value.
+func (i *Int64) Inc() int64 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped int64 and returns the new value.
+func (i *Int64) Dec() int64 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Int64) CAS(old, new int64) bool {
+ return atomic.CompareAndSwapInt64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Int64) Store(n int64) {
+ atomic.StoreInt64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped int64 and returns the old value.
+func (i *Int64) Swap(n int64) int64 {
+ return atomic.SwapInt64(&i.v, n)
+}
+
+// Uint32 is an atomic wrapper around an uint32.
+type Uint32 struct{ v uint32 }
+
+// NewUint32 creates a Uint32.
+func NewUint32(i uint32) *Uint32 {
+ return &Uint32{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint32) Load() uint32 {
+ return atomic.LoadUint32(&i.v)
+}
+
+// Add atomically adds to the wrapped uint32 and returns the new value.
+func (i *Uint32) Add(n uint32) uint32 {
+ return atomic.AddUint32(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint32 and returns the new value.
+func (i *Uint32) Sub(n uint32) uint32 {
+ return atomic.AddUint32(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint32 and returns the new value.
+func (i *Uint32) Inc() uint32 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint32 and returns the new value.
+func (i *Uint32) Dec() uint32 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint32) CAS(old, new uint32) bool {
+ return atomic.CompareAndSwapUint32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint32) Store(n uint32) {
+ atomic.StoreUint32(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint32 and returns the old value.
+func (i *Uint32) Swap(n uint32) uint32 {
+ return atomic.SwapUint32(&i.v, n)
+}
+
+// Uint64 is an atomic wrapper around a uint64.
+type Uint64 struct{ v uint64 }
+
+// NewUint64 creates a Uint64.
+func NewUint64(i uint64) *Uint64 {
+ return &Uint64{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint64) Load() uint64 {
+ return atomic.LoadUint64(&i.v)
+}
+
+// Add atomically adds to the wrapped uint64 and returns the new value.
+func (i *Uint64) Add(n uint64) uint64 {
+ return atomic.AddUint64(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint64 and returns the new value.
+func (i *Uint64) Sub(n uint64) uint64 {
+ return atomic.AddUint64(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint64 and returns the new value.
+func (i *Uint64) Inc() uint64 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint64 and returns the new value.
+func (i *Uint64) Dec() uint64 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint64) CAS(old, new uint64) bool {
+ return atomic.CompareAndSwapUint64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint64) Store(n uint64) {
+ atomic.StoreUint64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint64 and returns the old value.
+func (i *Uint64) Swap(n uint64) uint64 {
+ return atomic.SwapUint64(&i.v, n)
+}
+
+// Bool is an atomic Boolean.
+type Bool struct{ v uint32 }
+
+// NewBool creates a Bool.
+func NewBool(initial bool) *Bool {
+ return &Bool{boolToInt(initial)}
+}
+
+// Load atomically loads the Boolean.
+func (b *Bool) Load() bool {
+ return truthy(atomic.LoadUint32(&b.v))
+}
+
+// CAS is an atomic compare-and-swap.
+func (b *Bool) CAS(old, new bool) bool {
+ return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new))
+}
+
+// Store atomically stores the passed value.
+func (b *Bool) Store(new bool) {
+ atomic.StoreUint32(&b.v, boolToInt(new))
+}
+
+// Swap sets the given value and returns the previous value.
+func (b *Bool) Swap(new bool) bool {
+ return truthy(atomic.SwapUint32(&b.v, boolToInt(new)))
+}
+
+// Toggle atomically negates the Boolean and returns the previous value.
+func (b *Bool) Toggle() bool {
+ return truthy(atomic.AddUint32(&b.v, 1) - 1)
+}
+
+func truthy(n uint32) bool {
+ return n&1 == 1
+}
+
+func boolToInt(b bool) uint32 {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// Float64 is an atomic wrapper around float64.
+type Float64 struct {
+ v uint64
+}
+
+// NewFloat64 creates a Float64.
+func NewFloat64(f float64) *Float64 {
+ return &Float64{math.Float64bits(f)}
+}
+
+// Load atomically loads the wrapped value.
+func (f *Float64) Load() float64 {
+ return math.Float64frombits(atomic.LoadUint64(&f.v))
+}
+
+// Store atomically stores the passed value.
+func (f *Float64) Store(s float64) {
+ atomic.StoreUint64(&f.v, math.Float64bits(s))
+}
+
+// Add atomically adds to the wrapped float64 and returns the new value.
+func (f *Float64) Add(s float64) float64 {
+ for {
+ old := f.Load()
+ new := old + s
+ if f.CAS(old, new) {
+ return new
+ }
+ }
+}
+
+// Sub atomically subtracts from the wrapped float64 and returns the new value.
+func (f *Float64) Sub(s float64) float64 {
+ return f.Add(-s)
+}
+
+// CAS is an atomic compare-and-swap.
+func (f *Float64) CAS(old, new float64) bool {
+ return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new))
+}
+
+// Duration is an atomic wrapper around time.Duration
+// https://godoc.org/time#Duration
+type Duration struct {
+ v Int64
+}
+
+// NewDuration creates a Duration.
+func NewDuration(d time.Duration) *Duration {
+ return &Duration{v: *NewInt64(int64(d))}
+}
+
+// Load atomically loads the wrapped value.
+func (d *Duration) Load() time.Duration {
+ return time.Duration(d.v.Load())
+}
+
+// Store atomically stores the passed value.
+func (d *Duration) Store(n time.Duration) {
+ d.v.Store(int64(n))
+}
+
+// Add atomically adds to the wrapped time.Duration and returns the new value.
+func (d *Duration) Add(n time.Duration) time.Duration {
+ return time.Duration(d.v.Add(int64(n)))
+}
+
+// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
+func (d *Duration) Sub(n time.Duration) time.Duration {
+ return time.Duration(d.v.Sub(int64(n)))
+}
+
+// Swap atomically swaps the wrapped time.Duration and returns the old value.
+func (d *Duration) Swap(n time.Duration) time.Duration {
+ return time.Duration(d.v.Swap(int64(n)))
+}
+
+// CAS is an atomic compare-and-swap.
+func (d *Duration) CAS(old, new time.Duration) bool {
+ return d.v.CAS(int64(old), int64(new))
+}
+
+// Value shadows the type of the same name from sync/atomic
+// https://godoc.org/sync/atomic#Value
+type Value struct{ atomic.Value }
diff --git a/vendor/go.uber.org/atomic/glide.lock b/vendor/go.uber.org/atomic/glide.lock
new file mode 100644
index 0000000000..3c72c59976
--- /dev/null
+++ b/vendor/go.uber.org/atomic/glide.lock
@@ -0,0 +1,17 @@
+hash: f14d51408e3e0e4f73b34e4039484c78059cd7fc5f4996fdd73db20dc8d24f53
+updated: 2016-10-27T00:10:51.16960137-07:00
+imports: []
+testImports:
+- name: github.com/davecgh/go-spew
+ version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
+ subpackages:
+ - spew
+- name: github.com/pmezard/go-difflib
+ version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
+ subpackages:
+ - difflib
+- name: github.com/stretchr/testify
+ version: d77da356e56a7428ad25149ca77381849a6a5232
+ subpackages:
+ - assert
+ - require
diff --git a/vendor/go.uber.org/atomic/glide.yaml b/vendor/go.uber.org/atomic/glide.yaml
new file mode 100644
index 0000000000..4cf608ec0f
--- /dev/null
+++ b/vendor/go.uber.org/atomic/glide.yaml
@@ -0,0 +1,6 @@
+package: go.uber.org/atomic
+testImport:
+- package: github.com/stretchr/testify
+ subpackages:
+ - assert
+ - require
diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go
new file mode 100644
index 0000000000..ede8136fac
--- /dev/null
+++ b/vendor/go.uber.org/atomic/string.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// String is an atomic type-safe wrapper around Value for strings.
+type String struct{ v Value }
+
+// NewString creates a String.
+func NewString(str string) *String {
+ s := &String{}
+ if str != "" {
+ s.Store(str)
+ }
+ return s
+}
+
+// Load atomically loads the wrapped string.
+func (s *String) Load() string {
+ v := s.v.Load()
+ if v == nil {
+ return ""
+ }
+ return v.(string)
+}
+
+// Store atomically stores the passed string.
+// Note: Converting the string to an interface{} to store in the Value
+// requires an allocation.
+func (s *String) Store(str string) {
+ s.v.Store(str)
+}
diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml
new file mode 100644
index 0000000000..6d4d1be7b5
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.codecov.yml
@@ -0,0 +1,15 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 100 # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+
diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore
new file mode 100644
index 0000000000..61ead86667
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.gitignore
@@ -0,0 +1 @@
+/vendor
diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml
new file mode 100644
index 0000000000..5ffa8fed48
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.travis.yml
@@ -0,0 +1,33 @@
+sudo: false
+language: go
+go_import_path: go.uber.org/multierr
+
+env:
+ global:
+ - GO15VENDOREXPERIMENT=1
+
+go:
+ - 1.7
+ - 1.8
+ - tip
+
+cache:
+ directories:
+ - vendor
+
+before_install:
+- go version
+
+install:
+- |
+ set -e
+ make install_ci
+
+script:
+- |
+ set -e
+ make lint
+ make test_ci
+
+after_success:
+- bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/multierr/BUILD.bazel b/vendor/go.uber.org/multierr/BUILD.bazel
new file mode 100644
index 0000000000..0d6c17fbe6
--- /dev/null
+++ b/vendor/go.uber.org/multierr/BUILD.bazel
@@ -0,0 +1,10 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = ["error.go"],
+ importmap = "sigs.k8s.io/cluster-api-provider-aws/vendor/go.uber.org/multierr",
+ importpath = "go.uber.org/multierr",
+ visibility = ["//visibility:public"],
+ deps = ["//vendor/go.uber.org/atomic:go_default_library"],
+)
diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md
new file mode 100644
index 0000000000..898445d063
--- /dev/null
+++ b/vendor/go.uber.org/multierr/CHANGELOG.md
@@ -0,0 +1,28 @@
+Releases
+========
+
+v1.1.0 (2017-06-30)
+===================
+
+- Added an `Errors(error) []error` function to extract the underlying list of
+ errors for a multierr error.
+
+
+v1.0.0 (2017-05-31)
+===================
+
+No changes since v0.2.0. This release is committing to making no breaking
+changes to the current API in the 1.X series.
+
+
+v0.2.0 (2017-04-11)
+===================
+
+- Repeatedly appending to the same error is now faster due to fewer
+ allocations.
+
+
+v0.1.0 (2017-03-31)
+===================
+
+- Initial release
diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt
new file mode 100644
index 0000000000..858e02475f
--- /dev/null
+++ b/vendor/go.uber.org/multierr/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2017 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile
new file mode 100644
index 0000000000..a7437d061f
--- /dev/null
+++ b/vendor/go.uber.org/multierr/Makefile
@@ -0,0 +1,74 @@
+export GO15VENDOREXPERIMENT=1
+
+PACKAGES := $(shell glide nv)
+
+GO_FILES := $(shell \
+ find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
+ -o -name '*.go' -print | cut -b3-)
+
+.PHONY: install
+install:
+ glide --version || go get github.com/Masterminds/glide
+ glide install
+
+.PHONY: build
+build:
+ go build -i $(PACKAGES)
+
+.PHONY: test
+test:
+ go test -cover -race $(PACKAGES)
+
+.PHONY: gofmt
+gofmt:
+ $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
+ @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
+ @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false)
+
+.PHONY: govet
+govet:
+ $(eval VET_LOG := $(shell mktemp -t govet.XXXXX))
+ @go vet $(PACKAGES) 2>&1 \
+ | grep -v '^exit status' > $(VET_LOG) || true
+ @[ ! -s "$(VET_LOG)" ] || (echo "govet failed:" | cat - $(VET_LOG) && false)
+
+.PHONY: golint
+golint:
+ @go get github.com/golang/lint/golint
+ $(eval LINT_LOG := $(shell mktemp -t golint.XXXXX))
+ @cat /dev/null > $(LINT_LOG)
+ @$(foreach pkg, $(PACKAGES), golint $(pkg) >> $(LINT_LOG) || true;)
+ @[ ! -s "$(LINT_LOG)" ] || (echo "golint failed:" | cat - $(LINT_LOG) && false)
+
+.PHONY: staticcheck
+staticcheck:
+ @go get honnef.co/go/tools/cmd/staticcheck
+ $(eval STATICCHECK_LOG := $(shell mktemp -t staticcheck.XXXXX))
+ @staticcheck $(PACKAGES) 2>&1 > $(STATICCHECK_LOG) || true
+ @[ ! -s "$(STATICCHECK_LOG)" ] || (echo "staticcheck failed:" | cat - $(STATICCHECK_LOG) && false)
+
+.PHONY: lint
+lint: gofmt govet golint staticcheck
+
+.PHONY: cover
+cover:
+ ./scripts/cover.sh $(shell go list $(PACKAGES))
+ go tool cover -html=cover.out -o cover.html
+
+update-license:
+ @go get go.uber.org/tools/update-license
+ @update-license \
+ $(shell go list -json $(PACKAGES) | \
+ jq -r '.Dir + "/" + (.GoFiles | .[])')
+
+##############################################################################
+
+.PHONY: install_ci
+install_ci: install
+ go get github.com/wadey/gocovmerge
+ go get github.com/mattn/goveralls
+ go get golang.org/x/tools/cmd/cover
+
+.PHONY: test_ci
+test_ci: install_ci
+ ./scripts/cover.sh $(shell go list $(PACKAGES))
diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md
new file mode 100644
index 0000000000..065088f641
--- /dev/null
+++ b/vendor/go.uber.org/multierr/README.md
@@ -0,0 +1,23 @@
+# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+`multierr` allows combining one or more Go `error`s together.
+
+## Installation
+
+ go get -u go.uber.org/multierr
+
+## Status
+
+Stable: No breaking changes will be made before 2.0.
+
+-------------------------------------------------------------------------------
+
+Released under the [MIT License].
+
+[MIT License]: LICENSE.txt
+[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg
+[doc]: https://godoc.org/go.uber.org/multierr
+[ci-img]: https://travis-ci.org/uber-go/multierr.svg?branch=master
+[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg
+[ci]: https://travis-ci.org/uber-go/multierr
+[cov]: https://codecov.io/gh/uber-go/multierr
diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go
new file mode 100644
index 0000000000..de6ce4736c
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error.go
@@ -0,0 +1,401 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package multierr allows combining one or more errors together.
+//
+// Overview
+//
+// Errors can be combined with the use of the Combine function.
+//
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// conn.Close(),
+// )
+//
+// If only two errors are being combined, the Append function may be used
+// instead.
+//
+// err = multierr.Combine(reader.Close(), writer.Close())
+//
+// This makes it possible to record resource cleanup failures from deferred
+// blocks with the help of named return values.
+//
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer func() {
+// err = multierr.Append(err, conn.Close())
+// }()
+// // ...
+// }
+//
+// The underlying list of errors for a returned error object may be retrieved
+// with the Errors function.
+//
+// errors := multierr.Errors(err)
+// if len(errors) > 0 {
+// fmt.Println("The following errors occurred:")
+// }
+//
+// Advanced Usage
+//
+// Errors returned by Combine and Append MAY implement the following
+// interface.
+//
+// type errorGroup interface {
+// // Returns a slice containing the underlying list of errors.
+// //
+// // This slice MUST NOT be modified by the caller.
+// Errors() []error
+// }
+//
+// Note that if you need access to list of errors behind a multierr error, you
+// should prefer using the Errors function. That said, if you need cheap
+// read-only access to the underlying errors slice, you can attempt to cast
+// the error to this interface. You MUST handle the failure case gracefully
+// because errors returned by Combine and Append are not guaranteed to
+// implement this interface.
+//
+// var errors []error
+// group, ok := err.(errorGroup)
+// if ok {
+// errors = group.Errors()
+// } else {
+// errors = []error{err}
+// }
+package multierr // import "go.uber.org/multierr"
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+
+ "go.uber.org/atomic"
+)
+
+var (
+ // Separator for single-line error messages.
+ _singlelineSeparator = []byte("; ")
+
+ _newline = []byte("\n")
+
+ // Prefix for multi-line messages
+ _multilinePrefix = []byte("the following errors occurred:")
+
+ // Prefix for the first and following lines of an item in a list of
+ // multi-line error messages.
+ //
+ // For example, if a single item is:
+ //
+ // foo
+ // bar
+ //
+ // It will become,
+ //
+ // - foo
+ // bar
+ _multilineSeparator = []byte("\n - ")
+ _multilineIndent = []byte(" ")
+)
+
+// _bufferPool is a pool of bytes.Buffers.
+var _bufferPool = sync.Pool{
+ New: func() interface{} {
+ return &bytes.Buffer{}
+ },
+}
+
+type errorGroup interface {
+ Errors() []error
+}
+
+// Errors returns a slice containing zero or more errors that the supplied
+// error is composed of. If the error is nil, the returned slice is empty.
+//
+// err := multierr.Append(r.Close(), w.Close())
+// errors := multierr.Errors(err)
+//
+// If the error is not composed of other errors, the returned slice contains
+// just the error that was passed in.
+//
+// Callers of this function are free to modify the returned slice.
+func Errors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+ // Note that we're casting to multiError, not errorGroup. Our contract is
+ // that returned errors MAY implement errorGroup. Errors, however, only
+ // has special behavior for multierr-specific error objects.
+ //
+ // This behavior can be expanded in the future but I think it's prudent to
+ // start with as little as possible in terms of contract and possibility
+ // of misuse.
+ eg, ok := err.(*multiError)
+ if !ok {
+ return []error{err}
+ }
+
+ errors := eg.Errors()
+ result := make([]error, len(errors))
+ copy(result, errors)
+ return result
+}
+
+// multiError is an error that holds one or more errors.
+//
+// An instance of this is guaranteed to be non-empty and flattened. That is,
+// none of the errors inside multiError are other multiErrors.
+//
+// multiError formats to a semi-colon delimited list of error messages with
+// %v and with a more readable multi-line format with %+v.
+type multiError struct {
+ copyNeeded atomic.Bool
+ errors []error
+}
+
+var _ errorGroup = (*multiError)(nil)
+
+// Errors returns the list of underlying errors.
+//
+// This slice MUST NOT be modified.
+func (merr *multiError) Errors() []error {
+ if merr == nil {
+ return nil
+ }
+ return merr.errors
+}
+
+func (merr *multiError) Error() string {
+ if merr == nil {
+ return ""
+ }
+
+ buff := _bufferPool.Get().(*bytes.Buffer)
+ buff.Reset()
+
+ merr.writeSingleline(buff)
+
+ result := buff.String()
+ _bufferPool.Put(buff)
+ return result
+}
+
+func (merr *multiError) Format(f fmt.State, c rune) {
+ if c == 'v' && f.Flag('+') {
+ merr.writeMultiline(f)
+ } else {
+ merr.writeSingleline(f)
+ }
+}
+
+func (merr *multiError) writeSingleline(w io.Writer) {
+ first := true
+ for _, item := range merr.errors {
+ if first {
+ first = false
+ } else {
+ w.Write(_singlelineSeparator)
+ }
+ io.WriteString(w, item.Error())
+ }
+}
+
+func (merr *multiError) writeMultiline(w io.Writer) {
+ w.Write(_multilinePrefix)
+ for _, item := range merr.errors {
+ w.Write(_multilineSeparator)
+ writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item))
+ }
+}
+
+// Writes s to the writer with the given prefix added before each line after
+// the first.
+func writePrefixLine(w io.Writer, prefix []byte, s string) {
+ first := true
+ for len(s) > 0 {
+ if first {
+ first = false
+ } else {
+ w.Write(prefix)
+ }
+
+ idx := strings.IndexByte(s, '\n')
+ if idx < 0 {
+ idx = len(s) - 1
+ }
+
+ io.WriteString(w, s[:idx+1])
+ s = s[idx+1:]
+ }
+}
+
+type inspectResult struct {
+ // Number of top-level non-nil errors
+ Count int
+
+ // Total number of errors including multiErrors
+ Capacity int
+
+ // Index of the first non-nil error in the list. Value is meaningless if
+ // Count is zero.
+ FirstErrorIdx int
+
+ // Whether the list contains at least one multiError
+ ContainsMultiError bool
+}
+
+// Inspects the given slice of errors so that we can efficiently allocate
+// space for it.
+func inspect(errors []error) (res inspectResult) {
+ first := true
+ for i, err := range errors {
+ if err == nil {
+ continue
+ }
+
+ res.Count++
+ if first {
+ first = false
+ res.FirstErrorIdx = i
+ }
+
+ if merr, ok := err.(*multiError); ok {
+ res.Capacity += len(merr.errors)
+ res.ContainsMultiError = true
+ } else {
+ res.Capacity++
+ }
+ }
+ return
+}
+
+// fromSlice converts the given list of errors into a single error.
+func fromSlice(errors []error) error {
+ res := inspect(errors)
+ switch res.Count {
+ case 0:
+ return nil
+ case 1:
+ // only one non-nil entry
+ return errors[res.FirstErrorIdx]
+ case len(errors):
+ if !res.ContainsMultiError {
+ // already flat
+ return &multiError{errors: errors}
+ }
+ }
+
+ nonNilErrs := make([]error, 0, res.Capacity)
+ for _, err := range errors[res.FirstErrorIdx:] {
+ if err == nil {
+ continue
+ }
+
+ if nested, ok := err.(*multiError); ok {
+ nonNilErrs = append(nonNilErrs, nested.errors...)
+ } else {
+ nonNilErrs = append(nonNilErrs, err)
+ }
+ }
+
+ return &multiError{errors: nonNilErrs}
+}
+
+// Combine combines the passed errors into a single error.
+//
+// If zero arguments were passed or if all items are nil, a nil error is
+// returned.
+//
+// Combine(nil, nil) // == nil
+//
+// If only a single error was passed, it is returned as-is.
+//
+// Combine(err) // == err
+//
+// Combine skips over nil arguments so this function may be used to combine
+// together errors from operations that fail independently of each other.
+//
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// pipe.Close(),
+// )
+//
+// If any of the passed errors is a multierr error, it will be flattened along
+// with the other errors.
+//
+// multierr.Combine(multierr.Combine(err1, err2), err3)
+// // is the same as
+// multierr.Combine(err1, err2, err3)
+//
+// The returned error formats into a readable multi-line error message if
+// formatted with %+v.
+//
+// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
+func Combine(errors ...error) error {
+ return fromSlice(errors)
+}
+
+// Append appends the given errors together. Either value may be nil.
+//
+// This function is a specialization of Combine for the common case where
+// there are only two errors.
+//
+// err = multierr.Append(reader.Close(), writer.Close())
+//
+// The following pattern may also be used to record failure of deferred
+// operations without losing information about the original error.
+//
+// func doSomething(..) (err error) {
+// f := acquireResource()
+// defer func() {
+// err = multierr.Append(err, f.Close())
+// }()
+func Append(left error, right error) error {
+ switch {
+ case left == nil:
+ return right
+ case right == nil:
+ return left
+ }
+
+ if _, ok := right.(*multiError); !ok {
+ if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) {
+ // Common case where the error on the left is constantly being
+ // appended to.
+ errs := append(l.errors, right)
+ return &multiError{errors: errs}
+ } else if !ok {
+ // Both errors are single errors.
+ return &multiError{errors: []error{left, right}}
+ }
+ }
+
+ // Either right or both, left and right, are multiErrors. Rely on usual
+ // expensive logic.
+ errors := [2]error{left, right}
+ return fromSlice(errors[0:])
+}
diff --git a/vendor/go.uber.org/multierr/glide.lock b/vendor/go.uber.org/multierr/glide.lock
new file mode 100644
index 0000000000..f9ea94c334
--- /dev/null
+++ b/vendor/go.uber.org/multierr/glide.lock
@@ -0,0 +1,19 @@
+hash: b53b5e9a84b9cb3cc4b2d0499e23da2feca1eec318ce9bb717ecf35bf24bf221
+updated: 2017-04-10T13:34:45.671678062-07:00
+imports:
+- name: go.uber.org/atomic
+ version: 3b8db5e93c4c02efbc313e17b2e796b0914a01fb
+testImports:
+- name: github.com/davecgh/go-spew
+ version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9
+ subpackages:
+ - spew
+- name: github.com/pmezard/go-difflib
+ version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
+ subpackages:
+ - difflib
+- name: github.com/stretchr/testify
+ version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0
+ subpackages:
+ - assert
+ - require
diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml
new file mode 100644
index 0000000000..6ef084ec24
--- /dev/null
+++ b/vendor/go.uber.org/multierr/glide.yaml
@@ -0,0 +1,8 @@
+package: go.uber.org/multierr
+import:
+- package: go.uber.org/atomic
+ version: ^1
+testImport:
+- package: github.com/stretchr/testify
+ subpackages:
+ - assert
diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml
new file mode 100644
index 0000000000..8e5ca7d3e2
--- /dev/null
+++ b/vendor/go.uber.org/zap/.codecov.yml
@@ -0,0 +1,17 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 95% # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+ignore:
+ - internal/readme/readme.go
+
diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore
new file mode 100644
index 0000000000..08fbde6ce2
--- /dev/null
+++ b/vendor/go.uber.org/zap/.gitignore
@@ -0,0 +1,28 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+vendor
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+*.pprof
+*.out
+*.log
diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl
new file mode 100644
index 0000000000..c6440db8eb
--- /dev/null
+++ b/vendor/go.uber.org/zap/.readme.tmpl
@@ -0,0 +1,108 @@
+# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+Blazing fast, structured, leveled logging in Go.
+
+## Installation
+
+`go get -u go.uber.org/zap`
+
+Note that zap only supports the two most recent minor versions of Go.
+
+## Quick Start
+
+In contexts where performance is nice, but not critical, use the
+`SugaredLogger`. It's 4-10x faster than other structured logging
+packages and includes both structured and `printf`-style APIs.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync() // flushes buffer, if any
+sugar := logger.Sugar()
+sugar.Infow("failed to fetch URL",
+ // Structured context as loosely typed key-value pairs.
+ "url", url,
+ "attempt", 3,
+ "backoff", time.Second,
+)
+sugar.Infof("Failed to fetch URL: %s", url)
+```
+
+When performance and type safety are critical, use the `Logger`. It's even
+faster than the `SugaredLogger` and allocates far less, but it only supports
+structured logging.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync()
+logger.Info("failed to fetch URL",
+ // Structured context as strongly typed Field values.
+ zap.String("url", url),
+ zap.Int("attempt", 3),
+ zap.Duration("backoff", time.Second),
+)
+```
+
+See the [documentation][doc] and [FAQ](FAQ.md) for more details.
+
+## Performance
+
+For applications that log in the hot path, reflection-based serialization and
+string formatting are prohibitively expensive — they're CPU-intensive
+and make many small allocations. Put differently, using `encoding/json` and
+`fmt.Fprintf` to log tons of `interface{}`s makes your application slow.
+
+Zap takes a different approach. It includes a reflection-free, zero-allocation
+JSON encoder, and the base `Logger` strives to avoid serialization overhead
+and allocations wherever possible. By building the high-level `SugaredLogger`
+on that foundation, zap lets users *choose* when they need to count every
+allocation and when they'd prefer a more familiar, loosely typed API.
+
+As measured by its own [benchmarking suite][], not only is zap more performant
+than comparable structured logging packages — it's also faster than the
+standard library. Like all benchmarks, take these with a grain of salt.<sup id="anchor-versions">[1](#footnote-versions)</sup>
+
+Log a message and 10 fields:
+
+{{.BenchmarkAddingFields}}
+
+Log a message with a logger that already has 10 fields of context:
+
+{{.BenchmarkAccumulatedContext}}
+
+Log a static string, without any context or `printf`-style templating:
+
+{{.BenchmarkWithoutFields}}
+
+## Development Status: Stable
+
+All APIs are finalized, and no breaking changes will be made in the 1.x series
+of releases. Users of semver-aware dependency management systems should pin
+zap to `^1`.
+
+## Contributing
+
+We encourage and support an active, healthy community of contributors —
+including you! Details are in the [contribution guide](CONTRIBUTING.md) and
+the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on
+issues and pull requests, but you can also report any negative conduct to
+oss-conduct@uber.com. That email list is a private, safe space; even the zap
+maintainers don't have access, so don't hesitate to hold us to a high
+standard.
+
+<hr>
+
+Released under the [MIT License](LICENSE.txt).
+
+<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
+benchmarking against slightly older versions of other packages. Versions are
+pinned in zap's [glide.lock][] file. [↩](#anchor-versions)
+
+[doc-img]: https://godoc.org/go.uber.org/zap?status.svg
+[doc]: https://godoc.org/go.uber.org/zap
+[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master
+[ci]: https://travis-ci.org/uber-go/zap
+[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/zap
+[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
+[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock
diff --git a/vendor/go.uber.org/zap/.travis.yml b/vendor/go.uber.org/zap/.travis.yml
new file mode 100644
index 0000000000..a3321fa2dc
--- /dev/null
+++ b/vendor/go.uber.org/zap/.travis.yml
@@ -0,0 +1,21 @@
+language: go
+sudo: false
+go:
+ - 1.9.x
+ - 1.10.x
+go_import_path: go.uber.org/zap
+env:
+ global:
+ - TEST_TIMEOUT_SCALE=10
+cache:
+ directories:
+ - vendor
+install:
+ - make dependencies
+script:
+ - make lint
+ - make test
+ - make bench
+after_success:
+ - make cover
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/zap/BUILD.bazel b/vendor/go.uber.org/zap/BUILD.bazel
new file mode 100644
index 0000000000..4e5d6c9fe2
--- /dev/null
+++ b/vendor/go.uber.org/zap/BUILD.bazel
@@ -0,0 +1,33 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "array.go",
+ "config.go",
+ "doc.go",
+ "encoder.go",
+ "error.go",
+ "field.go",
+ "flag.go",
+ "global.go",
+ "http_handler.go",
+ "level.go",
+ "logger.go",
+ "options.go",
+ "sink.go",
+ "stacktrace.go",
+ "sugar.go",
+ "time.go",
+ "writer.go",
+ ],
+ importmap = "sigs.k8s.io/cluster-api-provider-aws/vendor/go.uber.org/zap",
+ importpath = "go.uber.org/zap",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//vendor/go.uber.org/atomic:go_default_library",
+ "//vendor/go.uber.org/multierr:go_default_library",
+ "//vendor/go.uber.org/zap/internal/bufferpool:go_default_library",
+ "//vendor/go.uber.org/zap/zapcore:go_default_library",
+ ],
+)
diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md
new file mode 100644
index 0000000000..17d5b49f33
--- /dev/null
+++ b/vendor/go.uber.org/zap/CHANGELOG.md
@@ -0,0 +1,305 @@
+# Changelog
+
+## v1.9.1 (06 Aug 2018)
+
+Bugfixes:
+
+* [#614][]: MapObjectEncoder should not ignore empty slices.
+
+## v1.9.0 (19 Jul 2018)
+
+Enhancements:
+* [#602][]: Reduce number of allocations when logging with reflection.
+* [#572][], [#606][]: Expose a registry for third-party logging sinks.
+
+Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and
+@dimroc for their contributions to this release.
+
+## v1.8.0 (13 Apr 2018)
+
+Enhancements:
+* [#508][]: Make log level configurable when redirecting the standard
+ library's logger.
+* [#518][]: Add a logger that writes to a `*testing.TB`.
+* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc.
+
+Bugfixes:
+* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`.
+
+Thanks to @DiSiqueira and @djui for their contributions to this release.
+
+## v1.7.1 (25 Sep 2017)
+
+Bugfixes:
+* [#504][]: Store strings when using AddByteString with the map encoder.
+
+## v1.7.0 (21 Sep 2017)
+
+Enhancements:
+
+* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user
+ to specify the level of the logged messages.
+
+## v1.6.0 (30 Aug 2017)
+
+Enhancements:
+
+* [#491][]: Omit zap stack frames from stacktraces.
+* [#490][]: Add a `ContextMap` method to observer logs for simpler
+ field validation in tests.
+
+## v1.5.0 (22 Jul 2017)
+
+Enhancements:
+
+* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`.
+* [#465][]: Support user-supplied encoders for logger names.
+
+Bugfixes:
+
+* [#477][]: Fix a bug that incorrectly truncated deep stacktraces.
+
+Thanks to @richard-tunein and @pavius for their contributions to this release.
+
+## v1.4.1 (08 Jun 2017)
+
+This release fixes two bugs.
+
+Bugfixes:
+
+* [#435][]: Support a variety of case conventions when unmarshaling levels.
+* [#444][]: Fix a panic in the observer.
+
+## v1.4.0 (12 May 2017)
+
+This release adds a few small features and is fully backward-compatible.
+
+Enhancements:
+
+* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to
+ override the Unix-style default.
+* [#425][]: Preserve time zones when logging times.
+* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a
+ variety of operations a bit simpler.
+
+## v1.3.0 (25 Apr 2017)
+
+This release adds an enhancement to zap's testing helpers as well as the
+ability to marshal an AtomicLevel. It is fully backward-compatible.
+
+Enhancements:
+
+* [#415][]: Add a substring-filtering helper to zap's observer. This is
+ particularly useful when testing the `SugaredLogger`.
+* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`.
+
+## v1.2.0 (13 Apr 2017)
+
+This release adds a gRPC compatibility wrapper. It is fully backward-compatible.
+
+Enhancements:
+
+* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements
+ `grpclog.Logger`.
+
+## v1.1.0 (31 Mar 2017)
+
+This release fixes two bugs and adds some enhancements to zap's testing helpers.
+It is fully backward-compatible.
+
+Bugfixes:
+
+* [#385][]: Fix caller path trimming on Windows.
+* [#396][]: Fix a panic when attempting to use non-existent directories with
+ zap's configuration struct.
+
+Enhancements:
+
+* [#386][]: Add filtering helpers to zaptest's observing logger.
+
+Thanks to @moitias for contributing to this release.
+
+## v1.0.0 (14 Mar 2017)
+
+This is zap's first stable release. All exported APIs are now final, and no
+further breaking changes will be made in the 1.x release series. Anyone using a
+semver-aware dependency manager should now pin to `^1`.
+
+Breaking changes:
+
+* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without
+ casting from `[]byte` to `string`.
+* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`,
+ `zap.Logger`, and `zap.SugaredLogger`.
+* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to
+ clash with other testing helpers.
+
+Bugfixes:
+
+* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier
+ for tab-separated console output.
+* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to
+ work with concurrency-safe `WriteSyncer` implementations.
+* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux
+ systems.
+* [#373][]: Report the correct caller from zap's standard library
+ interoperability wrappers.
+
+Enhancements:
+
+* [#348][]: Add a registry allowing third-party encodings to work with zap's
+ built-in `Config`.
+* [#327][]: Make the representation of logger callers configurable (like times,
+ levels, and durations).
+* [#376][]: Allow third-party encoders to use their own buffer pools, which
+ removes the last performance advantage that zap's encoders have over plugins.
+* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple
+ `WriteSyncer`s and lock the result.
+* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in
+ Go 1.9).
+* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it
+ easier for particularly punctilious users to unit test their application's
+ logging.
+
+Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their
+contributions to this release.
+
+## v1.0.0-rc.3 (7 Mar 2017)
+
+This is the third release candidate for zap's stable release. There are no
+breaking changes.
+
+Bugfixes:
+
+* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs
+ rather than `[]uint8`.
+
+Enhancements:
+
+* [#307][]: Users can opt into colored output for log levels.
+* [#353][]: In addition to hijacking the output of the standard library's
+ package-global logging functions, users can now construct a zap-backed
+ `log.Logger` instance.
+* [#311][]: Frames from common runtime functions and some of zap's internal
+ machinery are now omitted from stacktraces.
+
+Thanks to @ansel1 and @suyash for their contributions to this release.
+
+## v1.0.0-rc.2 (21 Feb 2017)
+
+This is the second release candidate for zap's stable release. It includes two
+breaking changes.
+
+Breaking changes:
+
+* [#316][]: Zap's global loggers are now fully concurrency-safe
+ (previously, users had to ensure that `ReplaceGlobals` was called before the
+ loggers were in use). However, they must now be accessed via the `L()` and
+ `S()` functions. Users can update their projects with
+
+ ```
+ gofmt -r "zap.L -> zap.L()" -w .
+ gofmt -r "zap.S -> zap.S()" -w .
+ ```
+* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid
+ JSON and YAML struct tags on all config structs. This release fixes the tags
+ and adds static analysis to prevent similar bugs in the future.
+
+Bugfixes:
+
+* [#321][]: Redirecting the standard library's `log` output now
+ correctly reports the logger's caller.
+
+Enhancements:
+
+* [#325][] and [#333][]: Zap now transparently supports non-standard, rich
+ errors like those produced by `github.com/pkg/errors`.
+* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is
+ now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) ->
+ zap.NewNop()' -w .`.
+* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a
+ more informative error.
+
+Thanks to @skipor and @chapsuk for their contributions to this release.
+
+## v1.0.0-rc.1 (14 Feb 2017)
+
+This is the first release candidate for zap's stable release. There are multiple
+breaking changes and improvements from the pre-release version. Most notably:
+
+* **Zap's import path is now "go.uber.org/zap"** — all users will
+ need to update their code.
+* User-facing types and functions remain in the `zap` package. Code relevant
+ largely to extension authors is now in the `zapcore` package.
+* The `zapcore.Core` type makes it easy for third-party packages to use zap's
+ internals but provide a different user-facing API.
+* `Logger` is now a concrete type instead of an interface.
+* A less verbose (though slower) logging API is included by default.
+* Package-global loggers `L` and `S` are included.
+* A human-friendly console encoder is included.
+* A declarative config struct allows common logger configurations to be managed
+ as configuration instead of code.
+* Sampling is more accurate, and doesn't depend on the standard library's shared
+ timer heap.
+
+## v0.1.0-beta.1 (6 Feb 2017)
+
+This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and
+upgrade at their leisure. Since this is the first tagged release, there are no
+backward compatibility concerns and all functionality is new.
+
+Early zap adopters should pin to the 0.1.x minor version until they're ready to
+upgrade to the upcoming stable release.
+
+[#316]: https://github.com/uber-go/zap/pull/316
+[#309]: https://github.com/uber-go/zap/pull/309
+[#317]: https://github.com/uber-go/zap/pull/317
+[#321]: https://github.com/uber-go/zap/pull/321
+[#325]: https://github.com/uber-go/zap/pull/325
+[#333]: https://github.com/uber-go/zap/pull/333
+[#326]: https://github.com/uber-go/zap/pull/326
+[#300]: https://github.com/uber-go/zap/pull/300
+[#339]: https://github.com/uber-go/zap/pull/339
+[#307]: https://github.com/uber-go/zap/pull/307
+[#353]: https://github.com/uber-go/zap/pull/353
+[#311]: https://github.com/uber-go/zap/pull/311
+[#366]: https://github.com/uber-go/zap/pull/366
+[#364]: https://github.com/uber-go/zap/pull/364
+[#371]: https://github.com/uber-go/zap/pull/371
+[#362]: https://github.com/uber-go/zap/pull/362
+[#369]: https://github.com/uber-go/zap/pull/369
+[#347]: https://github.com/uber-go/zap/pull/347
+[#373]: https://github.com/uber-go/zap/pull/373
+[#348]: https://github.com/uber-go/zap/pull/348
+[#327]: https://github.com/uber-go/zap/pull/327
+[#376]: https://github.com/uber-go/zap/pull/376
+[#346]: https://github.com/uber-go/zap/pull/346
+[#365]: https://github.com/uber-go/zap/pull/365
+[#372]: https://github.com/uber-go/zap/pull/372
+[#385]: https://github.com/uber-go/zap/pull/385
+[#396]: https://github.com/uber-go/zap/pull/396
+[#386]: https://github.com/uber-go/zap/pull/386
+[#402]: https://github.com/uber-go/zap/pull/402
+[#415]: https://github.com/uber-go/zap/pull/415
+[#416]: https://github.com/uber-go/zap/pull/416
+[#424]: https://github.com/uber-go/zap/pull/424
+[#425]: https://github.com/uber-go/zap/pull/425
+[#431]: https://github.com/uber-go/zap/pull/431
+[#435]: https://github.com/uber-go/zap/pull/435
+[#444]: https://github.com/uber-go/zap/pull/444
+[#477]: https://github.com/uber-go/zap/pull/477
+[#465]: https://github.com/uber-go/zap/pull/465
+[#460]: https://github.com/uber-go/zap/pull/460
+[#470]: https://github.com/uber-go/zap/pull/470
+[#487]: https://github.com/uber-go/zap/pull/487
+[#490]: https://github.com/uber-go/zap/pull/490
+[#491]: https://github.com/uber-go/zap/pull/491
+[#504]: https://github.com/uber-go/zap/pull/504
+[#508]: https://github.com/uber-go/zap/pull/508
+[#518]: https://github.com/uber-go/zap/pull/518
+[#577]: https://github.com/uber-go/zap/pull/577
+[#574]: https://github.com/uber-go/zap/pull/574
+[#602]: https://github.com/uber-go/zap/pull/602
+[#572]: https://github.com/uber-go/zap/pull/572
+[#606]: https://github.com/uber-go/zap/pull/606
+[#614]: https://github.com/uber-go/zap/pull/614
diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..e327d9aa5c
--- /dev/null
+++ b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md
@@ -0,0 +1,75 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age,
+body size, disability, ethnicity, gender identity and expression, level of
+experience, nationality, personal appearance, race, religion, or sexual
+identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an
+appointed representative at an online or offline event. Representation of a
+project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at oss-conduct@uber.com. The project
+team will review and investigate all complaints, and will respond in a way
+that it deems appropriate to the circumstances. The project team is obligated
+to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 1.4, available at
+[http://contributor-covenant.org/version/1/4][version].
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md
new file mode 100644
index 0000000000..9454bbaf02
--- /dev/null
+++ b/vendor/go.uber.org/zap/CONTRIBUTING.md
@@ -0,0 +1,81 @@
+# Contributing
+
+We'd love your help making zap the very best structured logging library in Go!
+
+If you'd like to add new exported APIs, please [open an issue][open-issue]
+describing your proposal — discussing API changes ahead of time makes
+pull request review much smoother. In your issue, pull request, and any other
+communications, please remember to treat your fellow contributors with
+respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously.
+
+Note that you'll need to sign [Uber's Contributor License Agreement][cla]
+before we can accept any of your contributions. If necessary, a bot will remind
+you to accept the CLA when you open your pull request.
+
+## Setup
+
+[Fork][fork], then clone the repository:
+
+```
+mkdir -p $GOPATH/src/go.uber.org
+cd $GOPATH/src/go.uber.org
+git clone git@github.com:your_github_username/zap.git
+cd zap
+git remote add upstream https://github.com/uber-go/zap.git
+git fetch upstream
+```
+
+Install zap's dependencies:
+
+```
+make dependencies
+```
+
+Make sure that the tests and the linters pass:
+
+```
+make test
+make lint
+```
+
+If you're not using the minor version of Go specified in the Makefile's
+`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is
+fine, but it means that you'll only discover lint failures after you open your
+pull request.
+
+## Making Changes
+
+Start by creating a new branch for your changes:
+
+```
+cd $GOPATH/src/go.uber.org/zap
+git checkout master
+git fetch upstream
+git rebase upstream/master
+git checkout -b cool_new_feature
+```
+
+Make your changes, then ensure that `make lint` and `make test` still pass. If
+you're satisfied with your changes, push them to your fork.
+
+```
+git push origin cool_new_feature
+```
+
+Then use the GitHub UI to open a pull request.
+
+At this point, you're waiting on us to review your changes. We *try* to respond
+to issues and pull requests within a few business days, and we may suggest some
+improvements or alternatives. Once your changes are approved, one of the
+project maintainers will merge them.
+
+We're much more likely to approve your changes if you:
+
+* Add tests for new functionality.
+* Write a [good commit message][commit-message].
+* Maintain backward compatibility.
+
+[fork]: https://github.com/uber-go/zap/fork
+[open-issue]: https://github.com/uber-go/zap/issues/new
+[cla]: https://cla-assistant.io/uber-go/zap
+[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md
new file mode 100644
index 0000000000..4256d35c76
--- /dev/null
+++ b/vendor/go.uber.org/zap/FAQ.md
@@ -0,0 +1,155 @@
+# Frequently Asked Questions
+
+## Design
+
+### Why spend so much effort on logger performance?
+
+Of course, most applications won't notice the impact of a slow logger: they
+already take tens or hundreds of milliseconds for each operation, so an extra
+millisecond doesn't matter.
+
+On the other hand, why *not* make structured logging fast? The `SugaredLogger`
+isn't any harder to use than other logging packages, and the `Logger` makes
+structured logging possible in performance-sensitive contexts. Across a fleet
+of Go microservices, making each application even slightly more efficient adds
+up quickly.
+
+### Why aren't `Logger` and `SugaredLogger` interfaces?
+
+Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and
+`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points
+out][go-proverbs], "The bigger the interface, the weaker the abstraction."
+Interfaces are also rigid — *any* change requires releasing a new major
+version, since it breaks all third-party implementations.
+
+Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much
+abstraction, and it lets us add methods without introducing breaking changes.
+Your applications should define and depend upon an interface that includes
+just the methods you use.
+
+### Why sample application logs?
+
+Applications often experience runs of errors, either because of a bug or
+because of a misbehaving user. Logging errors is usually a good idea, but it
+can easily make this bad situation worse: not only is your application coping
+with a flood of errors, it's also spending extra CPU cycles and I/O logging
+those errors. Since writes are typically serialized, logging limits throughput
+when you need it most.
+
+Sampling fixes this problem by dropping repetitive log entries. Under normal
+conditions, your application writes out every entry. When similar entries are
+logged hundreds or thousands of times each second, though, zap begins dropping
+duplicates to preserve throughput.
+
+### Why do the structured logging APIs take a message in addition to fields?
+
+Subjectively, we find it helpful to accompany structured context with a brief
+description. This isn't critical during development, but it makes debugging
+and operating unfamiliar systems much easier.
+
+More concretely, zap's sampling algorithm uses the message to identify
+duplicate entries. In our experience, this is a practical middle ground
+between random sampling (which often drops the exact entry that you need while
+debugging) and hashing the complete entry (which is prohibitively expensive).
+
+### Why include package-global loggers?
+
+Since so many other logging packages include a global logger, many
+applications aren't designed to accept loggers as explicit parameters.
+Changing function signatures is often a breaking change, so zap includes
+global loggers to simplify migration.
+
+Avoid them where possible.
+
+### Why include dedicated Panic and Fatal log levels?
+
+In general, application code should handle errors gracefully instead of using
+`panic` or `os.Exit`. However, every rule has exceptions, and it's common to
+crash when an error is truly unrecoverable. To avoid losing any information
+— especially the reason for the crash — the logger must flush any
+buffered entries before the process exits.
+
+Zap makes this easy by offering `Panic` and `Fatal` logging methods that
+automatically flush before exiting. Of course, this doesn't guarantee that
+logs will never be lost, but it eliminates a common error.
+
+See the discussion in uber-go/zap#207 for more details.
+
+### What's `DPanic`?
+
+`DPanic` stands for "panic in development." In development, it logs at
+`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to
+catch errors that are theoretically possible, but shouldn't actually happen,
+*without* crashing in production.
+
+If you've ever written code like this, you need `DPanic`:
+
+```go
+if err != nil {
+ panic(fmt.Sprintf("shouldn't ever get here: %v", err))
+}
+```
+
+## Installation
+
+### What does the error `expects import "go.uber.org/zap"` mean?
+
+Either zap was installed incorrectly or you're referencing the wrong package
+name in your code.
+
+Zap's source code happens to be hosted on GitHub, but the [import
+path][import-path] is `go.uber.org/zap`. This gives us, the project
+maintainers, the freedom to move the source code if necessary. However, it
+means that you need to take a little care when installing and using the
+package.
+
+If you follow two simple rules, everything should work: install zap with `go
+get -u go.uber.org/zap`, and always import it in your code with `import
+"go.uber.org/zap"`. Your code shouldn't contain *any* references to
+`github.com/uber-go/zap`.
+
+## Usage
+
+### Does zap support log rotation?
+
+Zap doesn't natively support rotating log files, since we prefer to leave this
+to an external program like `logrotate`.
+
+However, it's easy to integrate a log rotation package like
+[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`.
+
+```go
+// lumberjack.Logger is already safe for concurrent use, so we don't need to
+// lock it.
+w := zapcore.AddSync(&lumberjack.Logger{
+ Filename: "/var/log/myapp/foo.log",
+ MaxSize: 500, // megabytes
+ MaxBackups: 3,
+ MaxAge: 28, // days
+})
+core := zapcore.NewCore(
+ zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
+ w,
+ zap.InfoLevel,
+)
+logger := zap.New(core)
+```
+
+## Extensions
+
+We'd love to support every logging need within zap itself, but we're only
+familiar with a handful of log ingestion systems, flag-parsing packages, and
+the like. Rather than merging code that we can't effectively debug and
+support, we'd rather grow an ecosystem of zap extensions.
+
+We're aware of the following extensions, but haven't used them ourselves:
+
+| Package | Integration |
+| --- | --- |
+| `github.com/tchap/zapext` | Sentry, syslog |
+| `github.com/fgrosse/zaptest` | Ginkgo |
+| `github.com/blendle/zapdriver` | Stackdriver |
+
+[go-proverbs]: https://go-proverbs.github.io/
+[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths
+[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2
diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE.txt
new file mode 100644
index 0000000000..6652bed45f
--- /dev/null
+++ b/vendor/go.uber.org/zap/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2016-2017 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile
new file mode 100644
index 0000000000..ef7893b3b0
--- /dev/null
+++ b/vendor/go.uber.org/zap/Makefile
@@ -0,0 +1,76 @@
+export GO15VENDOREXPERIMENT=1
+
+BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem
+PKGS ?= $(shell glide novendor)
+# Many Go tools take file globs or directories as arguments instead of packages.
+PKG_FILES ?= *.go zapcore benchmarks buffer zapgrpc zaptest zaptest/observer internal/bufferpool internal/exit internal/color internal/ztest
+
+# The linting tools evolve with each Go version, so run them only on the latest
+# stable release.
+GO_VERSION := $(shell go version | cut -d " " -f 3)
+GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION)))
+LINTABLE_MINOR_VERSIONS := 10
+ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),)
+SHOULD_LINT := true
+endif
+
+
+.PHONY: all
+all: lint test
+
+.PHONY: dependencies
+dependencies:
+ @echo "Installing Glide and locked dependencies..."
+ glide --version || go get -u -f github.com/Masterminds/glide
+ glide install
+ @echo "Installing test dependencies..."
+ go install ./vendor/github.com/axw/gocov/gocov
+ go install ./vendor/github.com/mattn/goveralls
+ifdef SHOULD_LINT
+ @echo "Installing golint..."
+ go install ./vendor/github.com/golang/lint/golint
+else
+ @echo "Not installing golint, since we don't expect to lint on" $(GO_VERSION)
+endif
+
+# Disable printf-like invocation checking due to testify.assert.Error()
+VET_RULES := -printf=false
+
+.PHONY: lint
+lint:
+ifdef SHOULD_LINT
+ @rm -rf lint.log
+ @echo "Checking formatting..."
+ @gofmt -d -s $(PKG_FILES) 2>&1 | tee lint.log
+ @echo "Installing test dependencies for vet..."
+ @go test -i $(PKGS)
+ @echo "Checking vet..."
+ @$(foreach dir,$(PKG_FILES),go tool vet $(VET_RULES) $(dir) 2>&1 | tee -a lint.log;)
+ @echo "Checking lint..."
+ @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;)
+ @echo "Checking for unresolved FIXMEs..."
+ @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log
+ @echo "Checking for license headers..."
+ @./check_license.sh | tee -a lint.log
+ @[ ! -s lint.log ]
+else
+ @echo "Skipping linters on" $(GO_VERSION)
+endif
+
+.PHONY: test
+test:
+ go test -race $(PKGS)
+
+.PHONY: cover
+cover:
+ ./scripts/cover.sh $(PKGS)
+
+.PHONY: bench
+BENCH ?= .
+bench:
+ @$(foreach pkg,$(PKGS),go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) $(pkg);)
+
+.PHONY: updatereadme
+updatereadme:
+ rm -f README.md
+ cat .readme.tmpl | go run internal/readme/readme.go > README.md
diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md
new file mode 100644
index 0000000000..f4fd1cb444
--- /dev/null
+++ b/vendor/go.uber.org/zap/README.md
@@ -0,0 +1,136 @@
+# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+Blazing fast, structured, leveled logging in Go.
+
+## Installation
+
+`go get -u go.uber.org/zap`
+
+Note that zap only supports the two most recent minor versions of Go.
+
+## Quick Start
+
+In contexts where performance is nice, but not critical, use the
+`SugaredLogger`. It's 4-10x faster than other structured logging
+packages and includes both structured and `printf`-style APIs.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync() // flushes buffer, if any
+sugar := logger.Sugar()
+sugar.Infow("failed to fetch URL",
+ // Structured context as loosely typed key-value pairs.
+ "url", url,
+ "attempt", 3,
+ "backoff", time.Second,
+)
+sugar.Infof("Failed to fetch URL: %s", url)
+```
+
+When performance and type safety are critical, use the `Logger`. It's even
+faster than the `SugaredLogger` and allocates far less, but it only supports
+structured logging.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync()
+logger.Info("failed to fetch URL",
+ // Structured context as strongly typed Field values.
+ zap.String("url", url),
+ zap.Int("attempt", 3),
+ zap.Duration("backoff", time.Second),
+)
+```
+
+See the [documentation][doc] and [FAQ](FAQ.md) for more details.
+
+## Performance
+
+For applications that log in the hot path, reflection-based serialization and
+string formatting are prohibitively expensive — they're CPU-intensive
+and make many small allocations. Put differently, using `encoding/json` and
+`fmt.Fprintf` to log tons of `interface{}`s makes your application slow.
+
+Zap takes a different approach. It includes a reflection-free, zero-allocation
+JSON encoder, and the base `Logger` strives to avoid serialization overhead
+and allocations wherever possible. By building the high-level `SugaredLogger`
+on that foundation, zap lets users *choose* when they need to count every
+allocation and when they'd prefer a more familiar, loosely typed API.
+
+As measured by its own [benchmarking suite][], not only is zap more performant
+than comparable structured logging packages — it's also faster than the
+standard library. Like all benchmarks, take these with a grain of salt. <sup id="anchor-versions">[1](#footnote-versions)</sup>
+
+Log a message and 10 fields:
+
+| Package | Time | Objects Allocated |
+| :--- | :---: | :---: |
+| :zap: zap | 3131 ns/op | 5 allocs/op |
+| :zap: zap (sugared) | 4173 ns/op | 21 allocs/op |
+| zerolog | 16154 ns/op | 90 allocs/op |
+| lion | 16341 ns/op | 111 allocs/op |
+| go-kit | 17049 ns/op | 126 allocs/op |
+| logrus | 23662 ns/op | 142 allocs/op |
+| log15 | 36351 ns/op | 149 allocs/op |
+| apex/log | 42530 ns/op | 126 allocs/op |
+
+Log a message with a logger that already has 10 fields of context:
+
+| Package | Time | Objects Allocated |
+| :--- | :---: | :---: |
+| :zap: zap | 380 ns/op | 0 allocs/op |
+| :zap: zap (sugared) | 564 ns/op | 2 allocs/op |
+| zerolog | 321 ns/op | 0 allocs/op |
+| lion | 7092 ns/op | 39 allocs/op |
+| go-kit | 20226 ns/op | 115 allocs/op |
+| logrus | 22312 ns/op | 130 allocs/op |
+| log15 | 28788 ns/op | 79 allocs/op |
+| apex/log | 42063 ns/op | 115 allocs/op |
+
+Log a static string, without any context or `printf`-style templating:
+
+| Package | Time | Objects Allocated |
+| :--- | :---: | :---: |
+| :zap: zap | 361 ns/op | 0 allocs/op |
+| :zap: zap (sugared) | 534 ns/op | 2 allocs/op |
+| zerolog | 323 ns/op | 0 allocs/op |
+| standard library | 575 ns/op | 2 allocs/op |
+| go-kit | 922 ns/op | 13 allocs/op |
+| lion | 1413 ns/op | 10 allocs/op |
+| logrus | 2291 ns/op | 27 allocs/op |
+| apex/log | 3690 ns/op | 11 allocs/op |
+| log15 | 5954 ns/op | 26 allocs/op |
+
+## Development Status: Stable
+
+All APIs are finalized, and no breaking changes will be made in the 1.x series
+of releases. Users of semver-aware dependency management systems should pin
+zap to `^1`.
+
+## Contributing
+
+We encourage and support an active, healthy community of contributors —
+including you! Details are in the [contribution guide](CONTRIBUTING.md) and
+the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on
+issues and pull requests, but you can also report any negative conduct to
+oss-conduct@uber.com. That email list is a private, safe space; even the zap
+maintainers don't have access, so don't hesitate to hold us to a high
+standard.
+
+
+
+Released under the [MIT License](LICENSE.txt).
+
+<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
+benchmarking against slightly older versions of other packages. Versions are
+pinned in zap's [glide.lock][] file. [↩](#anchor-versions)
+
+[doc-img]: https://godoc.org/go.uber.org/zap?status.svg
+[doc]: https://godoc.org/go.uber.org/zap
+[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master
+[ci]: https://travis-ci.org/uber-go/zap
+[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/zap
+[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
+[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock
diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go
new file mode 100644
index 0000000000..5be3704a3e
--- /dev/null
+++ b/vendor/go.uber.org/zap/array.go
@@ -0,0 +1,320 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "time"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// Array constructs a field with the given key and ArrayMarshaler. It provides
+// a flexible, but still type-safe and efficient, way to add array-like types
+// to the logging context. The struct's MarshalLogArray method is called lazily.
+func Array(key string, val zapcore.ArrayMarshaler) Field {
+ return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val}
+}
+
+// Bools constructs a field that carries a slice of bools.
+func Bools(key string, bs []bool) Field {
+ return Array(key, bools(bs))
+}
+
+// ByteStrings constructs a field that carries a slice of []byte, each of which
+// must be UTF-8 encoded text.
+func ByteStrings(key string, bss [][]byte) Field {
+ return Array(key, byteStringsArray(bss))
+}
+
+// Complex128s constructs a field that carries a slice of complex numbers.
+func Complex128s(key string, nums []complex128) Field {
+ return Array(key, complex128s(nums))
+}
+
+// Complex64s constructs a field that carries a slice of complex numbers.
+func Complex64s(key string, nums []complex64) Field {
+ return Array(key, complex64s(nums))
+}
+
+// Durations constructs a field that carries a slice of time.Durations.
+func Durations(key string, ds []time.Duration) Field {
+ return Array(key, durations(ds))
+}
+
+// Float64s constructs a field that carries a slice of floats.
+func Float64s(key string, nums []float64) Field {
+ return Array(key, float64s(nums))
+}
+
+// Float32s constructs a field that carries a slice of floats.
+func Float32s(key string, nums []float32) Field {
+ return Array(key, float32s(nums))
+}
+
+// Ints constructs a field that carries a slice of integers.
+func Ints(key string, nums []int) Field {
+ return Array(key, ints(nums))
+}
+
+// Int64s constructs a field that carries a slice of integers.
+func Int64s(key string, nums []int64) Field {
+ return Array(key, int64s(nums))
+}
+
+// Int32s constructs a field that carries a slice of integers.
+func Int32s(key string, nums []int32) Field {
+ return Array(key, int32s(nums))
+}
+
+// Int16s constructs a field that carries a slice of integers.
+func Int16s(key string, nums []int16) Field {
+ return Array(key, int16s(nums))
+}
+
+// Int8s constructs a field that carries a slice of integers.
+func Int8s(key string, nums []int8) Field {
+ return Array(key, int8s(nums))
+}
+
+// Strings constructs a field that carries a slice of strings.
+func Strings(key string, ss []string) Field {
+ return Array(key, stringArray(ss))
+}
+
+// Times constructs a field that carries a slice of time.Times.
+func Times(key string, ts []time.Time) Field {
+ return Array(key, times(ts))
+}
+
+// Uints constructs a field that carries a slice of unsigned integers.
+func Uints(key string, nums []uint) Field {
+ return Array(key, uints(nums))
+}
+
+// Uint64s constructs a field that carries a slice of unsigned integers.
+func Uint64s(key string, nums []uint64) Field {
+ return Array(key, uint64s(nums))
+}
+
+// Uint32s constructs a field that carries a slice of unsigned integers.
+func Uint32s(key string, nums []uint32) Field {
+ return Array(key, uint32s(nums))
+}
+
+// Uint16s constructs a field that carries a slice of unsigned integers.
+func Uint16s(key string, nums []uint16) Field {
+ return Array(key, uint16s(nums))
+}
+
+// Uint8s constructs a field that carries a slice of unsigned integers.
+func Uint8s(key string, nums []uint8) Field {
+ return Array(key, uint8s(nums))
+}
+
+// Uintptrs constructs a field that carries a slice of pointer addresses.
+func Uintptrs(key string, us []uintptr) Field {
+ return Array(key, uintptrs(us))
+}
+
+// Errors constructs a field that carries a slice of errors.
+func Errors(key string, errs []error) Field {
+ return Array(key, errArray(errs))
+}
+
+type bools []bool
+
+func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range bs {
+ arr.AppendBool(bs[i])
+ }
+ return nil
+}
+
+type byteStringsArray [][]byte
+
+func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range bss {
+ arr.AppendByteString(bss[i])
+ }
+ return nil
+}
+
+type complex128s []complex128
+
+func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendComplex128(nums[i])
+ }
+ return nil
+}
+
+type complex64s []complex64
+
+func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendComplex64(nums[i])
+ }
+ return nil
+}
+
+type durations []time.Duration
+
+func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range ds {
+ arr.AppendDuration(ds[i])
+ }
+ return nil
+}
+
+type float64s []float64
+
+func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendFloat64(nums[i])
+ }
+ return nil
+}
+
+type float32s []float32
+
+func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendFloat32(nums[i])
+ }
+ return nil
+}
+
+type ints []int
+
+func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt(nums[i])
+ }
+ return nil
+}
+
+type int64s []int64
+
+func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt64(nums[i])
+ }
+ return nil
+}
+
+type int32s []int32
+
+func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt32(nums[i])
+ }
+ return nil
+}
+
+type int16s []int16
+
+func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt16(nums[i])
+ }
+ return nil
+}
+
+type int8s []int8
+
+func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt8(nums[i])
+ }
+ return nil
+}
+
+type stringArray []string
+
+func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range ss {
+ arr.AppendString(ss[i])
+ }
+ return nil
+}
+
+type times []time.Time
+
+func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range ts {
+ arr.AppendTime(ts[i])
+ }
+ return nil
+}
+
+type uints []uint
+
+func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint(nums[i])
+ }
+ return nil
+}
+
+type uint64s []uint64
+
+func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint64(nums[i])
+ }
+ return nil
+}
+
+type uint32s []uint32
+
+func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint32(nums[i])
+ }
+ return nil
+}
+
+type uint16s []uint16
+
+func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint16(nums[i])
+ }
+ return nil
+}
+
+type uint8s []uint8
+
+func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint8(nums[i])
+ }
+ return nil
+}
+
+type uintptrs []uintptr
+
+func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUintptr(nums[i])
+ }
+ return nil
+}
diff --git a/vendor/go.uber.org/zap/buffer/BUILD.bazel b/vendor/go.uber.org/zap/buffer/BUILD.bazel
new file mode 100644
index 0000000000..6a50a83f8b
--- /dev/null
+++ b/vendor/go.uber.org/zap/buffer/BUILD.bazel
@@ -0,0 +1,12 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "buffer.go",
+ "pool.go",
+ ],
+ importmap = "sigs.k8s.io/cluster-api-provider-aws/vendor/go.uber.org/zap/buffer",
+ importpath = "go.uber.org/zap/buffer",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go
new file mode 100644
index 0000000000..7592e8c63f
--- /dev/null
+++ b/vendor/go.uber.org/zap/buffer/buffer.go
@@ -0,0 +1,115 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package buffer provides a thin wrapper around a byte slice. Unlike the
+// standard library's bytes.Buffer, it supports a portion of the strconv
+// package's zero-allocation formatters.
+package buffer // import "go.uber.org/zap/buffer"
+
+import "strconv"
+
+const _size = 1024 // by default, create 1 KiB buffers
+
+// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so
+// the only way to construct one is via a Pool.
+type Buffer struct {
+ bs []byte
+ pool Pool
+}
+
+// AppendByte writes a single byte to the Buffer.
+func (b *Buffer) AppendByte(v byte) {
+ b.bs = append(b.bs, v)
+}
+
+// AppendString writes a string to the Buffer.
+func (b *Buffer) AppendString(s string) {
+ b.bs = append(b.bs, s...)
+}
+
+// AppendInt appends an integer to the underlying buffer (assuming base 10).
+func (b *Buffer) AppendInt(i int64) {
+ b.bs = strconv.AppendInt(b.bs, i, 10)
+}
+
+// AppendUint appends an unsigned integer to the underlying buffer (assuming
+// base 10).
+func (b *Buffer) AppendUint(i uint64) {
+ b.bs = strconv.AppendUint(b.bs, i, 10)
+}
+
+// AppendBool appends a bool to the underlying buffer.
+func (b *Buffer) AppendBool(v bool) {
+ b.bs = strconv.AppendBool(b.bs, v)
+}
+
+// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN
+// or +/- Inf.
+func (b *Buffer) AppendFloat(f float64, bitSize int) {
+ b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize)
+}
+
+// Len returns the length of the underlying byte slice.
+func (b *Buffer) Len() int {
+ return len(b.bs)
+}
+
+// Cap returns the capacity of the underlying byte slice.
+func (b *Buffer) Cap() int {
+ return cap(b.bs)
+}
+
+// Bytes returns a mutable reference to the underlying byte slice.
+func (b *Buffer) Bytes() []byte {
+ return b.bs
+}
+
+// String returns a string copy of the underlying byte slice.
+func (b *Buffer) String() string {
+ return string(b.bs)
+}
+
+// Reset resets the underlying byte slice. Subsequent writes re-use the slice's
+// backing array.
+func (b *Buffer) Reset() {
+ b.bs = b.bs[:0]
+}
+
+// Write implements io.Writer.
+func (b *Buffer) Write(bs []byte) (int, error) {
+ b.bs = append(b.bs, bs...)
+ return len(bs), nil
+}
+
+// TrimNewline trims any final "\n" byte from the end of the buffer.
+func (b *Buffer) TrimNewline() {
+ if i := len(b.bs) - 1; i >= 0 {
+ if b.bs[i] == '\n' {
+ b.bs = b.bs[:i]
+ }
+ }
+}
+
+// Free returns the Buffer to its Pool.
+//
+// Callers must not retain references to the Buffer after calling Free.
+func (b *Buffer) Free() {
+ b.pool.put(b)
+}
diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go
new file mode 100644
index 0000000000..8fb3e202cf
--- /dev/null
+++ b/vendor/go.uber.org/zap/buffer/pool.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package buffer
+
+import "sync"
+
+// A Pool is a type-safe wrapper around a sync.Pool.
+type Pool struct {
+ p *sync.Pool
+}
+
+// NewPool constructs a new Pool.
+func NewPool() Pool {
+ return Pool{p: &sync.Pool{
+ New: func() interface{} {
+ return &Buffer{bs: make([]byte, 0, _size)}
+ },
+ }}
+}
+
+// Get retrieves a Buffer from the pool, creating one if necessary.
+func (p Pool) Get() *Buffer {
+ buf := p.p.Get().(*Buffer)
+ buf.Reset()
+ buf.pool = p
+ return buf
+}
+
+func (p Pool) put(buf *Buffer) {
+ p.p.Put(buf)
+}
diff --git a/vendor/go.uber.org/zap/check_license.sh b/vendor/go.uber.org/zap/check_license.sh
new file mode 100644
index 0000000000..345ac8b89a
--- /dev/null
+++ b/vendor/go.uber.org/zap/check_license.sh
@@ -0,0 +1,17 @@
+#!/bin/bash -e
+
+ERROR_COUNT=0
+while read -r file
+do
+ case "$(head -1 "${file}")" in
+ *"Copyright (c) "*" Uber Technologies, Inc.")
+ # everything's cool
+ ;;
+ *)
+ echo "$file is missing license header."
+ (( ERROR_COUNT++ ))
+ ;;
+ esac
+done < <(git ls-files "*\.go")
+
+exit $ERROR_COUNT
diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go
new file mode 100644
index 0000000000..6fe17d9e0f
--- /dev/null
+++ b/vendor/go.uber.org/zap/config.go
@@ -0,0 +1,243 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "sort"
+ "time"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// SamplingConfig sets a sampling strategy for the logger. Sampling caps the
+// global CPU and I/O load that logging puts on your process while attempting
+// to preserve a representative subset of your logs.
+//
+// Values configured here are per-second. See zapcore.NewSampler for details.
+type SamplingConfig struct {
+ Initial int `json:"initial" yaml:"initial"`
+ Thereafter int `json:"thereafter" yaml:"thereafter"`
+}
+
+// Config offers a declarative way to construct a logger. It doesn't do
+// anything that can't be done with New, Options, and the various
+// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to
+// toggle common options.
+//
+// Note that Config intentionally supports only the most common options. More
+// unusual logging setups (logging to network connections or message queues,
+// splitting output between multiple files, etc.) are possible, but require
+// direct use of the zapcore package. For sample code, see the package-level
+// BasicConfiguration and AdvancedConfiguration examples.
+//
+// For an example showing runtime log level changes, see the documentation for
+// AtomicLevel.
+type Config struct {
+ // Level is the minimum enabled logging level. Note that this is a dynamic
+ // level, so calling Config.Level.SetLevel will atomically change the log
+ // level of all loggers descended from this config.
+ Level AtomicLevel `json:"level" yaml:"level"`
+ // Development puts the logger in development mode, which changes the
+ // behavior of DPanicLevel and takes stacktraces more liberally.
+ Development bool `json:"development" yaml:"development"`
+ // DisableCaller stops annotating logs with the calling function's file
+ // name and line number. By default, all logs are annotated.
+ DisableCaller bool `json:"disableCaller" yaml:"disableCaller"`
+ // DisableStacktrace completely disables automatic stacktrace capturing. By
+ // default, stacktraces are captured for WarnLevel and above logs in
+ // development and ErrorLevel and above in production.
+ DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"`
+ // Sampling sets a sampling policy. A nil SamplingConfig disables sampling.
+ Sampling *SamplingConfig `json:"sampling" yaml:"sampling"`
+ // Encoding sets the logger's encoding. Valid values are "json" and
+ // "console", as well as any third-party encodings registered via
+ // RegisterEncoder.
+ Encoding string `json:"encoding" yaml:"encoding"`
+ // EncoderConfig sets options for the chosen encoder. See
+ // zapcore.EncoderConfig for details.
+ EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"`
+ // OutputPaths is a list of URLs or file paths to write logging output to.
+ // See Open for details.
+ OutputPaths []string `json:"outputPaths" yaml:"outputPaths"`
+ // ErrorOutputPaths is a list of URLs to write internal logger errors to.
+ // The default is standard error.
+ //
+ // Note that this setting only affects internal errors; for sample code that
+ // sends error-level logs to a different location from info- and debug-level
+ // logs, see the package-level AdvancedConfiguration example.
+ ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"`
+ // InitialFields is a collection of fields to add to the root logger.
+ InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"`
+}
+
+// NewProductionEncoderConfig returns an opinionated EncoderConfig for
+// production environments.
+func NewProductionEncoderConfig() zapcore.EncoderConfig {
+ return zapcore.EncoderConfig{
+ TimeKey: "ts",
+ LevelKey: "level",
+ NameKey: "logger",
+ CallerKey: "caller",
+ MessageKey: "msg",
+ StacktraceKey: "stacktrace",
+ LineEnding: zapcore.DefaultLineEnding,
+ EncodeLevel: zapcore.LowercaseLevelEncoder,
+ EncodeTime: zapcore.EpochTimeEncoder,
+ EncodeDuration: zapcore.SecondsDurationEncoder,
+ EncodeCaller: zapcore.ShortCallerEncoder,
+ }
+}
+
+// NewProductionConfig is a reasonable production logging configuration.
+// Logging is enabled at InfoLevel and above.
+//
+// It uses a JSON encoder, writes to standard error, and enables sampling.
+// Stacktraces are automatically included on logs of ErrorLevel and above.
+func NewProductionConfig() Config {
+ return Config{
+ Level: NewAtomicLevelAt(InfoLevel),
+ Development: false,
+ Sampling: &SamplingConfig{
+ Initial: 100,
+ Thereafter: 100,
+ },
+ Encoding: "json",
+ EncoderConfig: NewProductionEncoderConfig(),
+ OutputPaths: []string{"stderr"},
+ ErrorOutputPaths: []string{"stderr"},
+ }
+}
+
+// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for
+// development environments.
+func NewDevelopmentEncoderConfig() zapcore.EncoderConfig {
+ return zapcore.EncoderConfig{
+ // Keys can be anything except the empty string.
+ TimeKey: "T",
+ LevelKey: "L",
+ NameKey: "N",
+ CallerKey: "C",
+ MessageKey: "M",
+ StacktraceKey: "S",
+ LineEnding: zapcore.DefaultLineEnding,
+ EncodeLevel: zapcore.CapitalLevelEncoder,
+ EncodeTime: zapcore.ISO8601TimeEncoder,
+ EncodeDuration: zapcore.StringDurationEncoder,
+ EncodeCaller: zapcore.ShortCallerEncoder,
+ }
+}
+
+// NewDevelopmentConfig is a reasonable development logging configuration.
+// Logging is enabled at DebugLevel and above.
+//
+// It enables development mode (which makes DPanicLevel logs panic), uses a
+// console encoder, writes to standard error, and disables sampling.
+// Stacktraces are automatically included on logs of WarnLevel and above.
+func NewDevelopmentConfig() Config {
+ return Config{
+ Level: NewAtomicLevelAt(DebugLevel),
+ Development: true,
+ Encoding: "console",
+ EncoderConfig: NewDevelopmentEncoderConfig(),
+ OutputPaths: []string{"stderr"},
+ ErrorOutputPaths: []string{"stderr"},
+ }
+}
+
+// Build constructs a logger from the Config and Options.
+func (cfg Config) Build(opts ...Option) (*Logger, error) {
+ enc, err := cfg.buildEncoder()
+ if err != nil {
+ return nil, err
+ }
+
+ sink, errSink, err := cfg.openSinks()
+ if err != nil {
+ return nil, err
+ }
+
+ log := New(
+ zapcore.NewCore(enc, sink, cfg.Level),
+ cfg.buildOptions(errSink)...,
+ )
+ if len(opts) > 0 {
+ log = log.WithOptions(opts...)
+ }
+ return log, nil
+}
+
+func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option {
+ opts := []Option{ErrorOutput(errSink)}
+
+ if cfg.Development {
+ opts = append(opts, Development())
+ }
+
+ if !cfg.DisableCaller {
+ opts = append(opts, AddCaller())
+ }
+
+ stackLevel := ErrorLevel
+ if cfg.Development {
+ stackLevel = WarnLevel
+ }
+ if !cfg.DisableStacktrace {
+ opts = append(opts, AddStacktrace(stackLevel))
+ }
+
+ if cfg.Sampling != nil {
+ opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core {
+ return zapcore.NewSampler(core, time.Second, int(cfg.Sampling.Initial), int(cfg.Sampling.Thereafter))
+ }))
+ }
+
+ if len(cfg.InitialFields) > 0 {
+ fs := make([]Field, 0, len(cfg.InitialFields))
+ keys := make([]string, 0, len(cfg.InitialFields))
+ for k := range cfg.InitialFields {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ fs = append(fs, Any(k, cfg.InitialFields[k]))
+ }
+ opts = append(opts, Fields(fs...))
+ }
+
+ return opts
+}
+
+func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) {
+ sink, closeOut, err := Open(cfg.OutputPaths...)
+ if err != nil {
+ return nil, nil, err
+ }
+ errSink, _, err := Open(cfg.ErrorOutputPaths...)
+ if err != nil {
+ closeOut()
+ return nil, nil, err
+ }
+ return sink, errSink, nil
+}
+
+func (cfg Config) buildEncoder() (zapcore.Encoder, error) {
+ return newEncoder(cfg.Encoding, cfg.EncoderConfig)
+}
diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go
new file mode 100644
index 0000000000..8638dd1b96
--- /dev/null
+++ b/vendor/go.uber.org/zap/doc.go
@@ -0,0 +1,113 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package zap provides fast, structured, leveled logging.
+//
+// For applications that log in the hot path, reflection-based serialization
+// and string formatting are prohibitively expensive - they're CPU-intensive
+// and make many small allocations. Put differently, using json.Marshal and
+// fmt.Fprintf to log tons of interface{} makes your application slow.
+//
+// Zap takes a different approach. It includes a reflection-free,
+// zero-allocation JSON encoder, and the base Logger strives to avoid
+// serialization overhead and allocations wherever possible. By building the
+// high-level SugaredLogger on that foundation, zap lets users choose when
+// they need to count every allocation and when they'd prefer a more familiar,
+// loosely typed API.
+//
+// Choosing a Logger
+//
+// In contexts where performance is nice, but not critical, use the
+// SugaredLogger. It's 4-10x faster than other structured logging packages and
+// supports both structured and printf-style logging. Like log15 and go-kit,
+// the SugaredLogger's structured logging APIs are loosely typed and accept a
+// variadic number of key-value pairs. (For more advanced use cases, they also
+// accept strongly typed fields - see the SugaredLogger.With documentation for
+// details.)
+// sugar := zap.NewExample().Sugar()
+// defer sugar.Sync()
+// sugar.Infow("failed to fetch URL",
+// "url", "http://example.com",
+// "attempt", 3,
+// "backoff", time.Second,
+// )
+// sugar.Infof("failed to fetch URL: %s", "http://example.com")
+//
+// By default, loggers are unbuffered. However, since zap's low-level APIs
+// allow buffering, calling Sync before letting your process exit is a good
+// habit.
+//
+// In the rare contexts where every microsecond and every allocation matter,
+// use the Logger. It's even faster than the SugaredLogger and allocates far
+// less, but it only supports strongly-typed, structured logging.
+// logger := zap.NewExample()
+// defer logger.Sync()
+// logger.Info("failed to fetch URL",
+// zap.String("url", "http://example.com"),
+// zap.Int("attempt", 3),
+// zap.Duration("backoff", time.Second),
+// )
+//
+// Choosing between the Logger and SugaredLogger doesn't need to be an
+// application-wide decision: converting between the two is simple and
+// inexpensive.
+// logger := zap.NewExample()
+// defer logger.Sync()
+// sugar := logger.Sugar()
+// plain := sugar.Desugar()
+//
+// Configuring Zap
+//
+// The simplest way to build a Logger is to use zap's opinionated presets:
+// NewExample, NewProduction, and NewDevelopment. These presets build a logger
+// with a single function call:
+// logger, err := zap.NewProduction()
+// if err != nil {
+// log.Fatalf("can't initialize zap logger: %v", err)
+// }
+// defer logger.Sync()
+//
+// Presets are fine for small projects, but larger projects and organizations
+// naturally require a bit more customization. For most users, zap's Config
+// struct strikes the right balance between flexibility and convenience. See
+// the package-level BasicConfiguration example for sample code.
+//
+// More unusual configurations (splitting output between files, sending logs
+// to a message queue, etc.) are possible, but require direct use of
+// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration
+// example for sample code.
+//
+// Extending Zap
+//
+// The zap package itself is a relatively thin wrapper around the interfaces
+// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g.,
+// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an
+// exception aggregation service, like Sentry or Rollbar) typically requires
+// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core
+// interfaces. See the zapcore documentation for details.
+//
+// Similarly, package authors can use the high-performance Encoder and Core
+// implementations in the zapcore package to build their own loggers.
+//
+// Frequently Asked Questions
+//
+// An FAQ covering everything from installation errors to design decisions is
+// available at https://github.com/uber-go/zap/blob/master/FAQ.md.
+package zap // import "go.uber.org/zap"
diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go
new file mode 100644
index 0000000000..2e9d3c3415
--- /dev/null
+++ b/vendor/go.uber.org/zap/encoder.go
@@ -0,0 +1,75 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+var (
+ errNoEncoderNameSpecified = errors.New("no encoder name specified")
+
+ _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){
+ "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+ return zapcore.NewConsoleEncoder(encoderConfig), nil
+ },
+ "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+ return zapcore.NewJSONEncoder(encoderConfig), nil
+ },
+ }
+ _encoderMutex sync.RWMutex
+)
+
+// RegisterEncoder registers an encoder constructor, which the Config struct
+// can then reference. By default, the "json" and "console" encoders are
+// registered.
+//
+// Attempting to register an encoder whose name is already taken returns an
+// error.
+func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error {
+ _encoderMutex.Lock()
+ defer _encoderMutex.Unlock()
+ if name == "" {
+ return errNoEncoderNameSpecified
+ }
+ if _, ok := _encoderNameToConstructor[name]; ok {
+ return fmt.Errorf("encoder already registered for name %q", name)
+ }
+ _encoderNameToConstructor[name] = constructor
+ return nil
+}
+
+func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+ _encoderMutex.RLock()
+ defer _encoderMutex.RUnlock()
+ if name == "" {
+ return nil, errNoEncoderNameSpecified
+ }
+ constructor, ok := _encoderNameToConstructor[name]
+ if !ok {
+ return nil, fmt.Errorf("no encoder registered for name %q", name)
+ }
+ return constructor(encoderConfig)
+}
diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go
new file mode 100644
index 0000000000..65982a51e5
--- /dev/null
+++ b/vendor/go.uber.org/zap/error.go
@@ -0,0 +1,80 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+var _errArrayElemPool = sync.Pool{New: func() interface{} {
+ return &errArrayElem{}
+}}
+
+// Error is shorthand for the common idiom NamedError("error", err).
+func Error(err error) Field {
+ return NamedError("error", err)
+}
+
+// NamedError constructs a field that lazily stores err.Error() under the
+// provided key. Errors which also implement fmt.Formatter (like those produced
+// by github.com/pkg/errors) will also have their verbose representation stored
+// under key+"Verbose". If passed a nil error, the field is a no-op.
+//
+// For the common case in which the key is simply "error", the Error function
+// is shorter and less repetitive.
+func NamedError(key string, err error) Field {
+ if err == nil {
+ return Skip()
+ }
+ return Field{Key: key, Type: zapcore.ErrorType, Interface: err}
+}
+
+type errArray []error
+
+func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range errs {
+ if errs[i] == nil {
+ continue
+ }
+ // To represent each error as an object with an "error" attribute and
+ // potentially an "errorVerbose" attribute, we need to wrap it in a
+ // type that implements LogObjectMarshaler. To prevent this from
+ // allocating, pool the wrapper type.
+ elem := _errArrayElemPool.Get().(*errArrayElem)
+ elem.error = errs[i]
+ arr.AppendObject(elem)
+ elem.error = nil
+ _errArrayElemPool.Put(elem)
+ }
+ return nil
+}
+
+type errArrayElem struct {
+ error
+}
+
+func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ // Re-use the error field's logic, which supports non-standard error types.
+ Error(e.error).AddTo(enc)
+ return nil
+}
diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go
new file mode 100644
index 0000000000..5130e13477
--- /dev/null
+++ b/vendor/go.uber.org/zap/field.go
@@ -0,0 +1,310 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "math"
+ "time"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// Field is an alias for Field. Aliasing this type dramatically
+// improves the navigability of this package's API documentation.
+type Field = zapcore.Field
+
+// Skip constructs a no-op field, which is often useful when handling invalid
+// inputs in other Field constructors.
+func Skip() Field {
+ return Field{Type: zapcore.SkipType}
+}
+
+// Binary constructs a field that carries an opaque binary blob.
+//
+// Binary data is serialized in an encoding-appropriate format. For example,
+// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text,
+// use ByteString.
+func Binary(key string, val []byte) Field {
+ return Field{Key: key, Type: zapcore.BinaryType, Interface: val}
+}
+
+// Bool constructs a field that carries a bool.
+func Bool(key string, val bool) Field {
+ var ival int64
+ if val {
+ ival = 1
+ }
+ return Field{Key: key, Type: zapcore.BoolType, Integer: ival}
+}
+
+// ByteString constructs a field that carries UTF-8 encoded text as a []byte.
+// To log opaque binary blobs (which aren't necessarily valid UTF-8), use
+// Binary.
+func ByteString(key string, val []byte) Field {
+ return Field{Key: key, Type: zapcore.ByteStringType, Interface: val}
+}
+
+// Complex128 constructs a field that carries a complex number. Unlike most
+// numeric fields, this costs an allocation (to convert the complex128 to
+// interface{}).
+func Complex128(key string, val complex128) Field {
+ return Field{Key: key, Type: zapcore.Complex128Type, Interface: val}
+}
+
+// Complex64 constructs a field that carries a complex number. Unlike most
+// numeric fields, this costs an allocation (to convert the complex64 to
+// interface{}).
+func Complex64(key string, val complex64) Field {
+ return Field{Key: key, Type: zapcore.Complex64Type, Interface: val}
+}
+
+// Float64 constructs a field that carries a float64. The way the
+// floating-point value is represented is encoder-dependent, so marshaling is
+// necessarily lazy.
+func Float64(key string, val float64) Field {
+ return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))}
+}
+
+// Float32 constructs a field that carries a float32. The way the
+// floating-point value is represented is encoder-dependent, so marshaling is
+// necessarily lazy.
+func Float32(key string, val float32) Field {
+ return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))}
+}
+
+// Int constructs a field with the given key and value.
+func Int(key string, val int) Field {
+ return Int64(key, int64(val))
+}
+
+// Int64 constructs a field with the given key and value.
+func Int64(key string, val int64) Field {
+ return Field{Key: key, Type: zapcore.Int64Type, Integer: val}
+}
+
+// Int32 constructs a field with the given key and value.
+func Int32(key string, val int32) Field {
+ return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)}
+}
+
+// Int16 constructs a field with the given key and value.
+func Int16(key string, val int16) Field {
+ return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)}
+}
+
+// Int8 constructs a field with the given key and value.
+func Int8(key string, val int8) Field {
+ return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)}
+}
+
+// String constructs a field with the given key and value.
+func String(key string, val string) Field {
+ return Field{Key: key, Type: zapcore.StringType, String: val}
+}
+
+// Uint constructs a field with the given key and value.
+func Uint(key string, val uint) Field {
+ return Uint64(key, uint64(val))
+}
+
+// Uint64 constructs a field with the given key and value.
+func Uint64(key string, val uint64) Field {
+ return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)}
+}
+
+// Uint32 constructs a field with the given key and value.
+func Uint32(key string, val uint32) Field {
+ return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)}
+}
+
+// Uint16 constructs a field with the given key and value.
+func Uint16(key string, val uint16) Field {
+ return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)}
+}
+
+// Uint8 constructs a field with the given key and value.
+func Uint8(key string, val uint8) Field {
+ return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)}
+}
+
+// Uintptr constructs a field with the given key and value.
+func Uintptr(key string, val uintptr) Field {
+ return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)}
+}
+
+// Reflect constructs a field with the given key and an arbitrary object. It uses
+// an encoding-appropriate, reflection-based function to lazily serialize nearly
+// any object into the logging context, but it's relatively slow and
+// allocation-heavy. Outside tests, Any is always a better choice.
+//
+// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect
+// includes the error message in the final log output.
+func Reflect(key string, val interface{}) Field {
+ return Field{Key: key, Type: zapcore.ReflectType, Interface: val}
+}
+
+// Namespace creates a named, isolated scope within the logger's context. All
+// subsequent fields will be added to the new namespace.
+//
+// This helps prevent key collisions when injecting loggers into sub-components
+// or third-party libraries.
+func Namespace(key string) Field {
+ return Field{Key: key, Type: zapcore.NamespaceType}
+}
+
+// Stringer constructs a field with the given key and the output of the value's
+// String method. The Stringer's String method is called lazily.
+func Stringer(key string, val fmt.Stringer) Field {
+ return Field{Key: key, Type: zapcore.StringerType, Interface: val}
+}
+
+// Time constructs a Field with the given key and value. The encoder
+// controls how the time is serialized.
+func Time(key string, val time.Time) Field {
+ return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()}
+}
+
+// Stack constructs a field that stores a stacktrace of the current goroutine
+// under provided key. Keep in mind that taking a stacktrace is eager and
+// expensive (relatively speaking); this function both makes an allocation and
+// takes about two microseconds.
+func Stack(key string) Field {
+ // Returning the stacktrace as a string costs an allocation, but saves us
+ // from expanding the zapcore.Field union struct to include a byte slice. Since
+ // taking a stacktrace is already so expensive (~10us), the extra allocation
+ // is okay.
+ return String(key, takeStacktrace())
+}
+
+// Duration constructs a field with the given key and value. The encoder
+// controls how the duration is serialized.
+func Duration(key string, val time.Duration) Field {
+ return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)}
+}
+
+// Object constructs a field with the given key and ObjectMarshaler. It
+// provides a flexible, but still type-safe and efficient, way to add map- or
+// struct-like user-defined types to the logging context. The struct's
+// MarshalLogObject method is called lazily.
+func Object(key string, val zapcore.ObjectMarshaler) Field {
+ return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val}
+}
+
+// Any takes a key and an arbitrary value and chooses the best way to represent
+// them as a field, falling back to a reflection-based approach only if
+// necessary.
+//
+// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between
+// them. To minimize surprises, []byte values are treated as binary blobs, byte
+// values are treated as uint8, and runes are always treated as integers.
+func Any(key string, value interface{}) Field {
+ switch val := value.(type) {
+ case zapcore.ObjectMarshaler:
+ return Object(key, val)
+ case zapcore.ArrayMarshaler:
+ return Array(key, val)
+ case bool:
+ return Bool(key, val)
+ case []bool:
+ return Bools(key, val)
+ case complex128:
+ return Complex128(key, val)
+ case []complex128:
+ return Complex128s(key, val)
+ case complex64:
+ return Complex64(key, val)
+ case []complex64:
+ return Complex64s(key, val)
+ case float64:
+ return Float64(key, val)
+ case []float64:
+ return Float64s(key, val)
+ case float32:
+ return Float32(key, val)
+ case []float32:
+ return Float32s(key, val)
+ case int:
+ return Int(key, val)
+ case []int:
+ return Ints(key, val)
+ case int64:
+ return Int64(key, val)
+ case []int64:
+ return Int64s(key, val)
+ case int32:
+ return Int32(key, val)
+ case []int32:
+ return Int32s(key, val)
+ case int16:
+ return Int16(key, val)
+ case []int16:
+ return Int16s(key, val)
+ case int8:
+ return Int8(key, val)
+ case []int8:
+ return Int8s(key, val)
+ case string:
+ return String(key, val)
+ case []string:
+ return Strings(key, val)
+ case uint:
+ return Uint(key, val)
+ case []uint:
+ return Uints(key, val)
+ case uint64:
+ return Uint64(key, val)
+ case []uint64:
+ return Uint64s(key, val)
+ case uint32:
+ return Uint32(key, val)
+ case []uint32:
+ return Uint32s(key, val)
+ case uint16:
+ return Uint16(key, val)
+ case []uint16:
+ return Uint16s(key, val)
+ case uint8:
+ return Uint8(key, val)
+ case []byte:
+ return Binary(key, val)
+ case uintptr:
+ return Uintptr(key, val)
+ case []uintptr:
+ return Uintptrs(key, val)
+ case time.Time:
+ return Time(key, val)
+ case []time.Time:
+ return Times(key, val)
+ case time.Duration:
+ return Duration(key, val)
+ case []time.Duration:
+ return Durations(key, val)
+ case error:
+ return NamedError(key, val)
+ case []error:
+ return Errors(key, val)
+ case fmt.Stringer:
+ return Stringer(key, val)
+ default:
+ return Reflect(key, val)
+ }
+}
diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go
new file mode 100644
index 0000000000..1312875072
--- /dev/null
+++ b/vendor/go.uber.org/zap/flag.go
@@ -0,0 +1,39 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "flag"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// LevelFlag uses the standard library's flag.Var to declare a global flag
+// with the specified name, default, and usage guidance. The returned value is
+// a pointer to the value of the flag.
+//
+// If you don't want to use the flag package's global state, you can use any
+// non-nil *Level as a flag.Value with your own *flag.FlagSet.
+func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level {
+ lvl := defaultLevel
+ flag.Var(&lvl, name, usage)
+ return &lvl
+}
diff --git a/vendor/go.uber.org/zap/glide.lock b/vendor/go.uber.org/zap/glide.lock
new file mode 100644
index 0000000000..881b462c0e
--- /dev/null
+++ b/vendor/go.uber.org/zap/glide.lock
@@ -0,0 +1,76 @@
+hash: f073ba522c06c88ea3075bde32a8aaf0969a840a66cab6318a0897d141ffee92
+updated: 2017-07-22T18:06:49.598185334-07:00
+imports:
+- name: go.uber.org/atomic
+ version: 4e336646b2ef9fc6e47be8e21594178f98e5ebcf
+- name: go.uber.org/multierr
+ version: 3c4937480c32f4c13a875a1829af76c98ca3d40a
+testImports:
+- name: github.com/apex/log
+ version: d9b960447bfa720077b2da653cc79e533455b499
+ subpackages:
+ - handlers/json
+- name: github.com/axw/gocov
+ version: 3a69a0d2a4ef1f263e2d92b041a69593d6964fe8
+ subpackages:
+ - gocov
+- name: github.com/davecgh/go-spew
+ version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9
+ subpackages:
+ - spew
+- name: github.com/fatih/color
+ version: 62e9147c64a1ed519147b62a56a14e83e2be02c1
+- name: github.com/go-kit/kit
+ version: e10f5bf035be9af21fd5b2fb4469d5716c6ab07d
+ subpackages:
+ - log
+- name: github.com/go-logfmt/logfmt
+ version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
+- name: github.com/go-stack/stack
+ version: 54be5f394ed2c3e19dac9134a40a95ba5a017f7b
+- name: github.com/golang/lint
+ version: c5fb716d6688a859aae56d26d3e6070808df29f7
+ subpackages:
+ - golint
+- name: github.com/kr/logfmt
+ version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0
+- name: github.com/mattn/go-colorable
+ version: 3fa8c76f9daed4067e4a806fb7e4dc86455c6d6a
+- name: github.com/mattn/go-isatty
+ version: fc9e8d8ef48496124e79ae0df75490096eccf6fe
+- name: github.com/mattn/goveralls
+ version: 6efce81852ad1b7567c17ad71b03aeccc9dd9ae0
+- name: github.com/pborman/uuid
+ version: e790cca94e6cc75c7064b1332e63811d4aae1a53
+- name: github.com/pkg/errors
+ version: 645ef00459ed84a119197bfb8d8205042c6df63d
+- name: github.com/pmezard/go-difflib
+ version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
+ subpackages:
+ - difflib
+- name: github.com/rs/zerolog
+ version: eed4c2b94d945e0b2456ad6aa518a443986b5f22
+- name: github.com/satori/go.uuid
+ version: 5bf94b69c6b68ee1b541973bb8e1144db23a194b
+- name: github.com/sirupsen/logrus
+ version: 7dd06bf38e1e13df288d471a57d5adbac106be9e
+- name: github.com/stretchr/testify
+ version: f6abca593680b2315d2075e0f5e2a9751e3f431a
+ subpackages:
+ - assert
+ - require
+- name: go.pedge.io/lion
+ version: 87958e8713f1fa138d993087133b97e976642159
+- name: golang.org/x/sys
+ version: c4489faa6e5ab84c0ef40d6ee878f7a030281f0f
+ subpackages:
+ - unix
+- name: golang.org/x/tools
+ version: 496819729719f9d07692195e0a94d6edd2251389
+ subpackages:
+ - cover
+- name: gopkg.in/inconshreveable/log15.v2
+ version: b105bd37f74e5d9dc7b6ad7806715c7a2b83fd3f
+ subpackages:
+ - stack
+ - term
diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml
new file mode 100644
index 0000000000..94412594ca
--- /dev/null
+++ b/vendor/go.uber.org/zap/glide.yaml
@@ -0,0 +1,35 @@
+package: go.uber.org/zap
+license: MIT
+import:
+- package: go.uber.org/atomic
+ version: ^1
+- package: go.uber.org/multierr
+ version: ^1
+testImport:
+- package: github.com/satori/go.uuid
+- package: github.com/sirupsen/logrus
+- package: github.com/apex/log
+ subpackages:
+ - handlers/json
+- package: github.com/go-kit/kit
+ subpackages:
+ - log
+- package: github.com/stretchr/testify
+ subpackages:
+ - assert
+ - require
+- package: gopkg.in/inconshreveable/log15.v2
+- package: github.com/mattn/goveralls
+- package: github.com/pborman/uuid
+- package: github.com/pkg/errors
+- package: go.pedge.io/lion
+- package: github.com/rs/zerolog
+- package: golang.org/x/tools
+ subpackages:
+ - cover
+- package: github.com/golang/lint
+ subpackages:
+ - golint
+- package: github.com/axw/gocov
+ subpackages:
+ - gocov
diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go
new file mode 100644
index 0000000000..d02232e39f
--- /dev/null
+++ b/vendor/go.uber.org/zap/global.go
@@ -0,0 +1,169 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "os"
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+const (
+ _stdLogDefaultDepth = 2
+ _loggerWriterDepth = 2
+ _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " +
+ "https://github.com/uber-go/zap/issues/new and reference this error: %v"
+)
+
+var (
+ _globalMu sync.RWMutex
+ _globalL = NewNop()
+ _globalS = _globalL.Sugar()
+)
+
+// L returns the global Logger, which can be reconfigured with ReplaceGlobals.
+// It's safe for concurrent use.
+func L() *Logger {
+ _globalMu.RLock()
+ l := _globalL
+ _globalMu.RUnlock()
+ return l
+}
+
+// S returns the global SugaredLogger, which can be reconfigured with
+// ReplaceGlobals. It's safe for concurrent use.
+func S() *SugaredLogger {
+ _globalMu.RLock()
+ s := _globalS
+ _globalMu.RUnlock()
+ return s
+}
+
+// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a
+// function to restore the original values. It's safe for concurrent use.
+func ReplaceGlobals(logger *Logger) func() {
+ _globalMu.Lock()
+ prev := _globalL
+ _globalL = logger
+ _globalS = logger.Sugar()
+ _globalMu.Unlock()
+ return func() { ReplaceGlobals(prev) }
+}
+
+// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at
+// InfoLevel. To redirect the standard library's package-global logging
+// functions, use RedirectStdLog instead.
+func NewStdLog(l *Logger) *log.Logger {
+ logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+ f := logger.Info
+ return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */)
+}
+
+// NewStdLogAt returns *log.Logger which writes to supplied zap logger at
+// required level.
+func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) {
+ logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+ logFunc, err := levelToFunc(logger, level)
+ if err != nil {
+ return nil, err
+ }
+ return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil
+}
+
+// RedirectStdLog redirects output from the standard library's package-global
+// logger to the supplied logger at InfoLevel. Since zap already handles caller
+// annotations, timestamps, etc., it automatically disables the standard
+// library's annotations and prefixing.
+//
+// It returns a function to restore the original prefix and flags and reset the
+// standard library's output to os.Stderr.
+func RedirectStdLog(l *Logger) func() {
+ f, err := redirectStdLogAt(l, InfoLevel)
+ if err != nil {
+ // Can't get here, since passing InfoLevel to redirectStdLogAt always
+ // works.
+ panic(fmt.Sprintf(_programmerErrorTemplate, err))
+ }
+ return f
+}
+
+// RedirectStdLogAt redirects output from the standard library's package-global
+// logger to the supplied logger at the specified level. Since zap already
+// handles caller annotations, timestamps, etc., it automatically disables the
+// standard library's annotations and prefixing.
+//
+// It returns a function to restore the original prefix and flags and reset the
+// standard library's output to os.Stderr.
+func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) {
+ return redirectStdLogAt(l, level)
+}
+
+func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) {
+ flags := log.Flags()
+ prefix := log.Prefix()
+ log.SetFlags(0)
+ log.SetPrefix("")
+ logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+ logFunc, err := levelToFunc(logger, level)
+ if err != nil {
+ return nil, err
+ }
+ log.SetOutput(&loggerWriter{logFunc})
+ return func() {
+ log.SetFlags(flags)
+ log.SetPrefix(prefix)
+ log.SetOutput(os.Stderr)
+ }, nil
+}
+
+func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) {
+ switch lvl {
+ case DebugLevel:
+ return logger.Debug, nil
+ case InfoLevel:
+ return logger.Info, nil
+ case WarnLevel:
+ return logger.Warn, nil
+ case ErrorLevel:
+ return logger.Error, nil
+ case DPanicLevel:
+ return logger.DPanic, nil
+ case PanicLevel:
+ return logger.Panic, nil
+ case FatalLevel:
+ return logger.Fatal, nil
+ }
+ return nil, fmt.Errorf("unrecognized level: %q", lvl)
+}
+
+type loggerWriter struct {
+ logFunc func(msg string, fields ...Field)
+}
+
+func (l *loggerWriter) Write(p []byte) (int, error) {
+ p = bytes.TrimSpace(p)
+ l.logFunc(string(p))
+ return len(p), nil
+}
diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go
new file mode 100644
index 0000000000..1b0ecaca9c
--- /dev/null
+++ b/vendor/go.uber.org/zap/http_handler.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// ServeHTTP is a simple JSON endpoint that can report on or change the current
+// logging level.
+//
+// GET requests return a JSON description of the current logging level. PUT
+// requests change the logging level and expect a payload like:
+// {"level":"info"}
+//
+// It's perfectly safe to change the logging level while a program is running.
+func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ type errorResponse struct {
+ Error string `json:"error"`
+ }
+ type payload struct {
+ Level *zapcore.Level `json:"level"`
+ }
+
+ enc := json.NewEncoder(w)
+
+ switch r.Method {
+
+ case http.MethodGet:
+ current := lvl.Level()
+ enc.Encode(payload{Level: &current})
+
+ case http.MethodPut:
+ var req payload
+
+ if errmess := func() string {
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+ return fmt.Sprintf("Request body must be well-formed JSON: %v", err)
+ }
+ if req.Level == nil {
+ return "Must specify a logging level."
+ }
+ return ""
+ }(); errmess != "" {
+ w.WriteHeader(http.StatusBadRequest)
+ enc.Encode(errorResponse{Error: errmess})
+ return
+ }
+
+ lvl.SetLevel(*req.Level)
+ enc.Encode(req)
+
+ default:
+ w.WriteHeader(http.StatusMethodNotAllowed)
+ enc.Encode(errorResponse{
+ Error: "Only GET and PUT are supported.",
+ })
+ }
+}
diff --git a/vendor/go.uber.org/zap/internal/bufferpool/BUILD.bazel b/vendor/go.uber.org/zap/internal/bufferpool/BUILD.bazel
new file mode 100644
index 0000000000..d7935ffea3
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/bufferpool/BUILD.bazel
@@ -0,0 +1,10 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = ["bufferpool.go"],
+ importmap = "sigs.k8s.io/cluster-api-provider-aws/vendor/go.uber.org/zap/internal/bufferpool",
+ importpath = "go.uber.org/zap/internal/bufferpool",
+ visibility = ["//vendor/go.uber.org/zap:__subpackages__"],
+ deps = ["//vendor/go.uber.org/zap/buffer:go_default_library"],
+)
diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
new file mode 100644
index 0000000000..dad583aaa5
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package bufferpool houses zap's shared internal buffer pool. Third-party
+// packages can recreate the same functionality with buffers.NewPool.
+package bufferpool
+
+import "go.uber.org/zap/buffer"
+
+var (
+ _pool = buffer.NewPool()
+ // Get retrieves a buffer from the pool, creating one if necessary.
+ Get = _pool.Get
+)
diff --git a/vendor/go.uber.org/zap/internal/color/BUILD.bazel b/vendor/go.uber.org/zap/internal/color/BUILD.bazel
new file mode 100644
index 0000000000..a34f672226
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/color/BUILD.bazel
@@ -0,0 +1,9 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = ["color.go"],
+ importmap = "sigs.k8s.io/cluster-api-provider-aws/vendor/go.uber.org/zap/internal/color",
+ importpath = "go.uber.org/zap/internal/color",
+ visibility = ["//vendor/go.uber.org/zap:__subpackages__"],
+)
diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go
new file mode 100644
index 0000000000..c4d5d02abc
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/color/color.go
@@ -0,0 +1,44 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package color adds coloring functionality for TTY output.
+package color
+
+import "fmt"
+
+// Foreground colors.
+const (
+ Black Color = iota + 30
+ Red
+ Green
+ Yellow
+ Blue
+ Magenta
+ Cyan
+ White
+)
+
+// Color represents a text color.
+type Color uint8
+
+// Add adds the coloring to the given string.
+func (c Color) Add(s string) string {
+ return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s)
+}
diff --git a/vendor/go.uber.org/zap/internal/exit/BUILD.bazel b/vendor/go.uber.org/zap/internal/exit/BUILD.bazel
new file mode 100644
index 0000000000..d172988e60
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/exit/BUILD.bazel
@@ -0,0 +1,9 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = ["exit.go"],
+ importmap = "sigs.k8s.io/cluster-api-provider-aws/vendor/go.uber.org/zap/internal/exit",
+ importpath = "go.uber.org/zap/internal/exit",
+ visibility = ["//vendor/go.uber.org/zap:__subpackages__"],
+)
diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go
new file mode 100644
index 0000000000..dfc5b05feb
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/exit/exit.go
@@ -0,0 +1,64 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package exit provides stubs so that unit tests can exercise code that calls
+// os.Exit(1).
+package exit
+
+import "os"
+
+var real = func() { os.Exit(1) }
+
+// Exit normally terminates the process by calling os.Exit(1). If the package
+// is stubbed, it instead records a call in the testing spy.
+func Exit() {
+ real()
+}
+
+// A StubbedExit is a testing fake for os.Exit.
+type StubbedExit struct {
+ Exited bool
+ prev func()
+}
+
+// Stub substitutes a fake for the call to os.Exit(1).
+func Stub() *StubbedExit {
+ s := &StubbedExit{prev: real}
+ real = s.exit
+ return s
+}
+
+// WithStub runs the supplied function with Exit stubbed. It returns the stub
+// used, so that users can test whether the process would have crashed.
+func WithStub(f func()) *StubbedExit {
+ s := Stub()
+ defer s.Unstub()
+ f()
+ return s
+}
+
+// Unstub restores the previous exit function.
+func (se *StubbedExit) Unstub() {
+ real = se.prev
+}
+
+func (se *StubbedExit) exit() {
+ se.Exited = true
+}
diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go
new file mode 100644
index 0000000000..3567a9a1e6
--- /dev/null
+++ b/vendor/go.uber.org/zap/level.go
@@ -0,0 +1,132 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "go.uber.org/atomic"
+ "go.uber.org/zap/zapcore"
+)
+
+const (
+ // DebugLevel logs are typically voluminous, and are usually disabled in
+ // production.
+ DebugLevel = zapcore.DebugLevel
+ // InfoLevel is the default logging priority.
+ InfoLevel = zapcore.InfoLevel
+ // WarnLevel logs are more important than Info, but don't need individual
+ // human review.
+ WarnLevel = zapcore.WarnLevel
+ // ErrorLevel logs are high-priority. If an application is running smoothly,
+ // it shouldn't generate any error-level logs.
+ ErrorLevel = zapcore.ErrorLevel
+ // DPanicLevel logs are particularly important errors. In development the
+ // logger panics after writing the message.
+ DPanicLevel = zapcore.DPanicLevel
+ // PanicLevel logs a message, then panics.
+ PanicLevel = zapcore.PanicLevel
+ // FatalLevel logs a message, then calls os.Exit(1).
+ FatalLevel = zapcore.FatalLevel
+)
+
+// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with
+// an anonymous function.
+//
+// It's particularly useful when splitting log output between different
+// outputs (e.g., standard error and standard out). For sample code, see the
+// package-level AdvancedConfiguration example.
+type LevelEnablerFunc func(zapcore.Level) bool
+
+// Enabled calls the wrapped function.
+func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) }
+
+// An AtomicLevel is an atomically changeable, dynamic logging level. It lets
+// you safely change the log level of a tree of loggers (the root logger and
+// any children created by adding context) at runtime.
+//
+// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to
+// alter its level.
+//
+// AtomicLevels must be created with the NewAtomicLevel constructor to allocate
+// their internal atomic pointer.
+type AtomicLevel struct {
+ l *atomic.Int32
+}
+
+// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
+// enabled.
+func NewAtomicLevel() AtomicLevel {
+ return AtomicLevel{
+ l: atomic.NewInt32(int32(InfoLevel)),
+ }
+}
+
+// NewAtomicLevelAt is a convenience function that creates an AtomicLevel
+// and then calls SetLevel with the given level.
+func NewAtomicLevelAt(l zapcore.Level) AtomicLevel {
+ a := NewAtomicLevel()
+ a.SetLevel(l)
+ return a
+}
+
+// Enabled implements the zapcore.LevelEnabler interface, which allows the
+// AtomicLevel to be used in place of traditional static levels.
+func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {
+ return lvl.Level().Enabled(l)
+}
+
+// Level returns the minimum enabled log level.
+func (lvl AtomicLevel) Level() zapcore.Level {
+ return zapcore.Level(int8(lvl.l.Load()))
+}
+
+// SetLevel alters the logging level.
+func (lvl AtomicLevel) SetLevel(l zapcore.Level) {
+ lvl.l.Store(int32(l))
+}
+
+// String returns the string representation of the underlying Level.
+func (lvl AtomicLevel) String() string {
+ return lvl.Level().String()
+}
+
+// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text
+// representations as the static zapcore.Levels ("debug", "info", "warn",
+// "error", "dpanic", "panic", and "fatal").
+func (lvl *AtomicLevel) UnmarshalText(text []byte) error {
+ if lvl.l == nil {
+ lvl.l = &atomic.Int32{}
+ }
+
+ var l zapcore.Level
+ if err := l.UnmarshalText(text); err != nil {
+ return err
+ }
+
+ lvl.SetLevel(l)
+ return nil
+}
+
+// MarshalText marshals the AtomicLevel to a byte slice. It uses the same
+// text representation as the static zapcore.Levels ("debug", "info", "warn",
+// "error", "dpanic", "panic", and "fatal").
+func (lvl AtomicLevel) MarshalText() (text []byte, err error) {
+ return lvl.Level().MarshalText()
+}
diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go
new file mode 100644
index 0000000000..dc8f6e3a4b
--- /dev/null
+++ b/vendor/go.uber.org/zap/logger.go
@@ -0,0 +1,305 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "strings"
+ "time"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// A Logger provides fast, leveled, structured logging. All methods are safe
+// for concurrent use.
+//
+// The Logger is designed for contexts in which every microsecond and every
+// allocation matters, so its API intentionally favors performance and type
+// safety over brevity. For most applications, the SugaredLogger strikes a
+// better balance between performance and ergonomics.
+type Logger struct {
+ core zapcore.Core
+
+ development bool
+ name string
+ errorOutput zapcore.WriteSyncer
+
+ addCaller bool
+ addStack zapcore.LevelEnabler
+
+ callerSkip int
+}
+
+// New constructs a new Logger from the provided zapcore.Core and Options. If
+// the passed zapcore.Core is nil, it falls back to using a no-op
+// implementation.
+//
+// This is the most flexible way to construct a Logger, but also the most
+// verbose. For typical use cases, the highly-opinionated presets
+// (NewProduction, NewDevelopment, and NewExample) or the Config struct are
+// more convenient.
+//
+// For sample code, see the package-level AdvancedConfiguration example.
+func New(core zapcore.Core, options ...Option) *Logger {
+ if core == nil {
+ return NewNop()
+ }
+ log := &Logger{
+ core: core,
+ errorOutput: zapcore.Lock(os.Stderr),
+ addStack: zapcore.FatalLevel + 1,
+ }
+ return log.WithOptions(options...)
+}
+
+// NewNop returns a no-op Logger. It never writes out logs or internal errors,
+// and it never runs user-defined hooks.
+//
+// Using WithOptions to replace the Core or error output of a no-op Logger can
+// re-enable logging.
+func NewNop() *Logger {
+ return &Logger{
+ core: zapcore.NewNopCore(),
+ errorOutput: zapcore.AddSync(ioutil.Discard),
+ addStack: zapcore.FatalLevel + 1,
+ }
+}
+
+// NewProduction builds a sensible production Logger that writes InfoLevel and
+// above logs to standard error as JSON.
+//
+// It's a shortcut for NewProductionConfig().Build(...Option).
+func NewProduction(options ...Option) (*Logger, error) {
+ return NewProductionConfig().Build(options...)
+}
+
+// NewDevelopment builds a development Logger that writes DebugLevel and above
+// logs to standard error in a human-friendly format.
+//
+// It's a shortcut for NewDevelopmentConfig().Build(...Option).
+func NewDevelopment(options ...Option) (*Logger, error) {
+ return NewDevelopmentConfig().Build(options...)
+}
+
+// NewExample builds a Logger that's designed for use in zap's testable
+// examples. It writes DebugLevel and above logs to standard out as JSON, but
+// omits the timestamp and calling function to keep example output
+// short and deterministic.
+func NewExample(options ...Option) *Logger {
+ encoderCfg := zapcore.EncoderConfig{
+ MessageKey: "msg",
+ LevelKey: "level",
+ NameKey: "logger",
+ EncodeLevel: zapcore.LowercaseLevelEncoder,
+ EncodeTime: zapcore.ISO8601TimeEncoder,
+ EncodeDuration: zapcore.StringDurationEncoder,
+ }
+ core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel)
+ return New(core).WithOptions(options...)
+}
+
+// Sugar wraps the Logger to provide a more ergonomic, but slightly slower,
+// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a
+// single application to use both Loggers and SugaredLoggers, converting
+// between them on the boundaries of performance-sensitive code.
+func (log *Logger) Sugar() *SugaredLogger {
+ core := log.clone()
+ core.callerSkip += 2
+ return &SugaredLogger{core}
+}
+
+// Named adds a new path segment to the logger's name. Segments are joined by
+// periods. By default, Loggers are unnamed.
+func (log *Logger) Named(s string) *Logger {
+ if s == "" {
+ return log
+ }
+ l := log.clone()
+ if log.name == "" {
+ l.name = s
+ } else {
+ l.name = strings.Join([]string{l.name, s}, ".")
+ }
+ return l
+}
+
+// WithOptions clones the current Logger, applies the supplied Options, and
+// returns the resulting Logger. It's safe to use concurrently.
+func (log *Logger) WithOptions(opts ...Option) *Logger {
+ c := log.clone()
+ for _, opt := range opts {
+ opt.apply(c)
+ }
+ return c
+}
+
+// With creates a child logger and adds structured context to it. Fields added
+// to the child don't affect the parent, and vice versa.
+func (log *Logger) With(fields ...Field) *Logger {
+ if len(fields) == 0 {
+ return log
+ }
+ l := log.clone()
+ l.core = l.core.With(fields)
+ return l
+}
+
+// Check returns a CheckedEntry if logging a message at the specified level
+// is enabled. It's a completely optional optimization; in high-performance
+// applications, Check can help avoid allocating a slice to hold fields.
+func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+ return log.check(lvl, msg)
+}
+
+// Debug logs a message at DebugLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Debug(msg string, fields ...Field) {
+ if ce := log.check(DebugLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Info logs a message at InfoLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Info(msg string, fields ...Field) {
+ if ce := log.check(InfoLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Warn logs a message at WarnLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Warn(msg string, fields ...Field) {
+ if ce := log.check(WarnLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Error logs a message at ErrorLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Error(msg string, fields ...Field) {
+ if ce := log.check(ErrorLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// DPanic logs a message at DPanicLevel. The message includes any fields
+// passed at the log site, as well as any fields accumulated on the logger.
+//
+// If the logger is in development mode, it then panics (DPanic means
+// "development panic"). This is useful for catching errors that are
+// recoverable, but shouldn't ever happen.
+func (log *Logger) DPanic(msg string, fields ...Field) {
+ if ce := log.check(DPanicLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Panic logs a message at PanicLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then panics, even if logging at PanicLevel is disabled.
+func (log *Logger) Panic(msg string, fields ...Field) {
+ if ce := log.check(PanicLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Fatal logs a message at FatalLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then calls os.Exit(1), even if logging at FatalLevel is
+// disabled.
+func (log *Logger) Fatal(msg string, fields ...Field) {
+ if ce := log.check(FatalLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Sync calls the underlying Core's Sync method, flushing any buffered log
+// entries. Applications should take care to call Sync before exiting.
+func (log *Logger) Sync() error {
+ return log.core.Sync()
+}
+
+// Core returns the Logger's underlying zapcore.Core.
+func (log *Logger) Core() zapcore.Core {
+ return log.core
+}
+
+func (log *Logger) clone() *Logger {
+ copy := *log
+	return &copy
+}
+
+func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+ // check must always be called directly by a method in the Logger interface
+ // (e.g., Check, Info, Fatal).
+ const callerSkipOffset = 2
+
+ // Create basic checked entry thru the core; this will be non-nil if the
+ // log message will actually be written somewhere.
+ ent := zapcore.Entry{
+ LoggerName: log.name,
+ Time: time.Now(),
+ Level: lvl,
+ Message: msg,
+ }
+ ce := log.core.Check(ent, nil)
+ willWrite := ce != nil
+
+ // Set up any required terminal behavior.
+ switch ent.Level {
+ case zapcore.PanicLevel:
+ ce = ce.Should(ent, zapcore.WriteThenPanic)
+ case zapcore.FatalLevel:
+ ce = ce.Should(ent, zapcore.WriteThenFatal)
+ case zapcore.DPanicLevel:
+ if log.development {
+ ce = ce.Should(ent, zapcore.WriteThenPanic)
+ }
+ }
+
+ // Only do further annotation if we're going to write this message; checked
+ // entries that exist only for terminal behavior don't benefit from
+ // annotation.
+ if !willWrite {
+ return ce
+ }
+
+ // Thread the error output through to the CheckedEntry.
+ ce.ErrorOutput = log.errorOutput
+ if log.addCaller {
+ ce.Entry.Caller = zapcore.NewEntryCaller(runtime.Caller(log.callerSkip + callerSkipOffset))
+ if !ce.Entry.Caller.Defined {
+ fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC())
+ log.errorOutput.Sync()
+ }
+ }
+ if log.addStack.Enabled(ce.Entry.Level) {
+ ce.Entry.Stack = Stack("").String
+ }
+
+ return ce
+}
diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go
new file mode 100644
index 0000000000..7a6b0fca1b
--- /dev/null
+++ b/vendor/go.uber.org/zap/options.go
@@ -0,0 +1,109 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import "go.uber.org/zap/zapcore"
+
+// An Option configures a Logger.
+type Option interface {
+ apply(*Logger)
+}
+
+// optionFunc wraps a func so it satisfies the Option interface.
+type optionFunc func(*Logger)
+
+func (f optionFunc) apply(log *Logger) {
+ f(log)
+}
+
+// WrapCore wraps or replaces the Logger's underlying zapcore.Core.
+func WrapCore(f func(zapcore.Core) zapcore.Core) Option {
+ return optionFunc(func(log *Logger) {
+ log.core = f(log.core)
+ })
+}
+
+// Hooks registers functions which will be called each time the Logger writes
+// out an Entry. Repeated use of Hooks is additive.
+//
+// Hooks are useful for simple side effects, like capturing metrics for the
+// number of emitted logs. More complex side effects, including anything that
+// requires access to the Entry's structured fields, should be implemented as
+// a zapcore.Core instead. See zapcore.RegisterHooks for details.
+func Hooks(hooks ...func(zapcore.Entry) error) Option {
+ return optionFunc(func(log *Logger) {
+ log.core = zapcore.RegisterHooks(log.core, hooks...)
+ })
+}
+
+// Fields adds fields to the Logger.
+func Fields(fs ...Field) Option {
+ return optionFunc(func(log *Logger) {
+ log.core = log.core.With(fs)
+ })
+}
+
+// ErrorOutput sets the destination for errors generated by the Logger. Note
+// that this option only affects internal errors; for sample code that sends
+// error-level logs to a different location from info- and debug-level logs,
+// see the package-level AdvancedConfiguration example.
+//
+// The supplied WriteSyncer must be safe for concurrent use. The Open and
+// zapcore.Lock functions are the simplest ways to protect files with a mutex.
+func ErrorOutput(w zapcore.WriteSyncer) Option {
+ return optionFunc(func(log *Logger) {
+ log.errorOutput = w
+ })
+}
+
+// Development puts the logger in development mode, which makes DPanic-level
+// logs panic instead of simply logging an error.
+func Development() Option {
+ return optionFunc(func(log *Logger) {
+ log.development = true
+ })
+}
+
+// AddCaller configures the Logger to annotate each message with the filename
+// and line number of zap's caller.
+func AddCaller() Option {
+ return optionFunc(func(log *Logger) {
+ log.addCaller = true
+ })
+}
+
+// AddCallerSkip increases the number of callers skipped by caller annotation
+// (as enabled by the AddCaller option). When building wrappers around the
+// Logger and SugaredLogger, supplying this Option prevents zap from always
+// reporting the wrapper code as the caller.
+func AddCallerSkip(skip int) Option {
+ return optionFunc(func(log *Logger) {
+ log.callerSkip += skip
+ })
+}
+
+// AddStacktrace configures the Logger to record a stack trace for all messages at
+// or above a given level.
+func AddStacktrace(lvl zapcore.LevelEnabler) Option {
+ return optionFunc(func(log *Logger) {
+ log.addStack = lvl
+ })
+}
diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go
new file mode 100644
index 0000000000..ff0becfe5d
--- /dev/null
+++ b/vendor/go.uber.org/zap/sink.go
@@ -0,0 +1,161 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "strings"
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+const schemeFile = "file"
+
+var (
+ _sinkMutex sync.RWMutex
+ _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme
+)
+
+func init() {
+ resetSinkRegistry()
+}
+
+func resetSinkRegistry() {
+ _sinkMutex.Lock()
+ defer _sinkMutex.Unlock()
+
+ _sinkFactories = map[string]func(*url.URL) (Sink, error){
+ schemeFile: newFileSink,
+ }
+}
+
+// Sink defines the interface to write to and close logger destinations.
+type Sink interface {
+ zapcore.WriteSyncer
+ io.Closer
+}
+
+type nopCloserSink struct{ zapcore.WriteSyncer }
+
+func (nopCloserSink) Close() error { return nil }
+
+type errSinkNotFound struct {
+ scheme string
+}
+
+func (e *errSinkNotFound) Error() string {
+ return fmt.Sprintf("no sink found for scheme %q", e.scheme)
+}
+
+// RegisterSink registers a user-supplied factory for all sinks with a
+// particular scheme.
+//
+// All schemes must be ASCII, valid under section 3.1 of RFC 3986
+// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
+// have a factory registered. Zap automatically registers a factory for the
+// "file" scheme.
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+ _sinkMutex.Lock()
+ defer _sinkMutex.Unlock()
+
+ if scheme == "" {
+ return errors.New("can't register a sink factory for empty string")
+ }
+ normalized, err := normalizeScheme(scheme)
+ if err != nil {
+ return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
+ }
+ if _, ok := _sinkFactories[normalized]; ok {
+ return fmt.Errorf("sink factory already registered for scheme %q", normalized)
+ }
+ _sinkFactories[normalized] = factory
+ return nil
+}
+
+func newSink(rawURL string) (Sink, error) {
+ u, err := url.Parse(rawURL)
+ if err != nil {
+ return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
+ }
+ if u.Scheme == "" {
+ u.Scheme = schemeFile
+ }
+
+ _sinkMutex.RLock()
+ factory, ok := _sinkFactories[u.Scheme]
+ _sinkMutex.RUnlock()
+ if !ok {
+ return nil, &errSinkNotFound{u.Scheme}
+ }
+ return factory(u)
+}
+
+func newFileSink(u *url.URL) (Sink, error) {
+ if u.User != nil {
+ return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
+ }
+ if u.Fragment != "" {
+ return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u)
+ }
+ if u.RawQuery != "" {
+ return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u)
+ }
+ // Error messages are better if we check hostname and port separately.
+ if u.Port() != "" {
+ return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u)
+ }
+ if hn := u.Hostname(); hn != "" && hn != "localhost" {
+ return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
+ }
+ switch u.Path {
+ case "stdout":
+ return nopCloserSink{os.Stdout}, nil
+ case "stderr":
+ return nopCloserSink{os.Stderr}, nil
+ }
+ return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
+}
+
+func normalizeScheme(s string) (string, error) {
+ // https://tools.ietf.org/html/rfc3986#section-3.1
+ s = strings.ToLower(s)
+ if first := s[0]; 'a' > first || 'z' < first {
+ return "", errors.New("must start with a letter")
+ }
+ for i := 1; i < len(s); i++ { // iterate over bytes, not runes
+ c := s[i]
+ switch {
+ case 'a' <= c && c <= 'z':
+ continue
+ case '0' <= c && c <= '9':
+ continue
+ case c == '.' || c == '+' || c == '-':
+ continue
+ }
+ return "", fmt.Errorf("may not contain %q", c)
+ }
+ return s, nil
+}
diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go
new file mode 100644
index 0000000000..100fac2168
--- /dev/null
+++ b/vendor/go.uber.org/zap/stacktrace.go
@@ -0,0 +1,126 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "runtime"
+ "strings"
+ "sync"
+
+ "go.uber.org/zap/internal/bufferpool"
+)
+
+const _zapPackage = "go.uber.org/zap"
+
+var (
+ _stacktracePool = sync.Pool{
+ New: func() interface{} {
+ return newProgramCounters(64)
+ },
+ }
+
+ // We add "." and "/" suffixes to the package name to ensure we only match
+ // the exact package and not any package with the same prefix.
+ _zapStacktracePrefixes = addPrefix(_zapPackage, ".", "/")
+ _zapStacktraceVendorContains = addPrefix("/vendor/", _zapStacktracePrefixes...)
+)
+
+func takeStacktrace() string {
+ buffer := bufferpool.Get()
+ defer buffer.Free()
+ programCounters := _stacktracePool.Get().(*programCounters)
+ defer _stacktracePool.Put(programCounters)
+
+ var numFrames int
+ for {
+		// Skip the call to runtime.Callers and takeStacktrace so that the
+ // program counters start at the caller of takeStacktrace.
+ numFrames = runtime.Callers(2, programCounters.pcs)
+ if numFrames < len(programCounters.pcs) {
+ break
+ }
+ // Don't put the too-short counter slice back into the pool; this lets
+ // the pool adjust if we consistently take deep stacktraces.
+ programCounters = newProgramCounters(len(programCounters.pcs) * 2)
+ }
+
+ i := 0
+ skipZapFrames := true // skip all consecutive zap frames at the beginning.
+ frames := runtime.CallersFrames(programCounters.pcs[:numFrames])
+
+ // Note: On the last iteration, frames.Next() returns false, with a valid
+	// frame, but we ignore this frame. The last frame is a runtime frame which
+ // adds noise, since it's only either runtime.main or runtime.goexit.
+ for frame, more := frames.Next(); more; frame, more = frames.Next() {
+ if skipZapFrames && isZapFrame(frame.Function) {
+ continue
+ } else {
+ skipZapFrames = false
+ }
+
+ if i != 0 {
+ buffer.AppendByte('\n')
+ }
+ i++
+ buffer.AppendString(frame.Function)
+ buffer.AppendByte('\n')
+ buffer.AppendByte('\t')
+ buffer.AppendString(frame.File)
+ buffer.AppendByte(':')
+ buffer.AppendInt(int64(frame.Line))
+ }
+
+ return buffer.String()
+}
+
+func isZapFrame(function string) bool {
+ for _, prefix := range _zapStacktracePrefixes {
+ if strings.HasPrefix(function, prefix) {
+ return true
+ }
+ }
+
+ // We can't use a prefix match here since the location of the vendor
+ // directory affects the prefix. Instead we do a contains match.
+ for _, contains := range _zapStacktraceVendorContains {
+ if strings.Contains(function, contains) {
+ return true
+ }
+ }
+
+ return false
+}
+
+type programCounters struct {
+ pcs []uintptr
+}
+
+func newProgramCounters(size int) *programCounters {
+ return &programCounters{make([]uintptr, size)}
+}
+
+func addPrefix(prefix string, ss ...string) []string {
+ withPrefix := make([]string, len(ss))
+ for i, s := range ss {
+ withPrefix[i] = prefix + s
+ }
+ return withPrefix
+}
diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go
new file mode 100644
index 0000000000..77ca227f47
--- /dev/null
+++ b/vendor/go.uber.org/zap/sugar.go
@@ -0,0 +1,304 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+
+ "go.uber.org/zap/zapcore"
+
+ "go.uber.org/multierr"
+)
+
+const (
+ _oddNumberErrMsg = "Ignored key without a value."
+ _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
+)
+
+// A SugaredLogger wraps the base Logger functionality in a slower, but less
+// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar
+// method.
+//
+// Unlike the Logger, the SugaredLogger doesn't insist on structured logging.
+// For each log level, it exposes three methods: one for loosely-typed
+// structured logging, one for println-style formatting, and one for
+// printf-style formatting. For example, SugaredLoggers can produce InfoLevel
+// output with Infow ("info with" structured context), Info, or Infof.
+type SugaredLogger struct {
+ base *Logger
+}
+
+// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring
+// is quite inexpensive, so it's reasonable for a single application to use
+// both Loggers and SugaredLoggers, converting between them on the boundaries
+// of performance-sensitive code.
+func (s *SugaredLogger) Desugar() *Logger {
+ base := s.base.clone()
+ base.callerSkip -= 2
+ return base
+}
+
+// Named adds a sub-scope to the logger's name. See Logger.Named for details.
+func (s *SugaredLogger) Named(name string) *SugaredLogger {
+ return &SugaredLogger{base: s.base.Named(name)}
+}
+
+// With adds a variadic number of fields to the logging context. It accepts a
+// mix of strongly-typed Field objects and loosely-typed key-value pairs. When
+// processing pairs, the first element of the pair is used as the field key
+// and the second as the field value.
+//
+// For example,
+// sugaredLogger.With(
+// "hello", "world",
+// "failure", errors.New("oh no"),
+// Stack(),
+// "count", 42,
+// "user", User{Name: "alice"},
+// )
+// is the equivalent of
+// unsugared.With(
+// String("hello", "world"),
+// String("failure", "oh no"),
+// Stack(),
+// Int("count", 42),
+// Object("user", User{Name: "alice"}),
+// )
+//
+// Note that the keys in key-value pairs should be strings. In development,
+// passing a non-string key panics. In production, the logger is more
+// forgiving: a separate error is logged, but the key-value pair is skipped
+// and execution continues. Passing an orphaned key triggers similar behavior:
+// panics in development and errors in production.
+func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
+ return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
+}
+
+// Debug uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Debug(args ...interface{}) {
+ s.log(DebugLevel, "", args, nil)
+}
+
+// Info uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Info(args ...interface{}) {
+ s.log(InfoLevel, "", args, nil)
+}
+
+// Warn uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Warn(args ...interface{}) {
+ s.log(WarnLevel, "", args, nil)
+}
+
+// Error uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Error(args ...interface{}) {
+ s.log(ErrorLevel, "", args, nil)
+}
+
+// DPanic uses fmt.Sprint to construct and log a message. In development, the
+// logger then panics. (See DPanicLevel for details.)
+func (s *SugaredLogger) DPanic(args ...interface{}) {
+ s.log(DPanicLevel, "", args, nil)
+}
+
+// Panic uses fmt.Sprint to construct and log a message, then panics.
+func (s *SugaredLogger) Panic(args ...interface{}) {
+ s.log(PanicLevel, "", args, nil)
+}
+
+// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit.
+func (s *SugaredLogger) Fatal(args ...interface{}) {
+ s.log(FatalLevel, "", args, nil)
+}
+
+// Debugf uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
+ s.log(DebugLevel, template, args, nil)
+}
+
+// Infof uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Infof(template string, args ...interface{}) {
+ s.log(InfoLevel, template, args, nil)
+}
+
+// Warnf uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Warnf(template string, args ...interface{}) {
+ s.log(WarnLevel, template, args, nil)
+}
+
+// Errorf uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Errorf(template string, args ...interface{}) {
+ s.log(ErrorLevel, template, args, nil)
+}
+
+// DPanicf uses fmt.Sprintf to log a templated message. In development, the
+// logger then panics. (See DPanicLevel for details.)
+func (s *SugaredLogger) DPanicf(template string, args ...interface{}) {
+ s.log(DPanicLevel, template, args, nil)
+}
+
+// Panicf uses fmt.Sprintf to log a templated message, then panics.
+func (s *SugaredLogger) Panicf(template string, args ...interface{}) {
+ s.log(PanicLevel, template, args, nil)
+}
+
+// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit.
+func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
+ s.log(FatalLevel, template, args, nil)
+}
+
+// Debugw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+//
+// When debug-level logging is disabled, this is much faster than
+// s.With(keysAndValues).Debug(msg)
+func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
+ s.log(DebugLevel, msg, nil, keysAndValues)
+}
+
+// Infow logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) {
+ s.log(InfoLevel, msg, nil, keysAndValues)
+}
+
+// Warnw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) {
+ s.log(WarnLevel, msg, nil, keysAndValues)
+}
+
+// Errorw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) {
+ s.log(ErrorLevel, msg, nil, keysAndValues)
+}
+
+// DPanicw logs a message with some additional context. In development, the
+// logger then panics. (See DPanicLevel for details.) The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) {
+ s.log(DPanicLevel, msg, nil, keysAndValues)
+}
+
+// Panicw logs a message with some additional context, then panics. The
+// variadic key-value pairs are treated as they are in With.
+func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) {
+ s.log(PanicLevel, msg, nil, keysAndValues)
+}
+
+// Fatalw logs a message with some additional context, then calls os.Exit. The
+// variadic key-value pairs are treated as they are in With.
+func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
+ s.log(FatalLevel, msg, nil, keysAndValues)
+}
+
+// Sync flushes any buffered log entries.
+func (s *SugaredLogger) Sync() error {
+ return s.base.Sync()
+}
+
+func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
+ // If logging at this level is completely disabled, skip the overhead of
+ // string formatting.
+ if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
+ return
+ }
+
+ // Format with Sprint, Sprintf, or neither.
+ msg := template
+ if msg == "" && len(fmtArgs) > 0 {
+ msg = fmt.Sprint(fmtArgs...)
+ } else if msg != "" && len(fmtArgs) > 0 {
+ msg = fmt.Sprintf(template, fmtArgs...)
+ }
+
+ if ce := s.base.Check(lvl, msg); ce != nil {
+ ce.Write(s.sweetenFields(context)...)
+ }
+}
+
+func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
+ if len(args) == 0 {
+ return nil
+ }
+
+ // Allocate enough space for the worst case; if users pass only structured
+ // fields, we shouldn't penalize them with extra allocations.
+ fields := make([]Field, 0, len(args))
+ var invalid invalidPairs
+
+ for i := 0; i < len(args); {
+ // This is a strongly-typed field. Consume it and move on.
+ if f, ok := args[i].(Field); ok {
+ fields = append(fields, f)
+ i++
+ continue
+ }
+
+ // Make sure this element isn't a dangling key.
+ if i == len(args)-1 {
+ s.base.DPanic(_oddNumberErrMsg, Any("ignored", args[i]))
+ break
+ }
+
+ // Consume this value and the next, treating them as a key-value pair. If the
+ // key isn't a string, add this pair to the slice of invalid pairs.
+ key, val := args[i], args[i+1]
+ if keyStr, ok := key.(string); !ok {
+ // Subsequent errors are likely, so allocate once up front.
+ if cap(invalid) == 0 {
+ invalid = make(invalidPairs, 0, len(args)/2)
+ }
+ invalid = append(invalid, invalidPair{i, key, val})
+ } else {
+ fields = append(fields, Any(keyStr, val))
+ }
+ i += 2
+ }
+
+ // If we encountered any invalid key-value pairs, log an error.
+ if len(invalid) > 0 {
+ s.base.DPanic(_nonStringKeyErrMsg, Array("invalid", invalid))
+ }
+ return fields
+}
+
+type invalidPair struct {
+ position int
+ key, value interface{}
+}
+
+func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ enc.AddInt64("position", int64(p.position))
+ Any("key", p.key).AddTo(enc)
+ Any("value", p.value).AddTo(enc)
+ return nil
+}
+
+type invalidPairs []invalidPair
+
+func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error {
+ var err error
+ for i := range ps {
+ err = multierr.Append(err, enc.AppendObject(ps[i]))
+ }
+ return err
+}
diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go
new file mode 100644
index 0000000000..c5a1f16225
--- /dev/null
+++ b/vendor/go.uber.org/zap/time.go
@@ -0,0 +1,27 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import "time"
+
+func timeToMillis(t time.Time) int64 {
+ return t.UnixNano() / int64(time.Millisecond)
+}
diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go
new file mode 100644
index 0000000000..86a709ab0b
--- /dev/null
+++ b/vendor/go.uber.org/zap/writer.go
@@ -0,0 +1,99 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+
+ "go.uber.org/zap/zapcore"
+
+ "go.uber.org/multierr"
+)
+
+// Open is a high-level wrapper that takes a variadic number of URLs, opens or
+// creates each of the specified resources, and combines them into a locked
+// WriteSyncer. It also returns any error encountered and a function to close
+// any opened files.
+//
+// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a
+// scheme and URLs with the "file" scheme. Third-party code may register
+// factories for other schemes using RegisterSink.
+//
+// URLs with the "file" scheme must use absolute paths on the local
+// filesystem. No user, password, port, fragments, or query parameters are
+// allowed, and the hostname must be empty or "localhost".
+//
+// Since it's common to write logs to the local filesystem, URLs without a
+// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without
+// a scheme, the special paths "stdout" and "stderr" are interpreted as
+// os.Stdout and os.Stderr. When specified without a scheme, relative file
+// paths also work.
+func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
+ writers, close, err := open(paths)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ writer := CombineWriteSyncers(writers...)
+ return writer, close, nil
+}
+
+func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
+ writers := make([]zapcore.WriteSyncer, 0, len(paths))
+ closers := make([]io.Closer, 0, len(paths))
+ close := func() {
+ for _, c := range closers {
+ c.Close()
+ }
+ }
+
+ var openErr error
+ for _, path := range paths {
+ sink, err := newSink(path)
+ if err != nil {
+ openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err))
+ continue
+ }
+ writers = append(writers, sink)
+ closers = append(closers, sink)
+ }
+ if openErr != nil {
+ close()
+ return writers, nil, openErr
+ }
+
+ return writers, close, nil
+}
+
+// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a
+// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op
+// WriteSyncer.
+//
+// It's provided purely as a convenience; the result is no different from
+// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually.
+func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
+ if len(writers) == 0 {
+ return zapcore.AddSync(ioutil.Discard)
+ }
+ return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
+}
diff --git a/vendor/go.uber.org/zap/zapcore/BUILD.bazel b/vendor/go.uber.org/zap/zapcore/BUILD.bazel
new file mode 100644
index 0000000000..28af1cb1b4
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/BUILD.bazel
@@ -0,0 +1,34 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "console_encoder.go",
+ "core.go",
+ "doc.go",
+ "encoder.go",
+ "entry.go",
+ "error.go",
+ "field.go",
+ "hook.go",
+ "json_encoder.go",
+ "level.go",
+ "level_strings.go",
+ "marshaler.go",
+ "memory_encoder.go",
+ "sampler.go",
+ "tee.go",
+ "write_syncer.go",
+ ],
+ importmap = "sigs.k8s.io/cluster-api-provider-aws/vendor/go.uber.org/zap/zapcore",
+ importpath = "go.uber.org/zap/zapcore",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//vendor/go.uber.org/atomic:go_default_library",
+ "//vendor/go.uber.org/multierr:go_default_library",
+ "//vendor/go.uber.org/zap/buffer:go_default_library",
+ "//vendor/go.uber.org/zap/internal/bufferpool:go_default_library",
+ "//vendor/go.uber.org/zap/internal/color:go_default_library",
+ "//vendor/go.uber.org/zap/internal/exit:go_default_library",
+ ],
+)
diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go
new file mode 100644
index 0000000000..b7875966f4
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go
@@ -0,0 +1,147 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "fmt"
+ "sync"
+
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/internal/bufferpool"
+)
+
+var _sliceEncoderPool = sync.Pool{
+ New: func() interface{} {
+ return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)}
+ },
+}
+
+func getSliceEncoder() *sliceArrayEncoder {
+ return _sliceEncoderPool.Get().(*sliceArrayEncoder)
+}
+
+func putSliceEncoder(e *sliceArrayEncoder) {
+ e.elems = e.elems[:0]
+ _sliceEncoderPool.Put(e)
+}
+
+type consoleEncoder struct {
+ *jsonEncoder
+}
+
+// NewConsoleEncoder creates an encoder whose output is designed for human -
+// rather than machine - consumption. It serializes the core log entry data
+// (message, level, timestamp, etc.) in a plain-text format and leaves the
+// structured context as JSON.
+//
+// Note that although the console encoder doesn't use the keys specified in the
+// encoder configuration, it will omit any element whose key is set to the empty
+// string.
+func NewConsoleEncoder(cfg EncoderConfig) Encoder {
+ return consoleEncoder{newJSONEncoder(cfg, true)}
+}
+
+func (c consoleEncoder) Clone() Encoder {
+ return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)}
+}
+
+func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) {
+ line := bufferpool.Get()
+
+ // We don't want the entry's metadata to be quoted and escaped (if it's
+ // encoded as strings), which means that we can't use the JSON encoder. The
+ // simplest option is to use the memory encoder and fmt.Fprint.
+ //
+ // If this ever becomes a performance bottleneck, we can implement
+ // ArrayEncoder for our plain-text format.
+ arr := getSliceEncoder()
+ if c.TimeKey != "" && c.EncodeTime != nil {
+ c.EncodeTime(ent.Time, arr)
+ }
+ if c.LevelKey != "" && c.EncodeLevel != nil {
+ c.EncodeLevel(ent.Level, arr)
+ }
+ if ent.LoggerName != "" && c.NameKey != "" {
+ nameEncoder := c.EncodeName
+
+ if nameEncoder == nil {
+ // Fall back to FullNameEncoder for backward compatibility.
+ nameEncoder = FullNameEncoder
+ }
+
+ nameEncoder(ent.LoggerName, arr)
+ }
+ if ent.Caller.Defined && c.CallerKey != "" && c.EncodeCaller != nil {
+ c.EncodeCaller(ent.Caller, arr)
+ }
+ for i := range arr.elems {
+ if i > 0 {
+ line.AppendByte('\t')
+ }
+ fmt.Fprint(line, arr.elems[i])
+ }
+ putSliceEncoder(arr)
+
+ // Add the message itself.
+ if c.MessageKey != "" {
+ c.addTabIfNecessary(line)
+ line.AppendString(ent.Message)
+ }
+
+ // Add any structured context.
+ c.writeContext(line, fields)
+
+ // If there's no stacktrace key, honor that; this allows users to force
+ // single-line output.
+ if ent.Stack != "" && c.StacktraceKey != "" {
+ line.AppendByte('\n')
+ line.AppendString(ent.Stack)
+ }
+
+ if c.LineEnding != "" {
+ line.AppendString(c.LineEnding)
+ } else {
+ line.AppendString(DefaultLineEnding)
+ }
+ return line, nil
+}
+
+func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) {
+ context := c.jsonEncoder.Clone().(*jsonEncoder)
+ defer context.buf.Free()
+
+ addFields(context, extra)
+ context.closeOpenNamespaces()
+ if context.buf.Len() == 0 {
+ return
+ }
+
+ c.addTabIfNecessary(line)
+ line.AppendByte('{')
+ line.Write(context.buf.Bytes())
+ line.AppendByte('}')
+}
+
+func (c consoleEncoder) addTabIfNecessary(line *buffer.Buffer) {
+ if line.Len() > 0 {
+ line.AppendByte('\t')
+ }
+}
diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go
new file mode 100644
index 0000000000..a1ef8b034b
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/core.go
@@ -0,0 +1,113 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+// Core is a minimal, fast logger interface. It's designed for library authors
+// to wrap in a more user-friendly API.
+type Core interface {
+ LevelEnabler
+
+ // With adds structured context to the Core.
+ With([]Field) Core
+ // Check determines whether the supplied Entry should be logged (using the
+ // embedded LevelEnabler and possibly some extra logic). If the entry
+ // should be logged, the Core adds itself to the CheckedEntry and returns
+ // the result.
+ //
+ // Callers must use Check before calling Write.
+ Check(Entry, *CheckedEntry) *CheckedEntry
+ // Write serializes the Entry and any Fields supplied at the log site and
+ // writes them to their destination.
+ //
+ // If called, Write should always log the Entry and Fields; it should not
+ // replicate the logic of Check.
+ Write(Entry, []Field) error
+ // Sync flushes buffered logs (if any).
+ Sync() error
+}
+
+type nopCore struct{}
+
+// NewNopCore returns a no-op Core.
+func NewNopCore() Core { return nopCore{} }
+func (nopCore) Enabled(Level) bool { return false }
+func (n nopCore) With([]Field) Core { return n }
+func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce }
+func (nopCore) Write(Entry, []Field) error { return nil }
+func (nopCore) Sync() error { return nil }
+
+// NewCore creates a Core that writes logs to a WriteSyncer.
+func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core {
+ return &ioCore{
+ LevelEnabler: enab,
+ enc: enc,
+ out: ws,
+ }
+}
+
+type ioCore struct {
+ LevelEnabler
+ enc Encoder
+ out WriteSyncer
+}
+
+func (c *ioCore) With(fields []Field) Core {
+ clone := c.clone()
+ addFields(clone.enc, fields)
+ return clone
+}
+
+func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ if c.Enabled(ent.Level) {
+ return ce.AddCore(ent, c)
+ }
+ return ce
+}
+
+func (c *ioCore) Write(ent Entry, fields []Field) error {
+ buf, err := c.enc.EncodeEntry(ent, fields)
+ if err != nil {
+ return err
+ }
+ _, err = c.out.Write(buf.Bytes())
+ buf.Free()
+ if err != nil {
+ return err
+ }
+ if ent.Level > ErrorLevel {
+ // Since we may be crashing the program, sync the output. Ignore Sync
+ // errors, pending a clean solution to issue #370.
+ c.Sync()
+ }
+ return nil
+}
+
+func (c *ioCore) Sync() error {
+ return c.out.Sync()
+}
+
+func (c *ioCore) clone() *ioCore {
+ return &ioCore{
+ LevelEnabler: c.LevelEnabler,
+ enc: c.enc.Clone(),
+ out: c.out,
+ }
+}
diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go
new file mode 100644
index 0000000000..31000e91f7
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/doc.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package zapcore defines and implements the low-level interfaces upon which
+// zap is built. By providing alternate implementations of these interfaces,
+// external packages can extend zap's capabilities.
+package zapcore // import "go.uber.org/zap/zapcore"
diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go
new file mode 100644
index 0000000000..f0509522b5
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/encoder.go
@@ -0,0 +1,348 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "time"
+
+ "go.uber.org/zap/buffer"
+)
+
+// DefaultLineEnding defines the default line ending when writing logs.
+// Alternate line endings specified in EncoderConfig can override this
+// behavior.
+const DefaultLineEnding = "\n"
+
+// A LevelEncoder serializes a Level to a primitive type.
+type LevelEncoder func(Level, PrimitiveArrayEncoder)
+
+// LowercaseLevelEncoder serializes a Level to a lowercase string. For example,
+// InfoLevel is serialized to "info".
+func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ enc.AppendString(l.String())
+}
+
+// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring.
+// For example, InfoLevel is serialized to "info" and colored blue.
+func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ s, ok := _levelToLowercaseColorString[l]
+ if !ok {
+ s = _unknownLevelColor.Add(l.String())
+ }
+ enc.AppendString(s)
+}
+
+// CapitalLevelEncoder serializes a Level to an all-caps string. For example,
+// InfoLevel is serialized to "INFO".
+func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ enc.AppendString(l.CapitalString())
+}
+
+// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color.
+// For example, InfoLevel is serialized to "INFO" and colored blue.
+func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ s, ok := _levelToCapitalColorString[l]
+ if !ok {
+ s = _unknownLevelColor.Add(l.CapitalString())
+ }
+ enc.AppendString(s)
+}
+
+// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to
+// CapitalLevelEncoder, "capitalColor" is unmarshaled to CapitalColorLevelEncoder,
+// "color" is unmarshaled to LowercaseColorLevelEncoder, and anything else
+// is unmarshaled to LowercaseLevelEncoder.
+func (e *LevelEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "capital":
+ *e = CapitalLevelEncoder
+ case "capitalColor":
+ *e = CapitalColorLevelEncoder
+ case "color":
+ *e = LowercaseColorLevelEncoder
+ default:
+ *e = LowercaseLevelEncoder
+ }
+ return nil
+}
+
+// A TimeEncoder serializes a time.Time to a primitive type.
+type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
+
+// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
+// since the Unix epoch.
+func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ nanos := t.UnixNano()
+ sec := float64(nanos) / float64(time.Second)
+ enc.AppendFloat64(sec)
+}
+
+// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of
+// milliseconds since the Unix epoch.
+func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ nanos := t.UnixNano()
+ millis := float64(nanos) / float64(time.Millisecond)
+ enc.AppendFloat64(millis)
+}
+
+// EpochNanosTimeEncoder serializes a time.Time to an integer number of
+// nanoseconds since the Unix epoch.
+func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ enc.AppendInt64(t.UnixNano())
+}
+
+// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string
+// with millisecond precision.
+func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ enc.AppendString(t.Format("2006-01-02T15:04:05.000Z0700"))
+}
+
+// UnmarshalText unmarshals text to a TimeEncoder. "iso8601" and "ISO8601" are
+// unmarshaled to ISO8601TimeEncoder, "millis" to EpochMillisTimeEncoder, "nanos"
+// to EpochNanosTimeEncoder, and anything else is unmarshaled to EpochTimeEncoder.
+func (e *TimeEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "iso8601", "ISO8601":
+ *e = ISO8601TimeEncoder
+ case "millis":
+ *e = EpochMillisTimeEncoder
+ case "nanos":
+ *e = EpochNanosTimeEncoder
+ default:
+ *e = EpochTimeEncoder
+ }
+ return nil
+}
+
+// A DurationEncoder serializes a time.Duration to a primitive type.
+type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
+
+// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
+func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendFloat64(float64(d) / float64(time.Second))
+}
+
+// NanosDurationEncoder serializes a time.Duration to an integer number of
+// nanoseconds elapsed.
+func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendInt64(int64(d))
+}
+
+// StringDurationEncoder serializes a time.Duration using its built-in String
+// method.
+func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendString(d.String())
+}
+
+// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled
+// to StringDurationEncoder, "nanos" to NanosDurationEncoder, and anything else
+// is unmarshaled to SecondsDurationEncoder.
+func (e *DurationEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "string":
+ *e = StringDurationEncoder
+ case "nanos":
+ *e = NanosDurationEncoder
+ default:
+ *e = SecondsDurationEncoder
+ }
+ return nil
+}
+
+// A CallerEncoder serializes an EntryCaller to a primitive type.
+type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
+
+// FullCallerEncoder serializes a caller in /full/path/to/package/file:line
+// format.
+func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+ // TODO: consider using a byte-oriented API to save an allocation.
+ enc.AppendString(caller.String())
+}
+
+// ShortCallerEncoder serializes a caller in package/file:line format, trimming
+// all but the final directory from the full path.
+func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+ // TODO: consider using a byte-oriented API to save an allocation.
+ enc.AppendString(caller.TrimmedPath())
+}
+
+// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to
+// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder.
+func (e *CallerEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "full":
+ *e = FullCallerEncoder
+ default:
+ *e = ShortCallerEncoder
+ }
+ return nil
+}
+
+// A NameEncoder serializes a period-separated logger name to a primitive
+// type.
+type NameEncoder func(string, PrimitiveArrayEncoder)
+
+// FullNameEncoder serializes the logger name as-is.
+func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) {
+ enc.AppendString(loggerName)
+}
+
+// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is
+// unmarshaled to FullNameEncoder.
+func (e *NameEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "full":
+ *e = FullNameEncoder
+ default:
+ *e = FullNameEncoder
+ }
+ return nil
+}
+
+// An EncoderConfig allows users to configure the concrete encoders supplied by
+// zapcore.
+type EncoderConfig struct {
+ // Set the keys used for each log entry. If any key is empty, that portion
+ // of the entry is omitted.
+ MessageKey string `json:"messageKey" yaml:"messageKey"`
+ LevelKey string `json:"levelKey" yaml:"levelKey"`
+ TimeKey string `json:"timeKey" yaml:"timeKey"`
+ NameKey string `json:"nameKey" yaml:"nameKey"`
+ CallerKey string `json:"callerKey" yaml:"callerKey"`
+ StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
+ LineEnding string `json:"lineEnding" yaml:"lineEnding"`
+ // Configure the primitive representations of common complex types. For
+ // example, some users may want all time.Times serialized as floating-point
+ // seconds since epoch, while others may prefer ISO8601 strings.
+ EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"`
+ EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"`
+ EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"`
+ EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"`
+ // Unlike the other primitive type encoders, EncodeName is optional. The
+ // zero value falls back to FullNameEncoder.
+ EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`
+}
+
+// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a
+// map- or struct-like object to the logging context. Like maps, ObjectEncoders
+// aren't safe for concurrent use (though typical use shouldn't require locks).
+type ObjectEncoder interface {
+ // Logging-specific marshalers.
+ AddArray(key string, marshaler ArrayMarshaler) error
+ AddObject(key string, marshaler ObjectMarshaler) error
+
+ // Built-in types.
+ AddBinary(key string, value []byte) // for arbitrary bytes
+ AddByteString(key string, value []byte) // for UTF-8 encoded bytes
+ AddBool(key string, value bool)
+ AddComplex128(key string, value complex128)
+ AddComplex64(key string, value complex64)
+ AddDuration(key string, value time.Duration)
+ AddFloat64(key string, value float64)
+ AddFloat32(key string, value float32)
+ AddInt(key string, value int)
+ AddInt64(key string, value int64)
+ AddInt32(key string, value int32)
+ AddInt16(key string, value int16)
+ AddInt8(key string, value int8)
+ AddString(key, value string)
+ AddTime(key string, value time.Time)
+ AddUint(key string, value uint)
+ AddUint64(key string, value uint64)
+ AddUint32(key string, value uint32)
+ AddUint16(key string, value uint16)
+ AddUint8(key string, value uint8)
+ AddUintptr(key string, value uintptr)
+
+ // AddReflected uses reflection to serialize arbitrary objects, so it's slow
+ // and allocation-heavy.
+ AddReflected(key string, value interface{}) error
+ // OpenNamespace opens an isolated namespace where all subsequent fields will
+ // be added. Applications can use namespaces to prevent key collisions when
+ // injecting loggers into sub-components or third-party libraries.
+ OpenNamespace(key string)
+}
+
+// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding
+// array-like objects to the logging context. Of note, it supports mixed-type
+// arrays even though they aren't typical in Go. Like slices, ArrayEncoders
+// aren't safe for concurrent use (though typical use shouldn't require locks).
+type ArrayEncoder interface {
+ // Built-in types.
+ PrimitiveArrayEncoder
+
+ // Time-related types.
+ AppendDuration(time.Duration)
+ AppendTime(time.Time)
+
+ // Logging-specific marshalers.
+ AppendArray(ArrayMarshaler) error
+ AppendObject(ObjectMarshaler) error
+
+ // AppendReflected uses reflection to serialize arbitrary objects, so it's
+ // slow and allocation-heavy.
+ AppendReflected(value interface{}) error
+}
+
+// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals
+// only in Go's built-in types. It's included only so that Duration- and
+// TimeEncoders cannot trigger infinite recursion.
+type PrimitiveArrayEncoder interface {
+ // Built-in types.
+ AppendBool(bool)
+ AppendByteString([]byte) // for UTF-8 encoded bytes
+ AppendComplex128(complex128)
+ AppendComplex64(complex64)
+ AppendFloat64(float64)
+ AppendFloat32(float32)
+ AppendInt(int)
+ AppendInt64(int64)
+ AppendInt32(int32)
+ AppendInt16(int16)
+ AppendInt8(int8)
+ AppendString(string)
+ AppendUint(uint)
+ AppendUint64(uint64)
+ AppendUint32(uint32)
+ AppendUint16(uint16)
+ AppendUint8(uint8)
+ AppendUintptr(uintptr)
+}
+
+// Encoder is a format-agnostic interface for all log entry marshalers. Since
+// log encoders don't need to support the same wide range of use cases as
+// general-purpose marshalers, it's possible to make them faster and
+// lower-allocation.
+//
+// Implementations of the ObjectEncoder interface's methods can, of course,
+// freely modify the receiver. However, the Clone and EncodeEntry methods will
+// be called concurrently and shouldn't modify the receiver.
+type Encoder interface {
+ ObjectEncoder
+
+ // Clone copies the encoder, ensuring that adding fields to the copy doesn't
+ // affect the original.
+ Clone() Encoder
+
+ // EncodeEntry encodes an entry and fields, along with any accumulated
+ // context, into a byte buffer and returns it.
+ EncodeEntry(Entry, []Field) (*buffer.Buffer, error)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go
new file mode 100644
index 0000000000..7d9893f331
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/entry.go
@@ -0,0 +1,257 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/exit"
+
+ "go.uber.org/multierr"
+)
+
+var (
+ _cePool = sync.Pool{New: func() interface{} {
+ // Pre-allocate some space for cores.
+ return &CheckedEntry{
+ cores: make([]Core, 4),
+ }
+ }}
+)
+
+func getCheckedEntry() *CheckedEntry {
+ ce := _cePool.Get().(*CheckedEntry)
+ ce.reset()
+ return ce
+}
+
+func putCheckedEntry(ce *CheckedEntry) {
+ if ce == nil {
+ return
+ }
+ _cePool.Put(ce)
+}
+
+// NewEntryCaller makes an EntryCaller from the return signature of
+// runtime.Caller.
+func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller {
+ if !ok {
+ return EntryCaller{}
+ }
+ return EntryCaller{
+ PC: pc,
+ File: file,
+ Line: line,
+ Defined: true,
+ }
+}
+
+// EntryCaller represents the caller of a logging function.
+type EntryCaller struct {
+ Defined bool
+ PC uintptr
+ File string
+ Line int
+}
+
+// String returns the full path and line number of the caller.
+func (ec EntryCaller) String() string {
+ return ec.FullPath()
+}
+
+// FullPath returns a /full/path/to/package/file:line description of the
+// caller.
+func (ec EntryCaller) FullPath() string {
+ if !ec.Defined {
+ return "undefined"
+ }
+ buf := bufferpool.Get()
+ buf.AppendString(ec.File)
+ buf.AppendByte(':')
+ buf.AppendInt(int64(ec.Line))
+ caller := buf.String()
+ buf.Free()
+ return caller
+}
+
+// TrimmedPath returns a package/file:line description of the caller,
+// preserving only the leaf directory name and file name.
+func (ec EntryCaller) TrimmedPath() string {
+ if !ec.Defined {
+ return "undefined"
+ }
+ // nb. To make sure we trim the path correctly on Windows too, we
+ // counter-intuitively need to use '/' and *not* os.PathSeparator here,
+ // because the path given originates from Go stdlib, specifically
+ // runtime.Caller() which (as of Mar/17) returns forward slashes even on
+ // Windows.
+ //
+ // See https://github.com/golang/go/issues/3335
+ // and https://github.com/golang/go/issues/18151
+ //
+ // for discussion on the issue on Go side.
+ //
+ // Find the last separator.
+ //
+ idx := strings.LastIndexByte(ec.File, '/')
+ if idx == -1 {
+ return ec.FullPath()
+ }
+ // Find the penultimate separator.
+ idx = strings.LastIndexByte(ec.File[:idx], '/')
+ if idx == -1 {
+ return ec.FullPath()
+ }
+ buf := bufferpool.Get()
+ // Keep everything after the penultimate separator.
+ buf.AppendString(ec.File[idx+1:])
+ buf.AppendByte(':')
+ buf.AppendInt(int64(ec.Line))
+ caller := buf.String()
+ buf.Free()
+ return caller
+}
+
+// An Entry represents a complete log message. The entry's structured context
+// is already serialized, but the log level, time, message, and call site
+// information are available for inspection and modification.
+//
+// Entries are pooled, so any functions that accept them MUST be careful not to
+// retain references to them.
+type Entry struct {
+ Level Level
+ Time time.Time
+ LoggerName string
+ Message string
+ Caller EntryCaller
+ Stack string
+}
+
+// CheckWriteAction indicates what action to take after a log entry is
+// processed. Actions are ordered in increasing severity.
+type CheckWriteAction uint8
+
+const (
+ // WriteThenNoop indicates that nothing special needs to be done. It's the
+ // default behavior.
+ WriteThenNoop CheckWriteAction = iota
+ // WriteThenPanic causes a panic after Write.
+ WriteThenPanic
+ // WriteThenFatal causes a fatal os.Exit after Write.
+ WriteThenFatal
+)
+
+// CheckedEntry is an Entry together with a collection of Cores that have
+// already agreed to log it.
+//
+// CheckedEntry references should be created by calling AddCore or Should on a
+// nil *CheckedEntry. References are returned to a pool after Write, and MUST
+// NOT be retained after calling their Write method.
+type CheckedEntry struct {
+ Entry
+ ErrorOutput WriteSyncer
+ dirty bool // best-effort detection of pool misuse
+ should CheckWriteAction
+ cores []Core
+}
+
+func (ce *CheckedEntry) reset() {
+ ce.Entry = Entry{}
+ ce.ErrorOutput = nil
+ ce.dirty = false
+ ce.should = WriteThenNoop
+ for i := range ce.cores {
+ // don't keep references to cores
+ ce.cores[i] = nil
+ }
+ ce.cores = ce.cores[:0]
+}
+
+// Write writes the entry to the stored Cores, returns any errors, and returns
+// the CheckedEntry reference to a pool for immediate re-use. Finally, it
+// executes any required CheckWriteAction.
+func (ce *CheckedEntry) Write(fields ...Field) {
+ if ce == nil {
+ return
+ }
+
+ if ce.dirty {
+ if ce.ErrorOutput != nil {
+ // Make a best effort to detect unsafe re-use of this CheckedEntry.
+ // If the entry is dirty, log an internal error; because the
+ // CheckedEntry is being used after it was returned to the pool,
+ // the message may be an amalgamation from multiple call sites.
+ fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", time.Now(), ce.Entry)
+ ce.ErrorOutput.Sync()
+ }
+ return
+ }
+ ce.dirty = true
+
+ var err error
+ for i := range ce.cores {
+ err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields))
+ }
+ if ce.ErrorOutput != nil {
+ if err != nil {
+ fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", time.Now(), err)
+ ce.ErrorOutput.Sync()
+ }
+ }
+
+ should, msg := ce.should, ce.Message
+ putCheckedEntry(ce)
+
+ switch should {
+ case WriteThenPanic:
+ panic(msg)
+ case WriteThenFatal:
+ exit.Exit()
+ }
+}
+
+// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be
+// used by Core.Check implementations, and is safe to call on nil CheckedEntry
+// references.
+func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry {
+ if ce == nil {
+ ce = getCheckedEntry()
+ ce.Entry = ent
+ }
+ ce.cores = append(ce.cores, core)
+ return ce
+}
+
+// Should sets this CheckedEntry's CheckWriteAction, which controls whether a
+// Core will panic or fatal after writing this log entry. Like AddCore, it's
+// safe to call on nil CheckedEntry references.
+func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
+ if ce == nil {
+ ce = getCheckedEntry()
+ ce.Entry = ent
+ }
+ ce.should = should
+ return ce
+}
diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go
new file mode 100644
index 0000000000..a67c7bacc9
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/error.go
@@ -0,0 +1,120 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "fmt"
+ "sync"
+)
+
+// Encodes the given error into fields of an object. A field with the given
+// name is added for the error message.
+//
+// If the error implements fmt.Formatter, a field with the name ${key}Verbose
+// is also added with the full verbose error message.
+//
+// Finally, if the error implements errorGroup (from go.uber.org/multierr) or
+// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
+// array of objects containing the errors this error was comprised of.
+//
+// {
+// "error": err.Error(),
+// "errorVerbose": fmt.Sprintf("%+v", err),
+// "errorCauses": [
+// ...
+// ],
+// }
+func encodeError(key string, err error, enc ObjectEncoder) error {
+ basic := err.Error()
+ enc.AddString(key, basic)
+
+ switch e := err.(type) {
+ case errorGroup:
+ return enc.AddArray(key+"Causes", errArray(e.Errors()))
+ case fmt.Formatter:
+ verbose := fmt.Sprintf("%+v", e)
+ if verbose != basic {
+ // This is a rich error type, like those produced by
+ // github.com/pkg/errors.
+ enc.AddString(key+"Verbose", verbose)
+ }
+ }
+ return nil
+}
+
+type errorGroup interface {
+ // Provides read-only access to the underlying list of errors, preferably
+ // without causing any allocs.
+ Errors() []error
+}
+
+type causer interface {
+ // Provides access to the error that caused this error.
+ Cause() error
+}
+
+// Note that errArray and errArrayElem are very similar to the version
+// implemented in the top-level error.go file. We can't re-use this because
+// that would require exporting errArray as part of the zapcore API.
+
+// Encodes a list of errors using the standard error encoding logic.
+type errArray []error
+
+func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
+ for i := range errs {
+ if errs[i] == nil {
+ continue
+ }
+
+ el := newErrArrayElem(errs[i])
+ arr.AppendObject(el)
+ el.Free()
+ }
+ return nil
+}
+
+var _errArrayElemPool = sync.Pool{New: func() interface{} {
+ return &errArrayElem{}
+}}
+
+// Encodes any error into a {"error": ...} re-using the same errors logic.
+//
+// May be passed in place of an array to build a single-element array.
+type errArrayElem struct{ err error }
+
+func newErrArrayElem(err error) *errArrayElem {
+ e := _errArrayElemPool.Get().(*errArrayElem)
+ e.err = err
+ return e
+}
+
+func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error {
+ return arr.AppendObject(e)
+}
+
+func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error {
+ return encodeError("error", e.err, enc)
+}
+
+func (e *errArrayElem) Free() {
+ e.err = nil
+ _errArrayElemPool.Put(e)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go
new file mode 100644
index 0000000000..6a5e33e2f7
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/field.go
@@ -0,0 +1,201 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "reflect"
+ "time"
+)
+
+// A FieldType indicates which member of the Field union struct should be used
+// and how it should be serialized.
+type FieldType uint8
+
+const (
+ // UnknownType is the default field type. Attempting to add it to an encoder will panic.
+ UnknownType FieldType = iota
+ // ArrayMarshalerType indicates that the field carries an ArrayMarshaler.
+ ArrayMarshalerType
+ // ObjectMarshalerType indicates that the field carries an ObjectMarshaler.
+ ObjectMarshalerType
+ // BinaryType indicates that the field carries an opaque binary blob.
+ BinaryType
+ // BoolType indicates that the field carries a bool.
+ BoolType
+ // ByteStringType indicates that the field carries UTF-8 encoded bytes.
+ ByteStringType
+ // Complex128Type indicates that the field carries a complex128.
+ Complex128Type
+	// Complex64Type indicates that the field carries a complex64.
+ Complex64Type
+ // DurationType indicates that the field carries a time.Duration.
+ DurationType
+ // Float64Type indicates that the field carries a float64.
+ Float64Type
+ // Float32Type indicates that the field carries a float32.
+ Float32Type
+ // Int64Type indicates that the field carries an int64.
+ Int64Type
+ // Int32Type indicates that the field carries an int32.
+ Int32Type
+ // Int16Type indicates that the field carries an int16.
+ Int16Type
+ // Int8Type indicates that the field carries an int8.
+ Int8Type
+ // StringType indicates that the field carries a string.
+ StringType
+ // TimeType indicates that the field carries a time.Time.
+ TimeType
+ // Uint64Type indicates that the field carries a uint64.
+ Uint64Type
+ // Uint32Type indicates that the field carries a uint32.
+ Uint32Type
+ // Uint16Type indicates that the field carries a uint16.
+ Uint16Type
+ // Uint8Type indicates that the field carries a uint8.
+ Uint8Type
+ // UintptrType indicates that the field carries a uintptr.
+ UintptrType
+ // ReflectType indicates that the field carries an interface{}, which should
+ // be serialized using reflection.
+ ReflectType
+ // NamespaceType signals the beginning of an isolated namespace. All
+ // subsequent fields should be added to the new namespace.
+ NamespaceType
+ // StringerType indicates that the field carries a fmt.Stringer.
+ StringerType
+ // ErrorType indicates that the field carries an error.
+ ErrorType
+ // SkipType indicates that the field is a no-op.
+ SkipType
+)
+
+// A Field is a marshaling operation used to add a key-value pair to a logger's
+// context. Most fields are lazily marshaled, so it's inexpensive to add fields
+// to disabled debug-level log statements.
+type Field struct {
+ Key string
+ Type FieldType
+ Integer int64
+ String string
+ Interface interface{}
+}
+
+// AddTo exports a field through the ObjectEncoder interface. It's primarily
+// useful to library authors, and shouldn't be necessary in most applications.
+func (f Field) AddTo(enc ObjectEncoder) {
+ var err error
+
+ switch f.Type {
+ case ArrayMarshalerType:
+ err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler))
+ case ObjectMarshalerType:
+ err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler))
+ case BinaryType:
+ enc.AddBinary(f.Key, f.Interface.([]byte))
+ case BoolType:
+ enc.AddBool(f.Key, f.Integer == 1)
+ case ByteStringType:
+ enc.AddByteString(f.Key, f.Interface.([]byte))
+ case Complex128Type:
+ enc.AddComplex128(f.Key, f.Interface.(complex128))
+ case Complex64Type:
+ enc.AddComplex64(f.Key, f.Interface.(complex64))
+ case DurationType:
+ enc.AddDuration(f.Key, time.Duration(f.Integer))
+ case Float64Type:
+ enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer)))
+ case Float32Type:
+ enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer)))
+ case Int64Type:
+ enc.AddInt64(f.Key, f.Integer)
+ case Int32Type:
+ enc.AddInt32(f.Key, int32(f.Integer))
+ case Int16Type:
+ enc.AddInt16(f.Key, int16(f.Integer))
+ case Int8Type:
+ enc.AddInt8(f.Key, int8(f.Integer))
+ case StringType:
+ enc.AddString(f.Key, f.String)
+ case TimeType:
+ if f.Interface != nil {
+ enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location)))
+ } else {
+ // Fall back to UTC if location is nil.
+ enc.AddTime(f.Key, time.Unix(0, f.Integer))
+ }
+ case Uint64Type:
+ enc.AddUint64(f.Key, uint64(f.Integer))
+ case Uint32Type:
+ enc.AddUint32(f.Key, uint32(f.Integer))
+ case Uint16Type:
+ enc.AddUint16(f.Key, uint16(f.Integer))
+ case Uint8Type:
+ enc.AddUint8(f.Key, uint8(f.Integer))
+ case UintptrType:
+ enc.AddUintptr(f.Key, uintptr(f.Integer))
+ case ReflectType:
+ err = enc.AddReflected(f.Key, f.Interface)
+ case NamespaceType:
+ enc.OpenNamespace(f.Key)
+ case StringerType:
+ enc.AddString(f.Key, f.Interface.(fmt.Stringer).String())
+ case ErrorType:
+ encodeError(f.Key, f.Interface.(error), enc)
+ case SkipType:
+ break
+ default:
+ panic(fmt.Sprintf("unknown field type: %v", f))
+ }
+
+ if err != nil {
+ enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error())
+ }
+}
+
+// Equals returns whether two fields are equal. For non-primitive types such as
+// errors, marshalers, or reflect types, it uses reflect.DeepEqual.
+func (f Field) Equals(other Field) bool {
+ if f.Type != other.Type {
+ return false
+ }
+ if f.Key != other.Key {
+ return false
+ }
+
+ switch f.Type {
+ case BinaryType, ByteStringType:
+ return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte))
+ case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType:
+ return reflect.DeepEqual(f.Interface, other.Interface)
+ default:
+ return f == other
+ }
+}
+
+func addFields(enc ObjectEncoder, fields []Field) {
+ for i := range fields {
+ fields[i].AddTo(enc)
+ }
+}
diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go
new file mode 100644
index 0000000000..5db4afb302
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/hook.go
@@ -0,0 +1,68 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/multierr"
+
+type hooked struct {
+ Core
+ funcs []func(Entry) error
+}
+
+// RegisterHooks wraps a Core and runs a collection of user-defined callback
+// hooks each time a message is logged. Execution of the callbacks is blocking.
+//
+// This offers users an easy way to register simple callbacks (e.g., metrics
+// collection) without implementing the full Core interface.
+func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
+ funcs := append([]func(Entry) error{}, hooks...)
+ return &hooked{
+ Core: core,
+ funcs: funcs,
+ }
+}
+
+func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ // Let the wrapped Core decide whether to log this message or not. This
+ // also gives the downstream a chance to register itself directly with the
+ // CheckedEntry.
+ if downstream := h.Core.Check(ent, ce); downstream != nil {
+ return downstream.AddCore(ent, h)
+ }
+ return ce
+}
+
+func (h *hooked) With(fields []Field) Core {
+ return &hooked{
+ Core: h.Core.With(fields),
+ funcs: h.funcs,
+ }
+}
+
+func (h *hooked) Write(ent Entry, _ []Field) error {
+	// Since our downstream had a chance to register itself directly with the
+	// CheckedEntry, we don't need to call it here.
+ var err error
+ for i := range h.funcs {
+ err = multierr.Append(err, h.funcs[i](ent))
+ }
+ return err
+}
diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go
new file mode 100644
index 0000000000..2dc67d81e7
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go
@@ -0,0 +1,502 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "math"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/internal/bufferpool"
+)
+
+// For JSON-escaping; see jsonEncoder.safeAddString below.
+const _hex = "0123456789abcdef"
+
+var _jsonPool = sync.Pool{New: func() interface{} {
+ return &jsonEncoder{}
+}}
+
+func getJSONEncoder() *jsonEncoder {
+ return _jsonPool.Get().(*jsonEncoder)
+}
+
+func putJSONEncoder(enc *jsonEncoder) {
+ if enc.reflectBuf != nil {
+ enc.reflectBuf.Free()
+ }
+ enc.EncoderConfig = nil
+ enc.buf = nil
+ enc.spaced = false
+ enc.openNamespaces = 0
+ enc.reflectBuf = nil
+ enc.reflectEnc = nil
+ _jsonPool.Put(enc)
+}
+
+type jsonEncoder struct {
+ *EncoderConfig
+ buf *buffer.Buffer
+ spaced bool // include spaces after colons and commas
+ openNamespaces int
+
+ // for encoding generic values by reflection
+ reflectBuf *buffer.Buffer
+ reflectEnc *json.Encoder
+}
+
+// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
+// appropriately escapes all field keys and values.
+//
+// Note that the encoder doesn't deduplicate keys, so it's possible to produce
+// a message like
+// {"foo":"bar","foo":"baz"}
+// This is permitted by the JSON specification, but not encouraged. Many
+// libraries will ignore duplicate key-value pairs (typically keeping the last
+// pair) when unmarshaling, but users should attempt to avoid adding duplicate
+// keys.
+func NewJSONEncoder(cfg EncoderConfig) Encoder {
+ return newJSONEncoder(cfg, false)
+}
+
+func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder {
+ return &jsonEncoder{
+ EncoderConfig: &cfg,
+ buf: bufferpool.Get(),
+ spaced: spaced,
+ }
+}
+
+func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error {
+ enc.addKey(key)
+ return enc.AppendArray(arr)
+}
+
+func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error {
+ enc.addKey(key)
+ return enc.AppendObject(obj)
+}
+
+func (enc *jsonEncoder) AddBinary(key string, val []byte) {
+ enc.AddString(key, base64.StdEncoding.EncodeToString(val))
+}
+
+func (enc *jsonEncoder) AddByteString(key string, val []byte) {
+ enc.addKey(key)
+ enc.AppendByteString(val)
+}
+
+func (enc *jsonEncoder) AddBool(key string, val bool) {
+ enc.addKey(key)
+ enc.AppendBool(val)
+}
+
+func (enc *jsonEncoder) AddComplex128(key string, val complex128) {
+ enc.addKey(key)
+ enc.AppendComplex128(val)
+}
+
+func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
+ enc.addKey(key)
+ enc.AppendDuration(val)
+}
+
+func (enc *jsonEncoder) AddFloat64(key string, val float64) {
+ enc.addKey(key)
+ enc.AppendFloat64(val)
+}
+
+func (enc *jsonEncoder) AddInt64(key string, val int64) {
+ enc.addKey(key)
+ enc.AppendInt64(val)
+}
+
+func (enc *jsonEncoder) resetReflectBuf() {
+ if enc.reflectBuf == nil {
+ enc.reflectBuf = bufferpool.Get()
+ enc.reflectEnc = json.NewEncoder(enc.reflectBuf)
+ } else {
+ enc.reflectBuf.Reset()
+ }
+}
+
+func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error {
+ enc.resetReflectBuf()
+ err := enc.reflectEnc.Encode(obj)
+ if err != nil {
+ return err
+ }
+ enc.reflectBuf.TrimNewline()
+ enc.addKey(key)
+ _, err = enc.buf.Write(enc.reflectBuf.Bytes())
+ return err
+}
+
+func (enc *jsonEncoder) OpenNamespace(key string) {
+ enc.addKey(key)
+ enc.buf.AppendByte('{')
+ enc.openNamespaces++
+}
+
+func (enc *jsonEncoder) AddString(key, val string) {
+ enc.addKey(key)
+ enc.AppendString(val)
+}
+
+func (enc *jsonEncoder) AddTime(key string, val time.Time) {
+ enc.addKey(key)
+ enc.AppendTime(val)
+}
+
+func (enc *jsonEncoder) AddUint64(key string, val uint64) {
+ enc.addKey(key)
+ enc.AppendUint64(val)
+}
+
+func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('[')
+ err := arr.MarshalLogArray(enc)
+ enc.buf.AppendByte(']')
+ return err
+}
+
+func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('{')
+ err := obj.MarshalLogObject(enc)
+ enc.buf.AppendByte('}')
+ return err
+}
+
+func (enc *jsonEncoder) AppendBool(val bool) {
+ enc.addElementSeparator()
+ enc.buf.AppendBool(val)
+}
+
+func (enc *jsonEncoder) AppendByteString(val []byte) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.safeAddByteString(val)
+ enc.buf.AppendByte('"')
+}
+
+func (enc *jsonEncoder) AppendComplex128(val complex128) {
+ enc.addElementSeparator()
+ // Cast to a platform-independent, fixed-size type.
+ r, i := float64(real(val)), float64(imag(val))
+ enc.buf.AppendByte('"')
+ // Because we're always in a quoted string, we can use strconv without
+ // special-casing NaN and +/-Inf.
+ enc.buf.AppendFloat(r, 64)
+ enc.buf.AppendByte('+')
+ enc.buf.AppendFloat(i, 64)
+ enc.buf.AppendByte('i')
+ enc.buf.AppendByte('"')
+}
+
+func (enc *jsonEncoder) AppendDuration(val time.Duration) {
+ cur := enc.buf.Len()
+ enc.EncodeDuration(val, enc)
+ if cur == enc.buf.Len() {
+ // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep
+ // JSON valid.
+ enc.AppendInt64(int64(val))
+ }
+}
+
+func (enc *jsonEncoder) AppendInt64(val int64) {
+ enc.addElementSeparator()
+ enc.buf.AppendInt(val)
+}
+
+func (enc *jsonEncoder) AppendReflected(val interface{}) error {
+ enc.resetReflectBuf()
+ err := enc.reflectEnc.Encode(val)
+ if err != nil {
+ return err
+ }
+ enc.reflectBuf.TrimNewline()
+ enc.addElementSeparator()
+ _, err = enc.buf.Write(enc.reflectBuf.Bytes())
+ return err
+}
+
+func (enc *jsonEncoder) AppendString(val string) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.safeAddString(val)
+ enc.buf.AppendByte('"')
+}
+
+func (enc *jsonEncoder) AppendTime(val time.Time) {
+ cur := enc.buf.Len()
+ enc.EncodeTime(val, enc)
+ if cur == enc.buf.Len() {
+ // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep
+ // output JSON valid.
+ enc.AppendInt64(val.UnixNano())
+ }
+}
+
+func (enc *jsonEncoder) AppendUint64(val uint64) {
+ enc.addElementSeparator()
+ enc.buf.AppendUint(val)
+}
+
+func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
+func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) }
+func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) }
+func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
+func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
+func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
+
+func (enc *jsonEncoder) Clone() Encoder {
+ clone := enc.clone()
+ clone.buf.Write(enc.buf.Bytes())
+ return clone
+}
+
+func (enc *jsonEncoder) clone() *jsonEncoder {
+ clone := getJSONEncoder()
+ clone.EncoderConfig = enc.EncoderConfig
+ clone.spaced = enc.spaced
+ clone.openNamespaces = enc.openNamespaces
+ clone.buf = bufferpool.Get()
+ return clone
+}
+
+func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) {
+ final := enc.clone()
+ final.buf.AppendByte('{')
+
+ if final.LevelKey != "" {
+ final.addKey(final.LevelKey)
+ cur := final.buf.Len()
+ final.EncodeLevel(ent.Level, final)
+ if cur == final.buf.Len() {
+ // User-supplied EncodeLevel was a no-op. Fall back to strings to keep
+ // output JSON valid.
+ final.AppendString(ent.Level.String())
+ }
+ }
+ if final.TimeKey != "" {
+ final.AddTime(final.TimeKey, ent.Time)
+ }
+ if ent.LoggerName != "" && final.NameKey != "" {
+ final.addKey(final.NameKey)
+ cur := final.buf.Len()
+ nameEncoder := final.EncodeName
+
+ // if no name encoder provided, fall back to FullNameEncoder for backwards
+ // compatibility
+ if nameEncoder == nil {
+ nameEncoder = FullNameEncoder
+ }
+
+ nameEncoder(ent.LoggerName, final)
+ if cur == final.buf.Len() {
+ // User-supplied EncodeName was a no-op. Fall back to strings to
+ // keep output JSON valid.
+ final.AppendString(ent.LoggerName)
+ }
+ }
+ if ent.Caller.Defined && final.CallerKey != "" {
+ final.addKey(final.CallerKey)
+ cur := final.buf.Len()
+ final.EncodeCaller(ent.Caller, final)
+ if cur == final.buf.Len() {
+ // User-supplied EncodeCaller was a no-op. Fall back to strings to
+ // keep output JSON valid.
+ final.AppendString(ent.Caller.String())
+ }
+ }
+ if final.MessageKey != "" {
+ final.addKey(enc.MessageKey)
+ final.AppendString(ent.Message)
+ }
+ if enc.buf.Len() > 0 {
+ final.addElementSeparator()
+ final.buf.Write(enc.buf.Bytes())
+ }
+ addFields(final, fields)
+ final.closeOpenNamespaces()
+ if ent.Stack != "" && final.StacktraceKey != "" {
+ final.AddString(final.StacktraceKey, ent.Stack)
+ }
+ final.buf.AppendByte('}')
+ if final.LineEnding != "" {
+ final.buf.AppendString(final.LineEnding)
+ } else {
+ final.buf.AppendString(DefaultLineEnding)
+ }
+
+ ret := final.buf
+ putJSONEncoder(final)
+ return ret, nil
+}
+
+func (enc *jsonEncoder) truncate() {
+ enc.buf.Reset()
+}
+
+func (enc *jsonEncoder) closeOpenNamespaces() {
+ for i := 0; i < enc.openNamespaces; i++ {
+ enc.buf.AppendByte('}')
+ }
+}
+
+func (enc *jsonEncoder) addKey(key string) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.safeAddString(key)
+ enc.buf.AppendByte('"')
+ enc.buf.AppendByte(':')
+ if enc.spaced {
+ enc.buf.AppendByte(' ')
+ }
+}
+
+func (enc *jsonEncoder) addElementSeparator() {
+ last := enc.buf.Len() - 1
+ if last < 0 {
+ return
+ }
+ switch enc.buf.Bytes()[last] {
+ case '{', '[', ':', ',', ' ':
+ return
+ default:
+ enc.buf.AppendByte(',')
+ if enc.spaced {
+ enc.buf.AppendByte(' ')
+ }
+ }
+}
+
+func (enc *jsonEncoder) appendFloat(val float64, bitSize int) {
+ enc.addElementSeparator()
+ switch {
+ case math.IsNaN(val):
+ enc.buf.AppendString(`"NaN"`)
+ case math.IsInf(val, 1):
+ enc.buf.AppendString(`"+Inf"`)
+ case math.IsInf(val, -1):
+ enc.buf.AppendString(`"-Inf"`)
+ default:
+ enc.buf.AppendFloat(val, bitSize)
+ }
+}
+
+// safeAddString JSON-escapes a string and appends it to the internal buffer.
+// Unlike the standard library's encoder, it doesn't attempt to protect the
+// user from browser vulnerabilities or JSONP-related problems.
+func (enc *jsonEncoder) safeAddString(s string) {
+ for i := 0; i < len(s); {
+ if enc.tryAddRuneSelf(s[i]) {
+ i++
+ continue
+ }
+ r, size := utf8.DecodeRuneInString(s[i:])
+ if enc.tryAddRuneError(r, size) {
+ i++
+ continue
+ }
+ enc.buf.AppendString(s[i : i+size])
+ i += size
+ }
+}
+
+// safeAddByteString is a no-alloc equivalent of safeAddString(string(s)) for s []byte.
+func (enc *jsonEncoder) safeAddByteString(s []byte) {
+ for i := 0; i < len(s); {
+ if enc.tryAddRuneSelf(s[i]) {
+ i++
+ continue
+ }
+ r, size := utf8.DecodeRune(s[i:])
+ if enc.tryAddRuneError(r, size) {
+ i++
+ continue
+ }
+ enc.buf.Write(s[i : i+size])
+ i += size
+ }
+}
+
+// tryAddRuneSelf appends b if b is a valid UTF-8 character represented in a single byte.
+func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
+ if b >= utf8.RuneSelf {
+ return false
+ }
+ if 0x20 <= b && b != '\\' && b != '"' {
+ enc.buf.AppendByte(b)
+ return true
+ }
+ switch b {
+ case '\\', '"':
+ enc.buf.AppendByte('\\')
+ enc.buf.AppendByte(b)
+ case '\n':
+ enc.buf.AppendByte('\\')
+ enc.buf.AppendByte('n')
+ case '\r':
+ enc.buf.AppendByte('\\')
+ enc.buf.AppendByte('r')
+ case '\t':
+ enc.buf.AppendByte('\\')
+ enc.buf.AppendByte('t')
+ default:
+ // Encode bytes < 0x20, except for the escape sequences above.
+ enc.buf.AppendString(`\u00`)
+ enc.buf.AppendByte(_hex[b>>4])
+ enc.buf.AppendByte(_hex[b&0xF])
+ }
+ return true
+}
+
+func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
+ if r == utf8.RuneError && size == 1 {
+ enc.buf.AppendString(`\ufffd`)
+ return true
+ }
+ return false
+}
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
new file mode 100644
index 0000000000..e575c9f432
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/level.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level")
+
+// A Level is a logging priority. Higher levels are more important.
+type Level int8
+
+const (
+ // DebugLevel logs are typically voluminous, and are usually disabled in
+ // production.
+ DebugLevel Level = iota - 1
+ // InfoLevel is the default logging priority.
+ InfoLevel
+ // WarnLevel logs are more important than Info, but don't need individual
+ // human review.
+ WarnLevel
+ // ErrorLevel logs are high-priority. If an application is running smoothly,
+ // it shouldn't generate any error-level logs.
+ ErrorLevel
+ // DPanicLevel logs are particularly important errors. In development the
+ // logger panics after writing the message.
+ DPanicLevel
+ // PanicLevel logs a message, then panics.
+ PanicLevel
+ // FatalLevel logs a message, then calls os.Exit(1).
+ FatalLevel
+
+ _minLevel = DebugLevel
+ _maxLevel = FatalLevel
+)
+
+// String returns a lower-case ASCII representation of the log level.
+func (l Level) String() string {
+ switch l {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warn"
+ case ErrorLevel:
+ return "error"
+ case DPanicLevel:
+ return "dpanic"
+ case PanicLevel:
+ return "panic"
+ case FatalLevel:
+ return "fatal"
+ default:
+ return fmt.Sprintf("Level(%d)", l)
+ }
+}
+
+// CapitalString returns an all-caps ASCII representation of the log level.
+func (l Level) CapitalString() string {
+ // Printing levels in all-caps is common enough that we should export this
+ // functionality.
+ switch l {
+ case DebugLevel:
+ return "DEBUG"
+ case InfoLevel:
+ return "INFO"
+ case WarnLevel:
+ return "WARN"
+ case ErrorLevel:
+ return "ERROR"
+ case DPanicLevel:
+ return "DPANIC"
+ case PanicLevel:
+ return "PANIC"
+ case FatalLevel:
+ return "FATAL"
+ default:
+ return fmt.Sprintf("LEVEL(%d)", l)
+ }
+}
+
+// MarshalText marshals the Level to text. Note that the text representation
+// drops the -Level suffix (see example).
+func (l Level) MarshalText() ([]byte, error) {
+ return []byte(l.String()), nil
+}
+
+// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText
+// expects the text representation of a Level to drop the -Level suffix (see
+// example).
+//
+// In particular, this makes it easy to configure logging levels using YAML,
+// TOML, or JSON files.
+func (l *Level) UnmarshalText(text []byte) error {
+ if l == nil {
+ return errUnmarshalNilLevel
+ }
+ if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) {
+ return fmt.Errorf("unrecognized level: %q", text)
+ }
+ return nil
+}
+
+func (l *Level) unmarshalText(text []byte) bool {
+ switch string(text) {
+ case "debug", "DEBUG":
+ *l = DebugLevel
+ case "info", "INFO", "": // make the zero value useful
+ *l = InfoLevel
+ case "warn", "WARN":
+ *l = WarnLevel
+ case "error", "ERROR":
+ *l = ErrorLevel
+ case "dpanic", "DPANIC":
+ *l = DPanicLevel
+ case "panic", "PANIC":
+ *l = PanicLevel
+ case "fatal", "FATAL":
+ *l = FatalLevel
+ default:
+ return false
+ }
+ return true
+}
+
+// Set sets the level for the flag.Value interface.
+func (l *Level) Set(s string) error {
+ return l.UnmarshalText([]byte(s))
+}
+
+// Get gets the level for the flag.Getter interface.
+func (l *Level) Get() interface{} {
+ return *l
+}
+
+// Enabled returns true if the given level is at or above this level.
+func (l Level) Enabled(lvl Level) bool {
+ return lvl >= l
+}
+
+// LevelEnabler decides whether a given logging level is enabled when logging a
+// message.
+//
+// Enablers are intended to be used to implement deterministic filters;
+// concerns like sampling are better implemented as a Core.
+//
+// Each concrete Level value implements a static LevelEnabler which returns
+// true for itself and all higher logging levels. For example WarnLevel.Enabled()
+// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and
+// FatalLevel, but return false for InfoLevel and DebugLevel.
+type LevelEnabler interface {
+ Enabled(Level) bool
+}
diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go
new file mode 100644
index 0000000000..7af8dadcb3
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/level_strings.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/zap/internal/color"
+
+var (
+ _levelToColor = map[Level]color.Color{
+ DebugLevel: color.Magenta,
+ InfoLevel: color.Blue,
+ WarnLevel: color.Yellow,
+ ErrorLevel: color.Red,
+ DPanicLevel: color.Red,
+ PanicLevel: color.Red,
+ FatalLevel: color.Red,
+ }
+ _unknownLevelColor = color.Red
+
+ _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor))
+ _levelToCapitalColorString = make(map[Level]string, len(_levelToColor))
+)
+
+func init() {
+ for level, color := range _levelToColor {
+ _levelToLowercaseColorString[level] = color.Add(level.String())
+ _levelToCapitalColorString[level] = color.Add(level.CapitalString())
+ }
+}
diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go
new file mode 100644
index 0000000000..2627a653df
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/marshaler.go
@@ -0,0 +1,53 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+// ObjectMarshaler allows user-defined types to efficiently add themselves to the
+// logging context, and to selectively omit information which shouldn't be
+// included in logs (e.g., passwords).
+type ObjectMarshaler interface {
+ MarshalLogObject(ObjectEncoder) error
+}
+
+// ObjectMarshalerFunc is a type adapter that turns a function into an
+// ObjectMarshaler.
+type ObjectMarshalerFunc func(ObjectEncoder) error
+
+// MarshalLogObject calls the underlying function.
+func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error {
+ return f(enc)
+}
+
+// ArrayMarshaler allows user-defined types to efficiently add themselves to the
+// logging context, and to selectively omit information which shouldn't be
+// included in logs (e.g., passwords).
+type ArrayMarshaler interface {
+ MarshalLogArray(ArrayEncoder) error
+}
+
+// ArrayMarshalerFunc is a type adapter that turns a function into an
+// ArrayMarshaler.
+type ArrayMarshalerFunc func(ArrayEncoder) error
+
+// MarshalLogArray calls the underlying function.
+func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error {
+ return f(enc)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go
new file mode 100644
index 0000000000..6ef85b09c7
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go
@@ -0,0 +1,179 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "time"
+
+// MapObjectEncoder is an ObjectEncoder backed by a simple
+// map[string]interface{}. It's not fast enough for production use, but it's
+// helpful in tests.
+type MapObjectEncoder struct {
+ // Fields contains the entire encoded log context.
+ Fields map[string]interface{}
+ // cur is a pointer to the namespace we're currently writing to.
+ cur map[string]interface{}
+}
+
+// NewMapObjectEncoder creates a new map-backed ObjectEncoder.
+func NewMapObjectEncoder() *MapObjectEncoder {
+ m := make(map[string]interface{})
+ return &MapObjectEncoder{
+ Fields: m,
+ cur: m,
+ }
+}
+
+// AddArray implements ObjectEncoder.
+func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error {
+ arr := &sliceArrayEncoder{elems: make([]interface{}, 0)}
+ err := v.MarshalLogArray(arr)
+ m.cur[key] = arr.elems
+ return err
+}
+
+// AddObject implements ObjectEncoder.
+func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error {
+ newMap := NewMapObjectEncoder()
+ m.cur[k] = newMap.Fields
+ return v.MarshalLogObject(newMap)
+}
+
+// AddBinary implements ObjectEncoder.
+func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v }
+
+// AddByteString implements ObjectEncoder.
+func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) }
+
+// AddBool implements ObjectEncoder.
+func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v }
+
+// AddDuration implements ObjectEncoder.
+func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v }
+
+// AddComplex128 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v }
+
+// AddComplex64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v }
+
+// AddFloat64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v }
+
+// AddFloat32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v }
+
+// AddInt implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v }
+
+// AddInt64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v }
+
+// AddInt32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v }
+
+// AddInt16 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v }
+
+// AddInt8 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v }
+
+// AddString implements ObjectEncoder.
+func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v }
+
+// AddTime implements ObjectEncoder.
+func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v }
+
+// AddUint implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v }
+
+// AddUint64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v }
+
+// AddUint32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v }
+
+// AddUint16 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v }
+
+// AddUint8 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v }
+
+// AddUintptr implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v }
+
+// AddReflected implements ObjectEncoder.
+func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error {
+ m.cur[k] = v
+ return nil
+}
+
+// OpenNamespace implements ObjectEncoder.
+func (m *MapObjectEncoder) OpenNamespace(k string) {
+ ns := make(map[string]interface{})
+ m.cur[k] = ns
+ m.cur = ns
+}
+
+// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like
+// the MapObjectEncoder, it's not designed for production use.
+type sliceArrayEncoder struct {
+ elems []interface{}
+}
+
+func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error {
+ enc := &sliceArrayEncoder{}
+ err := v.MarshalLogArray(enc)
+ s.elems = append(s.elems, enc.elems)
+ return err
+}
+
+func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error {
+ m := NewMapObjectEncoder()
+ err := v.MarshalLogObject(m)
+ s.elems = append(s.elems, m.Fields)
+ return err
+}
+
+func (s *sliceArrayEncoder) AppendReflected(v interface{}) error {
+ s.elems = append(s.elems, v)
+ return nil
+}
+
+func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) }
diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go
new file mode 100644
index 0000000000..e316418636
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/sampler.go
@@ -0,0 +1,134 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "time"
+
+ "go.uber.org/atomic"
+)
+
+const (
+ _numLevels = _maxLevel - _minLevel + 1
+ _countersPerLevel = 4096
+)
+
+type counter struct {
+ resetAt atomic.Int64
+ counter atomic.Uint64
+}
+
+type counters [_numLevels][_countersPerLevel]counter
+
+func newCounters() *counters {
+ return &counters{}
+}
+
+func (cs *counters) get(lvl Level, key string) *counter {
+ i := lvl - _minLevel
+ j := fnv32a(key) % _countersPerLevel
+ return &cs[i][j]
+}
+
+// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc
+func fnv32a(s string) uint32 {
+ const (
+ offset32 = 2166136261
+ prime32 = 16777619
+ )
+ hash := uint32(offset32)
+ for i := 0; i < len(s); i++ {
+ hash ^= uint32(s[i])
+ hash *= prime32
+ }
+ return hash
+}
+
+func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 {
+ tn := t.UnixNano()
+ resetAfter := c.resetAt.Load()
+ if resetAfter > tn {
+ return c.counter.Inc()
+ }
+
+ c.counter.Store(1)
+
+ newResetAfter := tn + tick.Nanoseconds()
+ if !c.resetAt.CAS(resetAfter, newResetAfter) {
+ // We raced with another goroutine trying to reset, and it also reset
+ // the counter to 1, so we need to reincrement the counter.
+ return c.counter.Inc()
+ }
+
+ return 1
+}
+
+type sampler struct {
+ Core
+
+ counts *counters
+ tick time.Duration
+ first, thereafter uint64
+}
+
+// NewSampler creates a Core that samples incoming entries, which caps the CPU
+// and I/O load of logging while attempting to preserve a representative subset
+// of your logs.
+//
+// Zap samples by logging the first N entries with a given level and message
+// each tick. If more Entries with the same level and message are seen during
+// the same interval, every Mth message is logged and the rest are dropped.
+//
+// Keep in mind that zap's sampling implementation is optimized for speed over
+// absolute precision; under load, each tick may be slightly over- or
+// under-sampled.
+func NewSampler(core Core, tick time.Duration, first, thereafter int) Core {
+ return &sampler{
+ Core: core,
+ tick: tick,
+ counts: newCounters(),
+ first: uint64(first),
+ thereafter: uint64(thereafter),
+ }
+}
+
+func (s *sampler) With(fields []Field) Core {
+ return &sampler{
+ Core: s.Core.With(fields),
+ tick: s.tick,
+ counts: s.counts,
+ first: s.first,
+ thereafter: s.thereafter,
+ }
+}
+
+func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ if !s.Enabled(ent.Level) {
+ return ce
+ }
+
+ counter := s.counts.get(ent.Level, ent.Message)
+ n := counter.IncCheckReset(ent.Time, s.tick)
+ if n > s.first && (n-s.first)%s.thereafter != 0 {
+ return ce
+ }
+ return s.Core.Check(ent, ce)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go
new file mode 100644
index 0000000000..07a32eef9a
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/tee.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/multierr"
+
+type multiCore []Core
+
+// NewTee creates a Core that duplicates log entries into two or more
+// underlying Cores.
+//
+// Calling it with a single Core returns the input unchanged, and calling
+// it with no input returns a no-op Core.
+func NewTee(cores ...Core) Core {
+ switch len(cores) {
+ case 0:
+ return NewNopCore()
+ case 1:
+ return cores[0]
+ default:
+ return multiCore(cores)
+ }
+}
+
+func (mc multiCore) With(fields []Field) Core {
+ clone := make(multiCore, len(mc))
+ for i := range mc {
+ clone[i] = mc[i].With(fields)
+ }
+ return clone
+}
+
+func (mc multiCore) Enabled(lvl Level) bool {
+ for i := range mc {
+ if mc[i].Enabled(lvl) {
+ return true
+ }
+ }
+ return false
+}
+
+func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ for i := range mc {
+ ce = mc[i].Check(ent, ce)
+ }
+ return ce
+}
+
+func (mc multiCore) Write(ent Entry, fields []Field) error {
+ var err error
+ for i := range mc {
+ err = multierr.Append(err, mc[i].Write(ent, fields))
+ }
+ return err
+}
+
+func (mc multiCore) Sync() error {
+ var err error
+ for i := range mc {
+ err = multierr.Append(err, mc[i].Sync())
+ }
+ return err
+}
diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go
new file mode 100644
index 0000000000..209e25fe22
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go
@@ -0,0 +1,123 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "io"
+ "sync"
+
+ "go.uber.org/multierr"
+)
+
+// A WriteSyncer is an io.Writer that can also flush any buffered data. Note
+// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer.
+type WriteSyncer interface {
+ io.Writer
+ Sync() error
+}
+
+// AddSync converts an io.Writer to a WriteSyncer. It attempts to be
+// intelligent: if the concrete type of the io.Writer implements WriteSyncer,
+// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync.
+func AddSync(w io.Writer) WriteSyncer {
+ switch w := w.(type) {
+ case WriteSyncer:
+ return w
+ default:
+ return writerWrapper{w}
+ }
+}
+
+type lockedWriteSyncer struct {
+ sync.Mutex
+ ws WriteSyncer
+}
+
+// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In
+// particular, *os.Files must be locked before use.
+func Lock(ws WriteSyncer) WriteSyncer {
+ if _, ok := ws.(*lockedWriteSyncer); ok {
+ // no need to layer on another lock
+ return ws
+ }
+ return &lockedWriteSyncer{ws: ws}
+}
+
+func (s *lockedWriteSyncer) Write(bs []byte) (int, error) {
+ s.Lock()
+ n, err := s.ws.Write(bs)
+ s.Unlock()
+ return n, err
+}
+
+func (s *lockedWriteSyncer) Sync() error {
+ s.Lock()
+ err := s.ws.Sync()
+ s.Unlock()
+ return err
+}
+
+type writerWrapper struct {
+ io.Writer
+}
+
+func (w writerWrapper) Sync() error {
+ return nil
+}
+
+type multiWriteSyncer []WriteSyncer
+
+// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes
+// and sync calls, much like io.MultiWriter.
+func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer {
+ if len(ws) == 1 {
+ return ws[0]
+ }
+ // Copy to protect against https://github.com/golang/go/issues/7809
+ return multiWriteSyncer(append([]WriteSyncer(nil), ws...))
+}
+
+// See https://golang.org/src/io/multi.go
+// When not all underlying syncers write the same number of bytes,
+// the smallest number is returned even though Write() is called on
+// all of them.
+func (ws multiWriteSyncer) Write(p []byte) (int, error) {
+ var writeErr error
+ nWritten := 0
+ for _, w := range ws {
+ n, err := w.Write(p)
+ writeErr = multierr.Append(writeErr, err)
+ if nWritten == 0 && n != 0 {
+ nWritten = n
+ } else if n < nWritten {
+ nWritten = n
+ }
+ }
+ return nWritten, writeErr
+}
+
+func (ws multiWriteSyncer) Sync() error {
+ var err error
+ for _, w := range ws {
+ err = multierr.Append(err, w.Sync())
+ }
+ return err
+}
diff --git a/vendor/k8s.io/client-go/testing/BUILD.bazel b/vendor/k8s.io/client-go/testing/BUILD.bazel
new file mode 100644
index 0000000000..998b0f429d
--- /dev/null
+++ b/vendor/k8s.io/client-go/testing/BUILD.bazel
@@ -0,0 +1,28 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "actions.go",
+ "fake.go",
+ "fixture.go",
+ ],
+ importmap = "sigs.k8s.io/cluster-api-provider-aws/vendor/k8s.io/client-go/testing",
+ importpath = "k8s.io/client-go/testing",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//vendor/github.com/evanphx/json-patch:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
+ "//vendor/k8s.io/client-go/rest:go_default_library",
+ ],
+)
diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go
new file mode 100644
index 0000000000..e6db578ed8
--- /dev/null
+++ b/vendor/k8s.io/client-go/testing/actions.go
@@ -0,0 +1,671 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "fmt"
+ "path"
+ "strings"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/fields"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl {
+ action := GetActionImpl{}
+ action.Verb = "get"
+ action.Resource = resource
+ action.Name = name
+
+ return action
+}
+
+func NewGetAction(resource schema.GroupVersionResource, namespace, name string) GetActionImpl {
+ action := GetActionImpl{}
+ action.Verb = "get"
+ action.Resource = resource
+ action.Namespace = namespace
+ action.Name = name
+
+ return action
+}
+
+func NewGetSubresourceAction(resource schema.GroupVersionResource, namespace, subresource, name string) GetActionImpl {
+ action := GetActionImpl{}
+ action.Verb = "get"
+ action.Resource = resource
+ action.Subresource = subresource
+ action.Namespace = namespace
+ action.Name = name
+
+ return action
+}
+
+func NewRootGetSubresourceAction(resource schema.GroupVersionResource, subresource, name string) GetActionImpl {
+ action := GetActionImpl{}
+ action.Verb = "get"
+ action.Resource = resource
+ action.Subresource = subresource
+ action.Name = name
+
+ return action
+}
+
+func NewRootListAction(resource schema.GroupVersionResource, kind schema.GroupVersionKind, opts interface{}) ListActionImpl {
+ action := ListActionImpl{}
+ action.Verb = "list"
+ action.Resource = resource
+ action.Kind = kind
+ labelSelector, fieldSelector, _ := ExtractFromListOptions(opts)
+ action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}
+
+ return action
+}
+
+func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersionKind, namespace string, opts interface{}) ListActionImpl {
+ action := ListActionImpl{}
+ action.Verb = "list"
+ action.Resource = resource
+ action.Kind = kind
+ action.Namespace = namespace
+ labelSelector, fieldSelector, _ := ExtractFromListOptions(opts)
+ action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}
+
+ return action
+}
+
+func NewRootCreateAction(resource schema.GroupVersionResource, object runtime.Object) CreateActionImpl {
+ action := CreateActionImpl{}
+ action.Verb = "create"
+ action.Resource = resource
+ action.Object = object
+
+ return action
+}
+
+func NewCreateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) CreateActionImpl {
+ action := CreateActionImpl{}
+ action.Verb = "create"
+ action.Resource = resource
+ action.Namespace = namespace
+ action.Object = object
+
+ return action
+}
+
+func NewRootCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource string, object runtime.Object) CreateActionImpl {
+ action := CreateActionImpl{}
+ action.Verb = "create"
+ action.Resource = resource
+ action.Subresource = subresource
+ action.Name = name
+ action.Object = object
+
+ return action
+}
+
+func NewCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object) CreateActionImpl {
+ action := CreateActionImpl{}
+ action.Verb = "create"
+ action.Resource = resource
+ action.Namespace = namespace
+ action.Subresource = subresource
+ action.Name = name
+ action.Object = object
+
+ return action
+}
+
+func NewRootUpdateAction(resource schema.GroupVersionResource, object runtime.Object) UpdateActionImpl {
+ action := UpdateActionImpl{}
+ action.Verb = "update"
+ action.Resource = resource
+ action.Object = object
+
+ return action
+}
+
+func NewUpdateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) UpdateActionImpl {
+ action := UpdateActionImpl{}
+ action.Verb = "update"
+ action.Resource = resource
+ action.Namespace = namespace
+ action.Object = object
+
+ return action
+}
+
+func NewRootPatchAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte) PatchActionImpl {
+ action := PatchActionImpl{}
+ action.Verb = "patch"
+ action.Resource = resource
+ action.Name = name
+ action.PatchType = pt
+ action.Patch = patch
+
+ return action
+}
+
+func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte) PatchActionImpl {
+ action := PatchActionImpl{}
+ action.Verb = "patch"
+ action.Resource = resource
+ action.Namespace = namespace
+ action.Name = name
+ action.PatchType = pt
+ action.Patch = patch
+
+ return action
+}
+
+func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl {
+ action := PatchActionImpl{}
+ action.Verb = "patch"
+ action.Resource = resource
+ action.Subresource = path.Join(subresources...)
+ action.Name = name
+ action.PatchType = pt
+ action.Patch = patch
+
+ return action
+}
+
+func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl {
+ action := PatchActionImpl{}
+ action.Verb = "patch"
+ action.Resource = resource
+ action.Subresource = path.Join(subresources...)
+ action.Namespace = namespace
+ action.Name = name
+ action.PatchType = pt
+ action.Patch = patch
+
+ return action
+}
+
+func NewRootUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, object runtime.Object) UpdateActionImpl {
+ action := UpdateActionImpl{}
+ action.Verb = "update"
+ action.Resource = resource
+ action.Subresource = subresource
+ action.Object = object
+
+ return action
+}
+func NewUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object) UpdateActionImpl {
+ action := UpdateActionImpl{}
+ action.Verb = "update"
+ action.Resource = resource
+ action.Subresource = subresource
+ action.Namespace = namespace
+ action.Object = object
+
+ return action
+}
+
+func NewRootDeleteAction(resource schema.GroupVersionResource, name string) DeleteActionImpl {
+ action := DeleteActionImpl{}
+ action.Verb = "delete"
+ action.Resource = resource
+ action.Name = name
+
+ return action
+}
+
+func NewRootDeleteSubresourceAction(resource schema.GroupVersionResource, subresource string, name string) DeleteActionImpl {
+ action := DeleteActionImpl{}
+ action.Verb = "delete"
+ action.Resource = resource
+ action.Subresource = subresource
+ action.Name = name
+
+ return action
+}
+
+func NewDeleteAction(resource schema.GroupVersionResource, namespace, name string) DeleteActionImpl {
+ action := DeleteActionImpl{}
+ action.Verb = "delete"
+ action.Resource = resource
+ action.Namespace = namespace
+ action.Name = name
+
+ return action
+}
+
+func NewDeleteSubresourceAction(resource schema.GroupVersionResource, subresource, namespace, name string) DeleteActionImpl {
+ action := DeleteActionImpl{}
+ action.Verb = "delete"
+ action.Resource = resource
+ action.Subresource = subresource
+ action.Namespace = namespace
+ action.Name = name
+
+ return action
+}
+
+func NewRootDeleteCollectionAction(resource schema.GroupVersionResource, opts interface{}) DeleteCollectionActionImpl {
+ action := DeleteCollectionActionImpl{}
+ action.Verb = "delete-collection"
+ action.Resource = resource
+ labelSelector, fieldSelector, _ := ExtractFromListOptions(opts)
+ action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}
+
+ return action
+}
+
+func NewDeleteCollectionAction(resource schema.GroupVersionResource, namespace string, opts interface{}) DeleteCollectionActionImpl {
+ action := DeleteCollectionActionImpl{}
+ action.Verb = "delete-collection"
+ action.Resource = resource
+ action.Namespace = namespace
+ labelSelector, fieldSelector, _ := ExtractFromListOptions(opts)
+ action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}
+
+ return action
+}
+
+func NewRootWatchAction(resource schema.GroupVersionResource, opts interface{}) WatchActionImpl {
+ action := WatchActionImpl{}
+ action.Verb = "watch"
+ action.Resource = resource
+ labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts)
+ action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion}
+
+ return action
+}
+
+func ExtractFromListOptions(opts interface{}) (labelSelector labels.Selector, fieldSelector fields.Selector, resourceVersion string) {
+ var err error
+ switch t := opts.(type) {
+ case metav1.ListOptions:
+ labelSelector, err = labels.Parse(t.LabelSelector)
+ if err != nil {
+ panic(fmt.Errorf("invalid selector %q: %v", t.LabelSelector, err))
+ }
+ fieldSelector, err = fields.ParseSelector(t.FieldSelector)
+ if err != nil {
+ panic(fmt.Errorf("invalid selector %q: %v", t.FieldSelector, err))
+ }
+ resourceVersion = t.ResourceVersion
+ default:
+ panic(fmt.Errorf("expect a ListOptions %T", opts))
+ }
+ if labelSelector == nil {
+ labelSelector = labels.Everything()
+ }
+ if fieldSelector == nil {
+ fieldSelector = fields.Everything()
+ }
+ return labelSelector, fieldSelector, resourceVersion
+}
+
+func NewWatchAction(resource schema.GroupVersionResource, namespace string, opts interface{}) WatchActionImpl {
+ action := WatchActionImpl{}
+ action.Verb = "watch"
+ action.Resource = resource
+ action.Namespace = namespace
+ labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts)
+ action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion}
+
+ return action
+}
+
+func NewProxyGetAction(resource schema.GroupVersionResource, namespace, scheme, name, port, path string, params map[string]string) ProxyGetActionImpl {
+ action := ProxyGetActionImpl{}
+ action.Verb = "get"
+ action.Resource = resource
+ action.Namespace = namespace
+ action.Scheme = scheme
+ action.Name = name
+ action.Port = port
+ action.Path = path
+ action.Params = params
+ return action
+}
+
+type ListRestrictions struct {
+ Labels labels.Selector
+ Fields fields.Selector
+}
+type WatchRestrictions struct {
+ Labels labels.Selector
+ Fields fields.Selector
+ ResourceVersion string
+}
+
+type Action interface {
+ GetNamespace() string
+ GetVerb() string
+ GetResource() schema.GroupVersionResource
+ GetSubresource() string
+ Matches(verb, resource string) bool
+
+ // DeepCopy is used to copy an action to avoid any risk of accidental mutation. Most people never need to call this
+ // because the invocation logic deep copies before calls to storage and reactors.
+ DeepCopy() Action
+}
+
+type GenericAction interface {
+ Action
+ GetValue() interface{}
+}
+
+type GetAction interface {
+ Action
+ GetName() string
+}
+
+type ListAction interface {
+ Action
+ GetListRestrictions() ListRestrictions
+}
+
+type CreateAction interface {
+ Action
+ GetObject() runtime.Object
+}
+
+type UpdateAction interface {
+ Action
+ GetObject() runtime.Object
+}
+
+type DeleteAction interface {
+ Action
+ GetName() string
+}
+
+type DeleteCollectionAction interface {
+ Action
+ GetListRestrictions() ListRestrictions
+}
+
+type PatchAction interface {
+ Action
+ GetName() string
+ GetPatchType() types.PatchType
+ GetPatch() []byte
+}
+
+type WatchAction interface {
+ Action
+ GetWatchRestrictions() WatchRestrictions
+}
+
+type ProxyGetAction interface {
+ Action
+ GetScheme() string
+ GetName() string
+ GetPort() string
+ GetPath() string
+ GetParams() map[string]string
+}
+
+type ActionImpl struct {
+ Namespace string
+ Verb string
+ Resource schema.GroupVersionResource
+ Subresource string
+}
+
+func (a ActionImpl) GetNamespace() string {
+ return a.Namespace
+}
+func (a ActionImpl) GetVerb() string {
+ return a.Verb
+}
+func (a ActionImpl) GetResource() schema.GroupVersionResource {
+ return a.Resource
+}
+func (a ActionImpl) GetSubresource() string {
+ return a.Subresource
+}
+func (a ActionImpl) Matches(verb, resource string) bool {
+ return strings.ToLower(verb) == strings.ToLower(a.Verb) &&
+ strings.ToLower(resource) == strings.ToLower(a.Resource.Resource)
+}
+func (a ActionImpl) DeepCopy() Action {
+ ret := a
+ return ret
+}
+
+type GenericActionImpl struct {
+ ActionImpl
+ Value interface{}
+}
+
+func (a GenericActionImpl) GetValue() interface{} {
+ return a.Value
+}
+
+func (a GenericActionImpl) DeepCopy() Action {
+ return GenericActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ // TODO this is wrong, but no worse than before
+ Value: a.Value,
+ }
+}
+
+type GetActionImpl struct {
+ ActionImpl
+ Name string
+}
+
+func (a GetActionImpl) GetName() string {
+ return a.Name
+}
+
+func (a GetActionImpl) DeepCopy() Action {
+ return GetActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ Name: a.Name,
+ }
+}
+
+type ListActionImpl struct {
+ ActionImpl
+ Kind schema.GroupVersionKind
+ Name string
+ ListRestrictions ListRestrictions
+}
+
+func (a ListActionImpl) GetKind() schema.GroupVersionKind {
+ return a.Kind
+}
+
+func (a ListActionImpl) GetListRestrictions() ListRestrictions {
+ return a.ListRestrictions
+}
+
+func (a ListActionImpl) DeepCopy() Action {
+ return ListActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ Kind: a.Kind,
+ Name: a.Name,
+ ListRestrictions: ListRestrictions{
+ Labels: a.ListRestrictions.Labels.DeepCopySelector(),
+ Fields: a.ListRestrictions.Fields.DeepCopySelector(),
+ },
+ }
+}
+
+type CreateActionImpl struct {
+ ActionImpl
+ Name string
+ Object runtime.Object
+}
+
+func (a CreateActionImpl) GetObject() runtime.Object {
+ return a.Object
+}
+
+func (a CreateActionImpl) DeepCopy() Action {
+ return CreateActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ Name: a.Name,
+ Object: a.Object.DeepCopyObject(),
+ }
+}
+
+type UpdateActionImpl struct {
+ ActionImpl
+ Object runtime.Object
+}
+
+func (a UpdateActionImpl) GetObject() runtime.Object {
+ return a.Object
+}
+
+func (a UpdateActionImpl) DeepCopy() Action {
+ return UpdateActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ Object: a.Object.DeepCopyObject(),
+ }
+}
+
+type PatchActionImpl struct {
+ ActionImpl
+ Name string
+ PatchType types.PatchType
+ Patch []byte
+}
+
+func (a PatchActionImpl) GetName() string {
+ return a.Name
+}
+
+func (a PatchActionImpl) GetPatch() []byte {
+ return a.Patch
+}
+
+func (a PatchActionImpl) GetPatchType() types.PatchType {
+ return a.PatchType
+}
+
+func (a PatchActionImpl) DeepCopy() Action {
+ patch := make([]byte, len(a.Patch))
+ copy(patch, a.Patch)
+ return PatchActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ Name: a.Name,
+ PatchType: a.PatchType,
+ Patch: patch,
+ }
+}
+
+type DeleteActionImpl struct {
+ ActionImpl
+ Name string
+}
+
+func (a DeleteActionImpl) GetName() string {
+ return a.Name
+}
+
+func (a DeleteActionImpl) DeepCopy() Action {
+ return DeleteActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ Name: a.Name,
+ }
+}
+
+type DeleteCollectionActionImpl struct {
+ ActionImpl
+ ListRestrictions ListRestrictions
+}
+
+func (a DeleteCollectionActionImpl) GetListRestrictions() ListRestrictions {
+ return a.ListRestrictions
+}
+
+func (a DeleteCollectionActionImpl) DeepCopy() Action {
+ return DeleteCollectionActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ ListRestrictions: ListRestrictions{
+ Labels: a.ListRestrictions.Labels.DeepCopySelector(),
+ Fields: a.ListRestrictions.Fields.DeepCopySelector(),
+ },
+ }
+}
+
+type WatchActionImpl struct {
+ ActionImpl
+ WatchRestrictions WatchRestrictions
+}
+
+func (a WatchActionImpl) GetWatchRestrictions() WatchRestrictions {
+ return a.WatchRestrictions
+}
+
+func (a WatchActionImpl) DeepCopy() Action {
+ return WatchActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ WatchRestrictions: WatchRestrictions{
+ Labels: a.WatchRestrictions.Labels.DeepCopySelector(),
+ Fields: a.WatchRestrictions.Fields.DeepCopySelector(),
+ ResourceVersion: a.WatchRestrictions.ResourceVersion,
+ },
+ }
+}
+
+type ProxyGetActionImpl struct {
+ ActionImpl
+ Scheme string
+ Name string
+ Port string
+ Path string
+ Params map[string]string
+}
+
+func (a ProxyGetActionImpl) GetScheme() string {
+ return a.Scheme
+}
+
+func (a ProxyGetActionImpl) GetName() string {
+ return a.Name
+}
+
+func (a ProxyGetActionImpl) GetPort() string {
+ return a.Port
+}
+
+func (a ProxyGetActionImpl) GetPath() string {
+ return a.Path
+}
+
+func (a ProxyGetActionImpl) GetParams() map[string]string {
+ return a.Params
+}
+
+func (a ProxyGetActionImpl) DeepCopy() Action {
+ params := map[string]string{}
+ for k, v := range a.Params {
+ params[k] = v
+ }
+ return ProxyGetActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ Scheme: a.Scheme,
+ Name: a.Name,
+ Port: a.Port,
+ Path: a.Path,
+ Params: params,
+ }
+}
diff --git a/vendor/k8s.io/client-go/testing/fake.go b/vendor/k8s.io/client-go/testing/fake.go
new file mode 100644
index 0000000000..8b9ee149c8
--- /dev/null
+++ b/vendor/k8s.io/client-go/testing/fake.go
@@ -0,0 +1,216 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "fmt"
+ "sync"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
+ restclient "k8s.io/client-go/rest"
+)
+
+// Fake implements client.Interface. Meant to be embedded into a struct to get
+// a default implementation. This makes faking out just the method you want to
+// test easier.
+// Fake implements client.Interface. Meant to be embedded into a struct to get
+// a default implementation. This makes faking out just the method you want to
+// test easier.
+type Fake struct {
+	sync.RWMutex
+	// actions is guarded by the embedded RWMutex; access it via Actions()/
+	// ClearActions(), not directly.
+	actions []Action // these may be castable to other types, but "Action" is the minimum
+
+	// ReactionChain is the list of reactors that will be attempted for every
+	// request in the order they are tried.
+	ReactionChain []Reactor
+	// WatchReactionChain is the list of watch reactors that will be attempted
+	// for every request in the order they are tried.
+	WatchReactionChain []WatchReactor
+	// ProxyReactionChain is the list of proxy reactors that will be attempted
+	// for every request in the order they are tried.
+	ProxyReactionChain []ProxyReactor
+
+	// Resources is the list of API resources served by the fake discovery-like
+	// surface.
+	Resources []*metav1.APIResourceList
+}
+
+// Reactor is an interface to allow the composition of reaction functions.
+type Reactor interface {
+	// Handles indicates whether or not this Reactor deals with a given
+	// action.
+	Handles(action Action) bool
+	// React handles the action and returns results. It may choose to
+	// delegate by indicating handled=false.
+	React(action Action) (handled bool, ret runtime.Object, err error)
+}
+
+// WatchReactor is an interface to allow the composition of watch functions.
+type WatchReactor interface {
+	// Handles indicates whether or not this WatchReactor deals with a given
+	// action.
+	Handles(action Action) bool
+	// React handles a watch action and returns results. It may choose to
+	// delegate by indicating handled=false.
+	React(action Action) (handled bool, ret watch.Interface, err error)
+}
+
+// ProxyReactor is an interface to allow the composition of proxy get
+// functions.
+type ProxyReactor interface {
+	// Handles indicates whether or not this ProxyReactor deals with a given
+	// action.
+	Handles(action Action) bool
+	// React handles a proxy action and returns results. It may choose to
+	// delegate by indicating handled=false.
+	React(action Action) (handled bool, ret restclient.ResponseWrapper, err error)
+}
+
+// ReactionFunc is a function that returns an object or error for a given
+// Action. If "handled" is false, then the test client will ignore the
+// results and continue to the next ReactionFunc. A ReactionFunc can describe
+// reactions on subresources by testing the result of the action's
+// GetSubresource() method.
+type ReactionFunc func(action Action) (handled bool, ret runtime.Object, err error)
+
+// WatchReactionFunc is a function that returns a watch interface. If
+// "handled" is false, then the test client will ignore the results and
+// continue to the next WatchReactionFunc.
+type WatchReactionFunc func(action Action) (handled bool, ret watch.Interface, err error)
+
+// ProxyReactionFunc is a function that returns a ResponseWrapper interface
+// for a given Action. If "handled" is false, then the test client will
+// ignore the results and continue to the next ProxyReactionFunc.
+type ProxyReactionFunc func(action Action) (handled bool, ret restclient.ResponseWrapper, err error)
+
+// AddReactor appends a reactor to the end of the chain. "*" for verb or
+// resource matches everything (see SimpleReactor.Handles).
+func (c *Fake) AddReactor(verb, resource string, reaction ReactionFunc) {
+	c.ReactionChain = append(c.ReactionChain, &SimpleReactor{verb, resource, reaction})
+}
+
+// PrependReactor adds a reactor to the beginning of the chain, so it is
+// consulted before any previously registered reactor.
+func (c *Fake) PrependReactor(verb, resource string, reaction ReactionFunc) {
+	c.ReactionChain = append([]Reactor{&SimpleReactor{verb, resource, reaction}}, c.ReactionChain...)
+}
+
+// AddWatchReactor appends a watch reactor to the end of the chain.
+func (c *Fake) AddWatchReactor(resource string, reaction WatchReactionFunc) {
+	c.WatchReactionChain = append(c.WatchReactionChain, &SimpleWatchReactor{resource, reaction})
+}
+
+// PrependWatchReactor adds a watch reactor to the beginning of the chain.
+func (c *Fake) PrependWatchReactor(resource string, reaction WatchReactionFunc) {
+	c.WatchReactionChain = append([]WatchReactor{&SimpleWatchReactor{resource, reaction}}, c.WatchReactionChain...)
+}
+
+// AddProxyReactor appends a proxy reactor to the end of the chain.
+func (c *Fake) AddProxyReactor(resource string, reaction ProxyReactionFunc) {
+	c.ProxyReactionChain = append(c.ProxyReactionChain, &SimpleProxyReactor{resource, reaction})
+}
+
+// PrependProxyReactor adds a proxy reactor to the beginning of the chain.
+func (c *Fake) PrependProxyReactor(resource string, reaction ProxyReactionFunc) {
+	c.ProxyReactionChain = append([]ProxyReactor{&SimpleProxyReactor{resource, reaction}}, c.ProxyReactionChain...)
+}
+
+// Invokes records the provided Action and then invokes the ReactionFunc that
+// handles the action if one exists. defaultReturnObj is expected to be of the
+// same type as a normal call would return.
+func (c *Fake) Invokes(action Action, defaultReturnObj runtime.Object) (runtime.Object, error) {
+	c.Lock()
+	defer c.Unlock()
+
+	// Two independent deep copies: one handed to reactors (which may mutate
+	// it) and one appended to the recorded action log.
+	actionCopy := action.DeepCopy()
+	c.actions = append(c.actions, action.DeepCopy())
+	for _, reactor := range c.ReactionChain {
+		if !reactor.Handles(actionCopy) {
+			continue
+		}
+
+		handled, ret, err := reactor.React(actionCopy)
+		if !handled {
+			continue
+		}
+
+		return ret, err
+	}
+
+	// No reactor handled the action; fall back to the caller's default.
+	return defaultReturnObj, nil
+}
+
+// InvokesWatch records the provided Action and then invokes the WatchReactor
+// that handles the action if one exists. Unlike Invokes, there is no default:
+// an unhandled watch is an error.
+func (c *Fake) InvokesWatch(action Action) (watch.Interface, error) {
+	c.Lock()
+	defer c.Unlock()
+
+	// Reactors get their own deep copy; the recorded log gets another.
+	actionCopy := action.DeepCopy()
+	c.actions = append(c.actions, action.DeepCopy())
+	for _, reactor := range c.WatchReactionChain {
+		if !reactor.Handles(actionCopy) {
+			continue
+		}
+
+		handled, ret, err := reactor.React(actionCopy)
+		if !handled {
+			continue
+		}
+
+		return ret, err
+	}
+
+	return nil, fmt.Errorf("unhandled watch: %#v", action)
+}
+
+// InvokesProxy records the provided Action and then invokes the ProxyReactor
+// that handles the action if one exists. Returns nil when no reactor handles
+// the action successfully; a reactor error also causes fallthrough to the
+// next reactor rather than being surfaced to the caller.
+func (c *Fake) InvokesProxy(action Action) restclient.ResponseWrapper {
+	c.Lock()
+	defer c.Unlock()
+
+	actionCopy := action.DeepCopy()
+	c.actions = append(c.actions, action.DeepCopy())
+	for _, reactor := range c.ProxyReactionChain {
+		if !reactor.Handles(actionCopy) {
+			continue
+		}
+
+		handled, ret, err := reactor.React(actionCopy)
+		if !handled || err != nil {
+			continue
+		}
+
+		return ret
+	}
+
+	return nil
+}
+
+// ClearActions clears the history of actions called on the fake client.
+// Registered reactors are left untouched.
+func (c *Fake) ClearActions() {
+	c.Lock()
+	defer c.Unlock()
+
+	c.actions = make([]Action, 0)
+}
+
+// Actions returns a chronologically ordered slice of the fake actions called
+// on the fake client. The returned slice is a copy, so callers may inspect it
+// without holding the lock.
+func (c *Fake) Actions() []Action {
+	c.RLock()
+	defer c.RUnlock()
+	fa := make([]Action, len(c.actions))
+	copy(fa, c.actions)
+	return fa
+}
diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go
new file mode 100644
index 0000000000..993fcf6a1b
--- /dev/null
+++ b/vendor/k8s.io/client-go/testing/fixture.go
@@ -0,0 +1,557 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "fmt"
+ "sync"
+
+ jsonpatch "github.com/evanphx/json-patch"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/json"
+ "k8s.io/apimachinery/pkg/util/strategicpatch"
+ "k8s.io/apimachinery/pkg/watch"
+ restclient "k8s.io/client-go/rest"
+)
+
+// ObjectTracker keeps track of objects. It is intended to be used to
+// fake calls to a server by returning objects based on their kind,
+// namespace and name.
+type ObjectTracker interface {
+	// Add adds an object to the tracker. If object being added
+	// is a list, its items are added separately.
+	Add(obj runtime.Object) error
+
+	// Get retrieves the object by its kind, namespace and name.
+	Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error)
+
+	// Create adds an object to the tracker in the specified namespace.
+	Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error
+
+	// Update updates an existing object in the tracker in the specified namespace.
+	Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error
+
+	// List retrieves all objects of a given kind in the given
+	// namespace. Only non-List kinds are accepted.
+	List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error)
+
+	// Delete deletes an existing object from the tracker. If object
+	// didn't exist in the tracker prior to deletion, Delete returns
+	// no error.
+	Delete(gvr schema.GroupVersionResource, ns, name string) error
+
+	// Watch watches objects from the tracker. Watch returns a channel
+	// which will push added / modified / deleted object.
+	Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error)
+}
+
+// ObjectScheme abstracts the implementation of common operations on objects
+// (creation by kind and kind lookup), satisfied by runtime.Scheme.
+type ObjectScheme interface {
+	runtime.ObjectCreater
+	runtime.ObjectTyper
+}
+
+// ObjectReaction returns a ReactionFunc that applies core.Action to
+// the given tracker. It dispatches list/get/create/update/delete/patch
+// actions to the corresponding tracker methods; unrecognized action types
+// are reported as unhandled with an error.
+func ObjectReaction(tracker ObjectTracker) ReactionFunc {
+	return func(action Action) (bool, runtime.Object, error) {
+		ns := action.GetNamespace()
+		gvr := action.GetResource()
+		// Here and below we need to switch on implementation types,
+		// not on interfaces, as some interfaces are identical
+		// (e.g. UpdateAction and CreateAction), so if we use them,
+		// updates and creates end up matching the same case branch.
+		switch action := action.(type) {
+
+		case ListActionImpl:
+			obj, err := tracker.List(gvr, action.GetKind(), ns)
+			return true, obj, err
+
+		case GetActionImpl:
+			obj, err := tracker.Get(gvr, ns, action.GetName())
+			return true, obj, err
+
+		case CreateActionImpl:
+			objMeta, err := meta.Accessor(action.GetObject())
+			if err != nil {
+				return true, nil, err
+			}
+			if action.GetSubresource() == "" {
+				err = tracker.Create(gvr, action.GetObject(), ns)
+			} else {
+				// TODO: Currently we're handling subresource creation as an update
+				// on the enclosing resource. This works for some subresources but
+				// might not be generic enough.
+				err = tracker.Update(gvr, action.GetObject(), ns)
+			}
+			if err != nil {
+				return true, nil, err
+			}
+			// Return the object as stored by the tracker, not the input.
+			obj, err := tracker.Get(gvr, ns, objMeta.GetName())
+			return true, obj, err
+
+		case UpdateActionImpl:
+			objMeta, err := meta.Accessor(action.GetObject())
+			if err != nil {
+				return true, nil, err
+			}
+			err = tracker.Update(gvr, action.GetObject(), ns)
+			if err != nil {
+				return true, nil, err
+			}
+			obj, err := tracker.Get(gvr, ns, objMeta.GetName())
+			return true, obj, err
+
+		case DeleteActionImpl:
+			err := tracker.Delete(gvr, ns, action.GetName())
+			if err != nil {
+				return true, nil, err
+			}
+			return true, nil, nil
+
+		case PatchActionImpl:
+			// Get returns a deep copy, so the in-place patch below does not
+			// mutate the tracker's stored object until Update is called.
+			obj, err := tracker.Get(gvr, ns, action.GetName())
+			if err != nil {
+				return true, nil, err
+			}
+
+			old, err := json.Marshal(obj)
+			if err != nil {
+				return true, nil, err
+			}
+
+			switch action.GetPatchType() {
+			case types.JSONPatchType:
+				patch, err := jsonpatch.DecodePatch(action.GetPatch())
+				if err != nil {
+					return true, nil, err
+				}
+				modified, err := patch.Apply(old)
+				if err != nil {
+					return true, nil, err
+				}
+				if err = json.Unmarshal(modified, obj); err != nil {
+					return true, nil, err
+				}
+			case types.MergePatchType:
+				modified, err := jsonpatch.MergePatch(old, action.GetPatch())
+				if err != nil {
+					return true, nil, err
+				}
+
+				if err := json.Unmarshal(modified, obj); err != nil {
+					return true, nil, err
+				}
+			case types.StrategicMergePatchType:
+				mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj)
+				if err != nil {
+					return true, nil, err
+				}
+				if err = json.Unmarshal(mergedByte, obj); err != nil {
+					return true, nil, err
+				}
+			default:
+				return true, nil, fmt.Errorf("PatchType is not supported")
+			}
+
+			// Persist the patched object back into the tracker.
+			if err = tracker.Update(gvr, obj, ns); err != nil {
+				return true, nil, err
+			}
+
+			return true, obj, nil
+
+		default:
+			return false, nil, fmt.Errorf("no reaction implemented for %s", action)
+		}
+	}
+}
+
+// tracker is the default ObjectTracker implementation. All maps below are
+// guarded by lock.
+type tracker struct {
+	scheme  ObjectScheme
+	decoder runtime.Decoder
+	lock    sync.RWMutex
+	objects map[schema.GroupVersionResource][]runtime.Object
+	// The value type of watchers is a map of which the key is either a namespace or
+	// all/non namespace aka "" and its value is list of fake watchers.
+	// Manipulations on resources will broadcast the notification events into the
+	// watchers' channel. Note that too many unhandled events (currently 100,
+	// see apimachinery/pkg/watch.DefaultChanSize) will cause a panic.
+	watchers map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher
+}
+
+// Compile-time check that tracker satisfies ObjectTracker.
+var _ ObjectTracker = &tracker{}
+
+// NewObjectTracker returns an ObjectTracker that can be used to keep track
+// of objects for the fake clientset. Mostly useful for unit tests.
+func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder) ObjectTracker {
+	return &tracker{
+		scheme:   scheme,
+		decoder:  decoder,
+		objects:  make(map[schema.GroupVersionResource][]runtime.Object),
+		watchers: make(map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher),
+	}
+}
+
+// List returns a deep-copied list object containing all tracked objects of
+// the given gvr that match ns ("" matches all namespaces). An unknown gvr
+// yields an empty list, not an error.
+func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) {
+	// Heuristic for list kind: original kind + List suffix. Might
+	// not always be true but this tracker has a pretty limited
+	// understanding of the actual API model.
+	listGVK := gvk
+	listGVK.Kind = listGVK.Kind + "List"
+	// GVK does have the concept of "internal version". The scheme recognizes
+	// the runtime.APIVersionInternal, but not the empty string.
+	if listGVK.Version == "" {
+		listGVK.Version = runtime.APIVersionInternal
+	}
+
+	list, err := t.scheme.New(listGVK)
+	if err != nil {
+		return nil, err
+	}
+
+	if !meta.IsListType(list) {
+		return nil, fmt.Errorf("%q is not a list type", listGVK.Kind)
+	}
+
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	objs, ok := t.objects[gvr]
+	if !ok {
+		return list, nil
+	}
+
+	matchingObjs, err := filterByNamespaceAndName(objs, ns, "")
+	if err != nil {
+		return nil, err
+	}
+	if err := meta.SetList(list, matchingObjs); err != nil {
+		return nil, err
+	}
+	return list.DeepCopyObject(), nil
+}
+
+// Watch registers and returns a new fake watcher for the given gvr and
+// namespace. Subsequent add/update/delete calls on the tracker broadcast
+// events to it (see getWatches).
+func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	fakewatcher := watch.NewRaceFreeFake()
+
+	if _, exists := t.watchers[gvr]; !exists {
+		t.watchers[gvr] = make(map[string][]*watch.RaceFreeFakeWatcher)
+	}
+	t.watchers[gvr][ns] = append(t.watchers[gvr][ns], fakewatcher)
+	return fakewatcher, nil
+}
+
+// Get returns a deep copy of the tracked object matching gvr/ns/name, a
+// NotFound error when no object matches, or an error when more than one
+// matches. A stored non-success *metav1.Status is surfaced as a StatusError.
+func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) {
+	errNotFound := errors.NewNotFound(gvr.GroupResource(), name)
+
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	objs, ok := t.objects[gvr]
+	if !ok {
+		return nil, errNotFound
+	}
+
+	matchingObjs, err := filterByNamespaceAndName(objs, ns, name)
+	if err != nil {
+		return nil, err
+	}
+	if len(matchingObjs) == 0 {
+		return nil, errNotFound
+	}
+	if len(matchingObjs) > 1 {
+		return nil, fmt.Errorf("more than one object matched gvr %s, ns: %q name: %q", gvr, ns, name)
+	}
+
+	// Only one object should match in the tracker if it works
+	// correctly, as Add/Update methods enforce kind/namespace/name
+	// uniqueness.
+	obj := matchingObjs[0].DeepCopyObject()
+	if status, ok := obj.(*metav1.Status); ok {
+		if status.Status != metav1.StatusSuccess {
+			return nil, &errors.StatusError{ErrStatus: *status}
+		}
+	}
+
+	return obj, nil
+}
+
+// Add inserts obj into the tracker, expanding list objects into their items.
+// The gvr is guessed from the object's registered kinds; an object with
+// multiple registered kinds is added once per guessed resource.
+func (t *tracker) Add(obj runtime.Object) error {
+	if meta.IsListType(obj) {
+		return t.addList(obj, false)
+	}
+	objMeta, err := meta.Accessor(obj)
+	if err != nil {
+		return err
+	}
+	gvks, _, err := t.scheme.ObjectKinds(obj)
+	if err != nil {
+		return err
+	}
+	if len(gvks) == 0 {
+		return fmt.Errorf("no registered kinds for %v", obj)
+	}
+	for _, gvk := range gvks {
+		// NOTE: UnsafeGuessKindToResource is a heuristic and default match. The
+		// actual registration in apiserver can specify arbitrary route for a
+		// gvk. If a test uses such objects, it cannot preset the tracker with
+		// objects via Add(). Instead, it should trigger the Create() function
+		// of the tracker, where an arbitrary gvr can be specified.
+		gvr, _ := meta.UnsafeGuessKindToResource(gvk)
+		// Resource doesn't have the concept of "__internal" version, just set it to "".
+		if gvr.Version == runtime.APIVersionInternal {
+			gvr.Version = ""
+		}
+
+		err := t.add(gvr, obj, objMeta.GetNamespace(), false)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Create adds obj under the explicit gvr; it fails if an object with the
+// same namespace/name already exists (see add).
+func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
+	return t.add(gvr, obj, ns, false)
+}
+
+// Update replaces an existing object under the explicit gvr; it fails with
+// NotFound if no matching object exists (see add).
+func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
+	return t.add(gvr, obj, ns, true)
+}
+
+// getWatches returns the watchers interested in events for gvr in ns: those
+// registered for exactly ns plus, when ns is concrete, those registered for
+// all namespaces. Callers must hold t.lock.
+func (t *tracker) getWatches(gvr schema.GroupVersionResource, ns string) []*watch.RaceFreeFakeWatcher {
+	watches := []*watch.RaceFreeFakeWatcher{}
+	if t.watchers[gvr] != nil {
+		if w := t.watchers[gvr][ns]; w != nil {
+			watches = append(watches, w...)
+		}
+		if ns != metav1.NamespaceAll {
+			if w := t.watchers[gvr][metav1.NamespaceAll]; w != nil {
+				watches = append(watches, w...)
+			}
+		}
+	}
+	return watches
+}
+
+// add stores a deep copy of obj under gvr. With replaceExisting it behaves
+// like an update (NotFound when absent, Modify event when present); without
+// it, like a create (AlreadyExists when present, Add event otherwise). It
+// also validates that the request namespace matches the object's namespace.
+func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns string, replaceExisting bool) error {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	gr := gvr.GroupResource()
+
+	// To avoid the object from being accidentally modified by caller
+	// after it's been added to the tracker, we always store the deep
+	// copy.
+	obj = obj.DeepCopyObject()
+
+	newMeta, err := meta.Accessor(obj)
+	if err != nil {
+		return err
+	}
+
+	// Propagate namespace to the new object if hasn't already been set.
+	if len(newMeta.GetNamespace()) == 0 {
+		newMeta.SetNamespace(ns)
+	}
+
+	if ns != newMeta.GetNamespace() {
+		msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace())
+		return errors.NewBadRequest(msg)
+	}
+
+	for i, existingObj := range t.objects[gvr] {
+		oldMeta, err := meta.Accessor(existingObj)
+		if err != nil {
+			return err
+		}
+		if oldMeta.GetNamespace() == newMeta.GetNamespace() && oldMeta.GetName() == newMeta.GetName() {
+			if replaceExisting {
+				for _, w := range t.getWatches(gvr, ns) {
+					w.Modify(obj)
+				}
+				t.objects[gvr][i] = obj
+				return nil
+			}
+			return errors.NewAlreadyExists(gr, newMeta.GetName())
+		}
+	}
+
+	if replaceExisting {
+		// Tried to update but no matching object was found.
+		return errors.NewNotFound(gr, newMeta.GetName())
+	}
+
+	t.objects[gvr] = append(t.objects[gvr], obj)
+
+	for _, w := range t.getWatches(gvr, ns) {
+		w.Add(obj)
+	}
+
+	return nil
+}
+
+// addList decodes the items of a list object and adds each one via Add.
+// NOTE(review): the replaceExisting parameter is accepted but never used in
+// this body; items are always added with Add's create semantics.
+func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error {
+	list, err := meta.ExtractList(obj)
+	if err != nil {
+		return err
+	}
+	errs := runtime.DecodeList(list, t.decoder)
+	if len(errs) > 0 {
+		return errs[0]
+	}
+	for _, obj := range list {
+		if err := t.Add(obj); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Delete removes the object matching gvr/ns/name, broadcasting a Delete
+// event to interested watchers. Returns NotFound when no object matches.
+func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	found := false
+
+	for i, existingObj := range t.objects[gvr] {
+		objMeta, err := meta.Accessor(existingObj)
+		if err != nil {
+			return err
+		}
+		if objMeta.GetNamespace() == ns && objMeta.GetName() == name {
+			obj := t.objects[gvr][i]
+			t.objects[gvr] = append(t.objects[gvr][:i], t.objects[gvr][i+1:]...)
+			for _, w := range t.getWatches(gvr, ns) {
+				w.Delete(obj)
+			}
+			found = true
+			break
+		}
+	}
+
+	if found {
+		return nil
+	}
+
+	return errors.NewNotFound(gvr.GroupResource(), name)
+}
+
+// filterByNamespaceAndName returns all objects in the collection that
+// match provided namespace and name. Empty namespace matches
+// non-namespaced objects. An empty name matches every name.
+func filterByNamespaceAndName(objs []runtime.Object, ns, name string) ([]runtime.Object, error) {
+	var res []runtime.Object
+
+	for _, obj := range objs {
+		acc, err := meta.Accessor(obj)
+		if err != nil {
+			return nil, err
+		}
+		if ns != "" && acc.GetNamespace() != ns {
+			continue
+		}
+		if name != "" && acc.GetName() != name {
+			continue
+		}
+		res = append(res, obj)
+	}
+
+	return res, nil
+}
+
+// DefaultWatchReactor returns a WatchReactionFunc that always reports the
+// action as handled and returns the supplied watch interface and error.
+func DefaultWatchReactor(watchInterface watch.Interface, err error) WatchReactionFunc {
+	return func(action Action) (bool, watch.Interface, error) {
+		return true, watchInterface, err
+	}
+}
+
+// SimpleReactor is a Reactor. Each reaction function is attached to a given verb,resource tuple. "*" in either field matches everything for that value.
+// For instance, *,pods matches all verbs on pods. This allows for easier composition of reaction functions
+type SimpleReactor struct {
+	Verb     string
+	Resource string
+
+	Reaction ReactionFunc
+}
+
+// Handles reports whether this reactor's verb/resource patterns cover the
+// action ("*" is a wildcard for either field).
+func (r *SimpleReactor) Handles(action Action) bool {
+	verbCovers := r.Verb == "*" || r.Verb == action.GetVerb()
+	if !verbCovers {
+		return false
+	}
+	resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource
+	if !resourceCovers {
+		return false
+	}
+
+	return true
+}
+
+// React delegates to the configured reaction function.
+func (r *SimpleReactor) React(action Action) (bool, runtime.Object, error) {
+	return r.Reaction(action)
+}
+
+// SimpleWatchReactor is a WatchReactor. Each reaction function is attached to a given resource. "*" matches everything for that value.
+// For instance, *,pods matches all verbs on pods. This allows for easier composition of reaction functions
+type SimpleWatchReactor struct {
+	Resource string
+
+	Reaction WatchReactionFunc
+}
+
+// Handles reports whether this reactor's resource pattern covers the action
+// ("*" matches every resource).
+func (r *SimpleWatchReactor) Handles(action Action) bool {
+	resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource
+	if !resourceCovers {
+		return false
+	}
+
+	return true
+}
+
+// React delegates to the configured watch reaction function.
+func (r *SimpleWatchReactor) React(action Action) (bool, watch.Interface, error) {
+	return r.Reaction(action)
+}
+
+// SimpleProxyReactor is a ProxyReactor. Each reaction function is attached to a given resource. "*" matches everything for that value.
+// For instance, *,pods matches all verbs on pods. This allows for easier composition of reaction functions.
+type SimpleProxyReactor struct {
+	Resource string
+
+	Reaction ProxyReactionFunc
+}
+
+// Handles reports whether this reactor's resource pattern covers the action
+// ("*" matches every resource).
+func (r *SimpleProxyReactor) Handles(action Action) bool {
+	resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource
+	if !resourceCovers {
+		return false
+	}
+
+	return true
+}
+
+// React delegates to the configured proxy reaction function.
+func (r *SimpleProxyReactor) React(action Action) (bool, restclient.ResponseWrapper, error) {
+	return r.Reaction(action)
+}
diff --git a/vendor/k8s.io/utils/pointer/BUILD.bazel b/vendor/k8s.io/utils/pointer/BUILD.bazel
new file mode 100644
index 0000000000..7236e9db9e
--- /dev/null
+++ b/vendor/k8s.io/utils/pointer/BUILD.bazel
@@ -0,0 +1,9 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = ["pointer.go"],
+ importmap = "sigs.k8s.io/cluster-api-provider-aws/vendor/k8s.io/utils/pointer",
+ importpath = "k8s.io/utils/pointer",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/k8s.io/utils/pointer/OWNERS b/vendor/k8s.io/utils/pointer/OWNERS
new file mode 100644
index 0000000000..0d6392752a
--- /dev/null
+++ b/vendor/k8s.io/utils/pointer/OWNERS
@@ -0,0 +1,10 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+- apelisse
+- stewart-yu
+- thockin
+reviewers:
+- apelisse
+- stewart-yu
+- thockin
diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go
new file mode 100644
index 0000000000..a11a540f46
--- /dev/null
+++ b/vendor/k8s.io/utils/pointer/pointer.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pointer
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when,
+// for example, an API struct is handled by plugins which need to distinguish
+// "no plugin accepted this spec" from "this spec is empty".
+//
+// This function is only valid for structs and pointers to structs. Any other
+// type will cause a panic. Passing a typed nil pointer will return true.
+func AllPtrFieldsNil(obj interface{}) bool {
+	v := reflect.ValueOf(obj)
+	if !v.IsValid() {
+		panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj))
+	}
+	if v.Kind() == reflect.Ptr {
+		if v.IsNil() {
+			// A typed nil pointer trivially has all (zero) fields nil.
+			return true
+		}
+		v = v.Elem()
+	}
+	// Non-pointer fields are ignored; only pointer-kinded fields are checked.
+	for i := 0; i < v.NumField(); i++ {
+		if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() {
+			return false
+		}
+	}
+	return true
+}
+
+// Int32Ptr returns a pointer to an int32.
+func Int32Ptr(i int32) *int32 {
+	return &i
+}
+
+// Int64Ptr returns a pointer to an int64.
+func Int64Ptr(i int64) *int64 {
+	return &i
+}
+
+// Int32PtrDerefOr dereferences the int32 ptr and returns it if not nil,
+// else returns def.
+func Int32PtrDerefOr(ptr *int32, def int32) int32 {
+	if ptr != nil {
+		return *ptr
+	}
+	return def
+}
+
+// BoolPtr returns a pointer to a bool.
+func BoolPtr(b bool) *bool {
+	return &b
+}
+
+// StringPtr returns a pointer to the passed string.
+func StringPtr(s string) *string {
+	return &s
+}
+
+// Float32Ptr returns a pointer to the passed float32.
+func Float32Ptr(i float32) *float32 {
+	return &i
+}
+
+// Float64Ptr returns a pointer to the passed float64.
+func Float64Ptr(i float64) *float64 {
+	return &i
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index f57b5de61b..d6e7637f5a 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -76,6 +76,8 @@ github.com/dgrijalva/jwt-go
github.com/evanphx/json-patch
# github.com/go-logr/logr v0.1.0
github.com/go-logr/logr
+# github.com/go-logr/zapr v0.1.0
+github.com/go-logr/zapr
# github.com/gobuffalo/flect v0.1.5
github.com/gobuffalo/flect
# github.com/gogo/protobuf v1.2.1
@@ -223,6 +225,17 @@ go.opencensus.io/metric/metricproducer
go.opencensus.io/stats/internal
go.opencensus.io/internal
go.opencensus.io/trace/internal
+# go.uber.org/atomic v1.3.2
+go.uber.org/atomic
+# go.uber.org/multierr v1.1.0
+go.uber.org/multierr
+# go.uber.org/zap v1.9.1
+go.uber.org/zap
+go.uber.org/zap/buffer
+go.uber.org/zap/zapcore
+go.uber.org/zap/internal/bufferpool
+go.uber.org/zap/internal/color
+go.uber.org/zap/internal/exit
# golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8
golang.org/x/crypto/ssh/terminal
# golang.org/x/net v0.0.0-20190613194153-d28f0bde5980
@@ -343,10 +356,10 @@ gopkg.in/tomb.v1
gopkg.in/yaml.v2
# k8s.io/api v0.0.0-20190711103429-37c3b8b1ca65 => k8s.io/api v0.0.0-20190704095032-f4ca3d3bdf1d
k8s.io/api/core/v1
-k8s.io/api/apps/v1
k8s.io/api/authentication/v1
k8s.io/api/autoscaling/v1
k8s.io/api/policy/v1beta1
+k8s.io/api/apps/v1
k8s.io/api/admissionregistration/v1beta1
k8s.io/api/apps/v1beta1
k8s.io/api/apps/v1beta2
@@ -390,7 +403,6 @@ k8s.io/apimachinery/pkg/apis/meta/v1
k8s.io/apimachinery/pkg/runtime
k8s.io/apimachinery/pkg/runtime/schema
k8s.io/apimachinery/pkg/util/json
-k8s.io/apimachinery/pkg/types
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/runtime/serializer
k8s.io/apimachinery/pkg/util/wait
@@ -399,6 +411,7 @@ k8s.io/apimachinery/pkg/conversion
k8s.io/apimachinery/pkg/fields
k8s.io/apimachinery/pkg/labels
k8s.io/apimachinery/pkg/selection
+k8s.io/apimachinery/pkg/types
k8s.io/apimachinery/pkg/util/intstr
k8s.io/apimachinery/pkg/util/runtime
k8s.io/apimachinery/pkg/watch
@@ -411,13 +424,13 @@ k8s.io/apimachinery/pkg/util/naming
k8s.io/apimachinery/pkg/util/sets
k8s.io/apimachinery/pkg/apis/meta/v1/validation
k8s.io/apimachinery/pkg/util/validation/field
+k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
+k8s.io/apimachinery/pkg/util/yaml
k8s.io/apimachinery/pkg/runtime/serializer/json
k8s.io/apimachinery/pkg/runtime/serializer/protobuf
k8s.io/apimachinery/pkg/runtime/serializer/recognizer
k8s.io/apimachinery/pkg/runtime/serializer/versioning
k8s.io/apimachinery/pkg/util/version
-k8s.io/apimachinery/pkg/util/yaml
-k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
k8s.io/apimachinery/pkg/util/clock
k8s.io/apimachinery/pkg/util/strategicpatch
k8s.io/apimachinery/pkg/runtime/serializer/streaming
@@ -495,6 +508,7 @@ k8s.io/client-go/tools/clientcmd/api/v1
k8s.io/client-go/plugin/pkg/client/auth
k8s.io/client-go/tools/cache
k8s.io/client-go/restmapper
+k8s.io/client-go/testing
k8s.io/client-go/pkg/apis/clientauthentication
k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1
k8s.io/client-go/pkg/apis/clientauthentication/v1beta1
@@ -569,11 +583,12 @@ k8s.io/kubernetes/pkg/registry/core/service/ipallocator
k8s.io/kubernetes/pkg/apis/core
k8s.io/kubernetes/pkg/registry/core/service/allocator
# k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5
+k8s.io/utils/pointer
k8s.io/utils/exec
k8s.io/utils/integer
k8s.io/utils/buffer
k8s.io/utils/trace
-# sigs.k8s.io/cluster-api v0.0.0-20190711203908-5ffab93802d7 => sigs.k8s.io/cluster-api v0.0.0-20190711203908-5ffab93802d7
+# sigs.k8s.io/cluster-api v0.0.0-20190716185847-a507c44fc106 => sigs.k8s.io/cluster-api v0.0.0-20190716185847-a507c44fc106
sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset
sigs.k8s.io/cluster-api/cmd/clusterctl/cmd
sigs.k8s.io/cluster-api/pkg/apis/cluster/common
@@ -581,9 +596,10 @@ sigs.k8s.io/cluster-api/pkg/apis
sigs.k8s.io/cluster-api/pkg/controller/cluster
sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1
sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2
+sigs.k8s.io/cluster-api/pkg/errors
+sigs.k8s.io/cluster-api/pkg/util
sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha2
sigs.k8s.io/cluster-api/pkg/controller/remote
-sigs.k8s.io/cluster-api/pkg/errors
sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/deprecated/v1alpha1
sigs.k8s.io/cluster-api/cmd/clusterctl/clientcmd
sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer
@@ -593,7 +609,6 @@ sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/provider
sigs.k8s.io/cluster-api/cmd/clusterctl/phases
sigs.k8s.io/cluster-api/cmd/clusterctl/providercomponents
sigs.k8s.io/cluster-api/cmd/clusterctl/validation
-sigs.k8s.io/cluster-api/pkg/util
sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme
sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/existing
sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/kind
@@ -602,6 +617,7 @@ sigs.k8s.io/cluster-api/pkg/controller/noderefutil
# sigs.k8s.io/controller-runtime v0.2.0-beta.4
sigs.k8s.io/controller-runtime/pkg/client/config
sigs.k8s.io/controller-runtime/pkg/manager
+sigs.k8s.io/controller-runtime/pkg/runtime/log
sigs.k8s.io/controller-runtime/pkg/runtime/signals
sigs.k8s.io/controller-runtime/pkg/runtime/scheme
sigs.k8s.io/controller-runtime/pkg/client
@@ -618,21 +634,23 @@ sigs.k8s.io/controller-runtime/pkg/metrics
sigs.k8s.io/controller-runtime/pkg/recorder
sigs.k8s.io/controller-runtime/pkg/runtime/inject
sigs.k8s.io/controller-runtime/pkg/webhook
+sigs.k8s.io/controller-runtime/pkg/log
+sigs.k8s.io/controller-runtime/pkg/log/zap
sigs.k8s.io/controller-runtime/pkg/manager/signals
sigs.k8s.io/controller-runtime/pkg/scheme
sigs.k8s.io/controller-runtime/pkg/envtest
+sigs.k8s.io/controller-runtime/pkg/client/fake
sigs.k8s.io/controller-runtime/pkg/internal/controller
sigs.k8s.io/controller-runtime/pkg/predicate
sigs.k8s.io/controller-runtime/pkg/event
sigs.k8s.io/controller-runtime/pkg/source/internal
-sigs.k8s.io/controller-runtime/pkg/log
sigs.k8s.io/controller-runtime/pkg/cache/internal
sigs.k8s.io/controller-runtime/pkg/webhook/admission
sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher
sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics
sigs.k8s.io/controller-runtime/pkg/envtest/printer
-sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics
sigs.k8s.io/controller-runtime/pkg/internal/objectutil
+sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics
# sigs.k8s.io/controller-tools v0.2.0-beta.3
sigs.k8s.io/controller-tools/cmd/controller-gen
sigs.k8s.io/controller-tools/pkg/crd
diff --git a/vendor/sigs.k8s.io/cluster-api/README.md b/vendor/sigs.k8s.io/cluster-api/README.md
index 377d2d6d85..d404a7fe06 100644
--- a/vendor/sigs.k8s.io/cluster-api/README.md
+++ b/vendor/sigs.k8s.io/cluster-api/README.md
@@ -10,6 +10,19 @@ feedback on the API types themselves. All of the code here is to experiment with
the API and demo its abilities, in order to drive more technical feedback to the
API design. Because of this, all of the prototype code is rapidly changing.
+## Getting Started
+
+### Resources
+
+* GitBook: [cluster-api.sigs.k8s.io](https://cluster-api.sigs.k8s.io)
+
+### Prerequisites
+* `kubectl` is required, see [here](http://kubernetes.io/docs/user-guide/prereqs/).
+* `clusterctl` is a SIG-cluster-lifecycle sponsored tool to manage Cluster API clusters. See [here](cmd/clusterctl)
+
+### Using `clusterctl` to create a cluster
+* Doc [here](./docs/how-to-use-clusterctl.md)
+
![Cluster API Architecture](./docs/book/common_code/architecture.svg "Cluster API Architecture")
Learn more about the project's [scope, objectives, goals and requirements](./docs/scope-and-objectives.md), [feature proposals](./docs/proposals/) and [reference use cases](./docs/staging-use-cases.md).
@@ -74,15 +87,7 @@ Following are the implementations managed by third-parties adopting the standard
- The _master_ branch is where development happens, this might include breaking changes.
- The _release-X_ branches contain stable, backward compatible code. A new _release-X_ branch is created at every major (X) release.
-## Getting Started
-### Resources
-
-* GitBook: [cluster-api.sigs.k8s.io](https://cluster-api.sigs.k8s.io)
-
-### Prerequisites
-* `kubectl` is required, see [here](http://kubernetes.io/docs/user-guide/prereqs/).
-* `clusterctl` is a SIG-cluster-lifecycle sponsored tool to manage Cluster API clusters. See [here](cmd/clusterctl)
[notes]: https://docs.google.com/document/d/1Ys-DOR5UsgbMEeciuG0HOgDQc8kZsaWIWJeKJ1-UfbY/edit
[recordings]: https://www.youtube.com/playlist?list=PL69nYSiGNLP29D0nYgAGWt1ZFqS9Z7lw4
diff --git a/vendor/sigs.k8s.io/cluster-api/config/rbac/role.yaml b/vendor/sigs.k8s.io/cluster-api/config/rbac/role.yaml
index 74c2cfe1b9..60c51cd521 100644
--- a/vendor/sigs.k8s.io/cluster-api/config/rbac/role.yaml
+++ b/vendor/sigs.k8s.io/cluster-api/config/rbac/role.yaml
@@ -6,18 +6,6 @@ metadata:
creationTimestamp: null
name: manager-role
rules:
-- apiGroups:
- - ""
- resources:
- - nodes
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
- apiGroups:
- cluster.sigs.k8s.io
resources:
@@ -68,3 +56,15 @@ rules:
- get
- list
- watch
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
diff --git a/vendor/sigs.k8s.io/cluster-api/docs/how-to-use-clusterctl.md b/vendor/sigs.k8s.io/cluster-api/docs/how-to-use-clusterctl.md
new file mode 100644
index 0000000000..7afb140aa1
--- /dev/null
+++ b/vendor/sigs.k8s.io/cluster-api/docs/how-to-use-clusterctl.md
@@ -0,0 +1,85 @@
+# Using `clusterctl` to create a cluster from scratch
+
+This document provides an overview of how `clusterctl` works and explains how one can use `clusterctl`
+to create a Kubernetes cluster from scratch.
+
+## What is `clusterctl`?
+
+`clusterctl` is a CLI tool to create a Kubernetes cluster. `clusterctl` is provided by the [provider implementations](https://github.com/kubernetes-sigs/cluster-api#provider-implementations).
+It uses Cluster API provider implementations to provision resources needed by the Kubernetes cluster.
+
+## Creating a cluster
+
+`clusterctl` needs 4 YAML files to start with: `provider-components.yaml`, `cluster.yaml`, `machines.yaml`,
+and `addons.yaml`.
+
+* `provider-components.yaml` contains the *Custom Resource Definitions ([CRDs](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/))*
+of all the resources that are managed by Cluster API. Some examples of these resources
+are: `Cluster`, `Machine`, `MachineSet`, etc. For more details about Cluster API resources
+click [here](https://cluster-api.sigs.k8s.io/common_code/architecture.html#cluster-api-resources).
+* `cluster.yaml` defines an object of the resource type `Cluster`.
+* `machines.yaml` defines an object of the resource type `Machine`. Generally creates the machine
+that becomes the control-plane.
+* `addons.yaml` contains the addons for the provider.
+
+Many provider implementations come with helpful scripts to generate these YAMLs. Provider implementations
+can be found [here](https://github.com/kubernetes-sigs/cluster-api#provider-implementations).
+
+`clusterctl` also comes with additional features. For example, `clusterctl` can also take in an optional
+`bootstrap-only-components.yaml` to provide resources to the bootstrap cluster without also providing them
+to the target cluster post-pivot.
+
+For more details about all the supported options run:
+
+```
+clusterctl create cluster --help
+```
+
+After generating the YAML files run the following command:
+
+```
+clusterctl create cluster --provider --bootstrap-type -c cluster.yaml -m machines.yaml -p provider-components.yaml --addon-components addons.yaml
+```
+
+Example usage:
+
+```
+# VMware vSphere
+clusterctl create cluster --provider vsphere --bootstrap-type kind -c cluster.yaml -m machines.yaml -p provider-components.yaml --addon-components addons.yaml
+
+# Amazon AWS
+clusterctl create cluster --provider aws --bootstrap-type kind -c cluster.yaml -m machines.yaml -p provider-components.yaml --addon-components addons.yaml
+```
+
+**What happens when we run the command?**
+After running the command, `clusterctl` first creates a local cluster. If `kind` was passed as the `--bootstrap-type`
+it creates a local [kind](https://kind.sigs.k8s.io/) cluster. This cluster is generally referred to as the *bootstrap cluster*.
+On this kind Kubernetes cluster the `provider-components.yaml` file is applied. This step loads the CRDs into
+the cluster. It also creates 2 [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/)
+pods that run the cluster api controller and the provider specific controller. These pods register the custom
+controllers that manage the newly defined resources (`Cluster`, `Machine`, `MachineSet`, etc).
+
+Next, `clusterctl` applies the `cluster.yaml` and `machines.yaml` to the local kind Kubernetes cluster. This
+step creates a Kubernetes cluster with only a control-plane (as defined in `machines.yaml`) on the specified
+provider. This newly created cluster is generally referred to as the *management cluster* or *pivot cluster*
+or *initial target cluster*. The management cluster is responsible for creating and maintaining the workload cluster.
+
+Lastly, `clusterctl` moves all the CRDs and the custom controllers from the bootstrap cluster to the
+management cluster and deletes the locally created bootstrap cluster. This step is referred to as the *pivot*.
+
+### Creating a workload cluster using the management cluster
+
+The *workload cluster* also sometimes referred to as the *target cluster* is the Kubernetes cluster on to which
+the final application is deployed. The target cluster is responsible for handling the workload of the application,
+not the management cluster.
+
+Once the management cluster is up, we can create a workload cluster by simply applying the appropriate
+`cluster.yaml`, `machines.yaml` and `machineset.yaml` on the management cluster. This will create the VMs (Nodes)
+as defined in these YAMLs. Following this, a bootstrap mechanism is used to create a Kubernetes cluster on these VMs.
+While any of the several bootstrapping mechanisms can be used `kubeadm` is the popular option.
+
+**NOTE:** Workload clusters do not have any addons applied. Nodes in your workload clusters will be in the `NotReady`
+state until you apply the addons for a CNI plugin.
+
+Once the target cluster is up the user can create the `Deployments`, `Services`, etc that handle the workload
+of the application.
diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_controller.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_controller.go
index 02272e0047..4a07eed7b9 100644
--- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_controller.go
+++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_controller.go
@@ -49,7 +49,7 @@ func newReconciler(mgr manager.Manager, actuator Actuator) reconcile.Reconciler
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
// Create a new controller
- c, err := controller.New("cluster_controller", mgr, controller.Options{Reconciler: r})
+ c, err := controller.New("cluster-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller.go
index 4cf359f807..0727f004ab 100644
--- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller.go
+++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller.go
@@ -42,7 +42,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/source"
)
-const controllerName = "machine_controller"
+const controllerName = "machine-controller"
// Add creates a new Machine Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
@@ -114,8 +114,10 @@ func (r *ReconcileMachine) Reconcile(request reconcile.Request) (reconcile.Resul
// Cluster might be nil as some providers might not require a cluster object
// for machine management.
- cluster, err := r.getCluster(ctx, m)
- if err != nil {
+ cluster, err := util.GetClusterFromMetadata(ctx, r.Client, m.ObjectMeta)
+ if errors.Cause(err) == util.ErrNoCluster {
+ klog.Infof("Machine %q in namespace %q doesn't specify %q label, assuming nil cluster", m.Name, m.Namespace, clusterv1.MachineClusterLabelName)
+ } else if err != nil {
return reconcile.Result{}, err
}
@@ -204,25 +206,6 @@ func (r *ReconcileMachine) Reconcile(request reconcile.Request) (reconcile.Resul
return reconcile.Result{}, nil
}
-func (r *ReconcileMachine) getCluster(ctx context.Context, machine *clusterv1.Machine) (*clusterv1.Cluster, error) {
- if machine.Labels[clusterv1.MachineClusterLabelName] == "" {
- klog.Infof("Machine %q in namespace %q doesn't specify %q label, assuming nil cluster", machine.Name, machine.Namespace, clusterv1.MachineClusterLabelName)
- return nil, nil
- }
-
- cluster := &clusterv1.Cluster{}
- key := client.ObjectKey{
- Namespace: machine.Namespace,
- Name: machine.Labels[clusterv1.MachineClusterLabelName],
- }
-
- if err := r.Client.Get(ctx, key, cluster); err != nil {
- return nil, err
- }
-
- return cluster, nil
-}
-
var (
errNilNodeRef = errors.New("noderef is nil")
errLastControlPlaneNode = errors.New("last control plane member")
diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_controller.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_controller.go
index 8ac7c25a21..4dda8db264 100644
--- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_controller.go
+++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_controller.go
@@ -42,7 +42,7 @@ import (
)
// controllerName is the name of this controller
-const controllerName = "machinedeployment_controller"
+const controllerName = "machinedeployment-controller"
var (
// controllerKind contains the schema.GroupVersionKind for this controller type.
@@ -171,8 +171,10 @@ func (r *ReconcileMachineDeployment) reconcile(ctx context.Context, d *v1alpha2.
// Cluster might be nil as some providers might not require a cluster object
// for machine management.
- cluster, err := r.getCluster(d)
- if err != nil {
+ cluster, err := util.GetClusterFromMetadata(ctx, r.Client, d.ObjectMeta)
+ if errors.Cause(err) == util.ErrNoCluster {
+ klog.Infof("MachineDeployment %q in namespace %q doesn't specify %q label, assuming nil cluster", d.Name, d.Namespace, v1alpha2.MachineClusterLabelName)
+ } else if err != nil {
return reconcile.Result{}, err
}
@@ -247,7 +249,6 @@ func (r *ReconcileMachineDeployment) getCluster(d *v1alpha2.MachineDeployment) (
// getMachineSetsForDeployment returns a list of MachineSets associated with a MachineDeployment.
func (r *ReconcileMachineDeployment) getMachineSetsForDeployment(d *v1alpha2.MachineDeployment) ([]*v1alpha2.MachineSet, error) {
-
// List all MachineSets to find those we own but that no longer match our selector.
machineSets := &v1alpha2.MachineSetList{}
if err := r.Client.List(context.Background(), machineSets, client.InNamespace(d.Namespace)); err != nil {
diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_controller.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_controller.go
index c70d3c3186..766c8b2b67 100644
--- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_controller.go
+++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_controller.go
@@ -46,7 +46,7 @@ import (
)
// controllerName is the name of this controller
-const controllerName = "machineset_controller"
+const controllerName = "machineset-controller"
var (
// controllerKind contains the schema.GroupVersionKind for this controller type.
@@ -180,8 +180,10 @@ func (r *ReconcileMachineSet) reconcile(ctx context.Context, machineSet *cluster
// Cluster might be nil as some providers might not require a cluster object
// for machine management.
- cluster, err := r.getCluster(machineSet)
- if err != nil {
+ cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machineSet.ObjectMeta)
+ if errors.Cause(err) == util.ErrNoCluster {
+ klog.Infof("MachineSet %q in namespace %q doesn't specify %q label, assuming nil cluster", machineSet.Name, machineSet.Namespace, clusterv1.MachineClusterLabelName)
+ } else if err != nil {
return reconcile.Result{}, err
}
diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/util/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/util/BUILD.bazel
index b38a3fecb3..6916ef8a25 100644
--- a/vendor/sigs.k8s.io/cluster-api/pkg/util/BUILD.bazel
+++ b/vendor/sigs.k8s.io/cluster-api/pkg/util/BUILD.bazel
@@ -10,16 +10,20 @@ go_library(
importpath = "sigs.k8s.io/cluster-api/pkg/util",
visibility = ["//visibility:public"],
deps = [
+ "//vendor/github.com/pkg/errors:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2:go_default_library",
"//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library",
+ "//vendor/sigs.k8s.io/controller-runtime/pkg/handler:go_default_library",
+ "//vendor/sigs.k8s.io/controller-runtime/pkg/reconcile:go_default_library",
],
)
@@ -27,4 +31,13 @@ go_test(
name = "go_default_test",
srcs = ["util_test.go"],
embed = [":go_default_library"],
+ deps = [
+ "//vendor/k8s.io/api/core/v1:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+ "//vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2:go_default_library",
+ "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library",
+ "//vendor/sigs.k8s.io/controller-runtime/pkg/handler:go_default_library",
+ "//vendor/sigs.k8s.io/controller-runtime/pkg/reconcile:go_default_library",
+ ],
)
diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/util/util.go b/vendor/sigs.k8s.io/cluster-api/pkg/util/util.go
index 1e55c8d14a..15e48346f2 100644
--- a/vendor/sigs.k8s.io/cluster-api/pkg/util/util.go
+++ b/vendor/sigs.k8s.io/cluster-api/pkg/util/util.go
@@ -18,7 +18,6 @@ package util
import (
"context"
- "errors"
"fmt"
"io"
"math/rand"
@@ -29,15 +28,19 @@ import (
"strings"
"time"
+ "github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/klog"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
@@ -49,7 +52,8 @@ const (
)
var (
- rnd = rand.New(rand.NewSource(time.Now().UnixNano()))
+ rnd = rand.New(rand.NewSource(time.Now().UnixNano()))
+ ErrNoCluster = fmt.Errorf("no %q label present", clusterv1.MachineClusterLabelName)
)
// RandomToken returns a random token.
@@ -66,22 +70,21 @@ func RandomString(n int) string {
return string(result)
}
-// GetControlPlaneMachine returns a control plane machine from input.
-// Deprecated: use GetControlPlaneMachines.
-func GetControlPlaneMachine(machines []*clusterv1.Machine) *clusterv1.Machine {
+// GetControlPlaneMachines returns a slice containing control plane machines.
+func GetControlPlaneMachines(machines []*clusterv1.Machine) (res []*clusterv1.Machine) {
for _, machine := range machines {
if IsControlPlaneMachine(machine) {
- return machine
+ res = append(res, machine)
}
}
- return nil
+ return
}
-// GetControlPlaneMachines returns a slice containing control plane machines.
-func GetControlPlaneMachines(machines []*clusterv1.Machine) (res []*clusterv1.Machine) {
- for _, machine := range machines {
- if IsControlPlaneMachine(machine) {
- res = append(res, machine)
+// GetControlPlaneMachinesFromList returns a slice containing control plane machines.
+func GetControlPlaneMachinesFromList(machineList *clusterv1.MachineList) (res []*clusterv1.Machine) {
+ for _, machine := range machineList.Items {
+ if IsControlPlaneMachine(&machine) {
+ res = append(res, &machine)
}
}
return
@@ -149,6 +152,75 @@ func IsNodeReady(node *v1.Node) bool {
return false
}
+// GetClusterFromMetadata returns the Cluster object (if present) using the object metadata.
+func GetClusterFromMetadata(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Cluster, error) {
+ if obj.Labels[clusterv1.MachineClusterLabelName] == "" {
+ return nil, errors.WithStack(ErrNoCluster)
+ }
+ return GetClusterByName(ctx, c, obj.Namespace, obj.Labels[clusterv1.MachineClusterLabelName])
+}
+
+// GetClusterByName finds and return a Cluster object using the specified params.
+func GetClusterByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.Cluster, error) {
+ cluster := &clusterv1.Cluster{}
+ key := client.ObjectKey{
+ Namespace: namespace,
+ Name: name,
+ }
+
+ if err := c.Get(ctx, key, cluster); err != nil {
+ return nil, err
+ }
+
+ return cluster, nil
+}
+
+// GetOwnerMachine returns the Machine object owning the current resource.
+func GetOwnerMachine(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Machine, error) {
+ for _, ref := range obj.OwnerReferences {
+ if ref.Kind == "Machine" && ref.APIVersion == clusterv1.SchemeGroupVersion.String() {
+ return GetMachineByName(ctx, c, obj.Namespace, obj.Name)
+ }
+ }
+ return nil, nil
+}
+
+// GetMachineByName finds and return a Machine object using the specified params.
+func GetMachineByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.Machine, error) {
+ m := &clusterv1.Machine{}
+ key := client.ObjectKey{Name: name, Namespace: namespace}
+ if err := c.Get(ctx, key, m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// MachineToInfrastructureMapFunc returns a handler.ToRequestsFunc that watches for
+// Machine events and returns reconciliation requests for an infrastructure provider object.
+func MachineToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.ToRequestsFunc {
+ return func(o handler.MapObject) []reconcile.Request {
+ m, ok := o.Object.(*clusterv1.Machine)
+ if !ok {
+ return nil
+ }
+
+ // Return early if the GroupVersionKind doesn't match what we expect.
+ infraGVK := m.Spec.InfrastructureRef.GroupVersionKind()
+ if gvk != infraGVK {
+ return nil
+ }
+
+ return []reconcile.Request{
+ {
+ NamespacedName: client.ObjectKey{
+ Namespace: m.Namespace,
+ Name: m.Spec.InfrastructureRef.Name,
+ },
+ },
+ }
+ }
+}
+
// HasOwnerRef returns true if the OwnerReference is already in the slice.
func HasOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) bool {
for _, r := range ownerReferences {
diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/util/util_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/util/util_test.go
index d1dc35c09e..b0c1cb0964 100644
--- a/vendor/sigs.k8s.io/cluster-api/pkg/util/util_test.go
+++ b/vendor/sigs.k8s.io/cluster-api/pkg/util/util_test.go
@@ -19,7 +19,16 @@ package util
import (
"io/ioutil"
"os"
+ "reflect"
"testing"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const validCluster = `
@@ -334,3 +343,78 @@ func createTempFile(contents string) (string, error) {
f.WriteString(contents)
return f.Name(), nil
}
+
+func TestMachineToInfrastructureMapFunc(t *testing.T) {
+ var testcases = []struct {
+ name string
+ input schema.GroupVersionKind
+ request handler.MapObject
+ output []reconcile.Request
+ }{
+ {
+ name: "should reconcile infra-1",
+ input: schema.GroupVersionKind{
+ Group: "foo.cluster.sigs.k8s.io",
+ Version: "v1alpha2",
+ Kind: "TestMachine",
+ },
+ request: handler.MapObject{
+ Object: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "default",
+ Name: "test-1",
+ },
+ Spec: clusterv1.MachineSpec{
+ InfrastructureRef: corev1.ObjectReference{
+ APIVersion: "foo.cluster.sigs.k8s.io/v1alpha2",
+ Kind: "TestMachine",
+ Name: "infra-1",
+ },
+ },
+ },
+ },
+ output: []reconcile.Request{
+ {
+ NamespacedName: client.ObjectKey{
+ Namespace: "default",
+ Name: "infra-1",
+ },
+ },
+ },
+ },
+ {
+ name: "should return no matching reconcile requests",
+ input: schema.GroupVersionKind{
+ Group: "foo.cluster.sigs.k8s.io",
+ Version: "v1alpha2",
+ Kind: "TestMachine",
+ },
+ request: handler.MapObject{
+ Object: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "default",
+ Name: "test-1",
+ },
+ Spec: clusterv1.MachineSpec{
+ InfrastructureRef: corev1.ObjectReference{
+ APIVersion: "bar.cluster.sigs.k8s.io/v1alpha2",
+ Kind: "TestMachine",
+ Name: "bar-1",
+ },
+ },
+ },
+ },
+ output: nil,
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ fn := MachineToInfrastructureMapFunc(tc.input)
+ out := fn(tc.request)
+ if !reflect.DeepEqual(out, tc.output) {
+ t.Fatalf("Unexpected output. Got: %v, Want: %v", out, tc.output)
+ }
+ })
+ }
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/BUILD.bazel b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/BUILD.bazel
new file mode 100644
index 0000000000..eb632a4ff3
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/BUILD.bazel
@@ -0,0 +1,23 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "client.go",
+ "doc.go",
+ ],
+ importmap = "sigs.k8s.io/cluster-api-provider-aws/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake",
+ importpath = "sigs.k8s.io/controller-runtime/pkg/client/fake",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+ "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
+ "//vendor/k8s.io/client-go/testing:go_default_library",
+ "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library",
+ "//vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil:go_default_library",
+ "//vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil:go_default_library",
+ ],
+)
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go
new file mode 100644
index 0000000000..7a348a5867
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go
@@ -0,0 +1,257 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/testing"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+ "sigs.k8s.io/controller-runtime/pkg/internal/objectutil"
+)
+
+type fakeClient struct {
+ tracker testing.ObjectTracker
+ scheme *runtime.Scheme
+}
+
+var _ client.Client = &fakeClient{}
+
+// NewFakeClient creates a new fake client for testing.
+// You can choose to initialize it with a slice of runtime.Object.
+// Deprecated: use NewFakeClientWithScheme. You should always be
+// passing an explicit Scheme.
+func NewFakeClient(initObjs ...runtime.Object) client.Client {
+ return NewFakeClientWithScheme(scheme.Scheme, initObjs...)
+}
+
+// NewFakeClientWithScheme creates a new fake client with the given scheme
+// for testing.
+// You can choose to initialize it with a slice of runtime.Object.
+func NewFakeClientWithScheme(clientScheme *runtime.Scheme, initObjs ...runtime.Object) client.Client {
+ tracker := testing.NewObjectTracker(clientScheme, scheme.Codecs.UniversalDecoder())
+ for _, obj := range initObjs {
+ err := tracker.Add(obj)
+ if err != nil {
+ panic(fmt.Errorf("failed to add object %v to fake client: %v", obj, err))
+ }
+ }
+ return &fakeClient{
+ tracker: tracker,
+ scheme: clientScheme,
+ }
+}
+
+func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error {
+ gvr, err := getGVRFromObject(obj, c.scheme)
+ if err != nil {
+ return err
+ }
+ o, err := c.tracker.Get(gvr, key.Namespace, key.Name)
+ if err != nil {
+ return err
+ }
+ j, err := json.Marshal(o)
+ if err != nil {
+ return err
+ }
+ decoder := scheme.Codecs.UniversalDecoder()
+ _, _, err = decoder.Decode(j, nil, obj)
+ return err
+}
+
+func (c *fakeClient) List(ctx context.Context, obj runtime.Object, opts ...client.ListOptionFunc) error {
+ gvk, err := apiutil.GVKForObject(obj, c.scheme)
+ if err != nil {
+ return err
+ }
+
+ if !strings.HasSuffix(gvk.Kind, "List") {
+ return fmt.Errorf("non-list type %T (kind %q) passed as output", obj, gvk)
+ }
+ // we need the non-list GVK, so chop off the "List" from the end of the kind
+ gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
+
+ listOpts := client.ListOptions{}
+ listOpts.ApplyOptions(opts)
+
+ gvr, _ := meta.UnsafeGuessKindToResource(gvk)
+ o, err := c.tracker.List(gvr, gvk, listOpts.Namespace)
+ if err != nil {
+ return err
+ }
+ j, err := json.Marshal(o)
+ if err != nil {
+ return err
+ }
+ decoder := scheme.Codecs.UniversalDecoder()
+ _, _, err = decoder.Decode(j, nil, obj)
+ if err != nil {
+ return err
+ }
+
+ if listOpts.LabelSelector != nil {
+ objs, err := meta.ExtractList(obj)
+ if err != nil {
+ return err
+ }
+ filteredObjs, err := objectutil.FilterWithLabels(objs, listOpts.LabelSelector)
+ if err != nil {
+ return err
+ }
+ err = meta.SetList(obj, filteredObjs)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *fakeClient) Create(ctx context.Context, obj runtime.Object, opts ...client.CreateOptionFunc) error {
+ createOptions := &client.CreateOptions{}
+ createOptions.ApplyOptions(opts)
+
+ for _, dryRunOpt := range createOptions.DryRun {
+ if dryRunOpt == metav1.DryRunAll {
+ return nil
+ }
+ }
+
+ gvr, err := getGVRFromObject(obj, c.scheme)
+ if err != nil {
+ return err
+ }
+ accessor, err := meta.Accessor(obj)
+ if err != nil {
+ return err
+ }
+ return c.tracker.Create(gvr, obj, accessor.GetNamespace())
+}
+
+func (c *fakeClient) Delete(ctx context.Context, obj runtime.Object, opts ...client.DeleteOptionFunc) error {
+ gvr, err := getGVRFromObject(obj, c.scheme)
+ if err != nil {
+ return err
+ }
+ accessor, err := meta.Accessor(obj)
+ if err != nil {
+ return err
+ }
+ //TODO: implement propagation
+ return c.tracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName())
+}
+
+func (c *fakeClient) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOptionFunc) error {
+ updateOptions := &client.UpdateOptions{}
+ updateOptions.ApplyOptions(opts)
+
+ for _, dryRunOpt := range updateOptions.DryRun {
+ if dryRunOpt == metav1.DryRunAll {
+ return nil
+ }
+ }
+
+ gvr, err := getGVRFromObject(obj, c.scheme)
+ if err != nil {
+ return err
+ }
+ accessor, err := meta.Accessor(obj)
+ if err != nil {
+ return err
+ }
+ return c.tracker.Update(gvr, obj, accessor.GetNamespace())
+}
+
+func (c *fakeClient) Patch(ctx context.Context, obj runtime.Object, patch client.Patch, opts ...client.PatchOptionFunc) error {
+ patchOptions := &client.PatchOptions{}
+ patchOptions.ApplyOptions(opts)
+
+ for _, dryRunOpt := range patchOptions.DryRun {
+ if dryRunOpt == metav1.DryRunAll {
+ return nil
+ }
+ }
+
+ gvr, err := getGVRFromObject(obj, c.scheme)
+ if err != nil {
+ return err
+ }
+ accessor, err := meta.Accessor(obj)
+ if err != nil {
+ return err
+ }
+ data, err := patch.Data(obj)
+ if err != nil {
+ return err
+ }
+
+ reaction := testing.ObjectReaction(c.tracker)
+ handled, o, err := reaction(testing.NewPatchAction(gvr, accessor.GetNamespace(), accessor.GetName(), patch.Type(), data))
+ if err != nil {
+ return err
+ }
+ if !handled {
+ panic("tracker could not handle patch method")
+ }
+
+ j, err := json.Marshal(o)
+ if err != nil {
+ return err
+ }
+ decoder := scheme.Codecs.UniversalDecoder()
+ _, _, err = decoder.Decode(j, nil, obj)
+ return err
+}
+
+func (c *fakeClient) Status() client.StatusWriter {
+ return &fakeStatusWriter{client: c}
+}
+
+func getGVRFromObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionResource, error) {
+ gvk, err := apiutil.GVKForObject(obj, scheme)
+ if err != nil {
+ return schema.GroupVersionResource{}, err
+ }
+ gvr, _ := meta.UnsafeGuessKindToResource(gvk)
+ return gvr, nil
+}
+
+type fakeStatusWriter struct {
+ client *fakeClient
+}
+
+func (sw *fakeStatusWriter) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOptionFunc) error {
+ // TODO(droot): This results in full update of the obj (spec + status). Need
+ // a way to update status field only.
+ return sw.client.Update(ctx, obj, opts...)
+}
+
+func (sw *fakeStatusWriter) Patch(ctx context.Context, obj runtime.Object, patch client.Patch, opts ...client.PatchOptionFunc) error {
+ // TODO(droot): This results in full update of the obj (spec + status). Need
+ // a way to update status field only.
+ return sw.client.Patch(ctx, obj, patch, opts...)
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go
new file mode 100644
index 0000000000..3b9099fa0a
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package fake provides a fake client for testing.
+
+A fake client is backed by a simple object store indexed by GroupVersionResource.
+You can create a fake client with optional objects.
+
+ client := NewFakeClient(initObjs...) // initObjs is a slice of runtime.Object
+
+You can invoke the methods defined in the Client interface.
+
+When in doubt, it's almost always better not to use this package and instead use
+envtest.Environment with a real client and API server.
+*/
+package fake
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/BUILD.bazel b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/BUILD.bazel
new file mode 100644
index 0000000000..be0dc45e55
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/BUILD.bazel
@@ -0,0 +1,22 @@
+# Build rule for the vendored sigs.k8s.io/controller-runtime/pkg/log/zap
+# package. NOTE(review): follows the gazelle go_default_library convention —
+# likely auto-generated; prefer regenerating over hand-editing if confirmed.
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "kube_helpers.go",
+        "zap.go",
+    ],
+    importmap = "sigs.k8s.io/cluster-api-provider-aws/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap",
+    importpath = "sigs.k8s.io/controller-runtime/pkg/log/zap",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//vendor/github.com/go-logr/logr:go_default_library",
+        "//vendor/github.com/go-logr/zapr:go_default_library",
+        "//vendor/go.uber.org/zap:go_default_library",
+        "//vendor/go.uber.org/zap/buffer:go_default_library",
+        "//vendor/go.uber.org/zap/zapcore:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
+    ],
+)
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/kube_helpers.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/kube_helpers.go
new file mode 100644
index 0000000000..2c0d88386d
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/kube_helpers.go
@@ -0,0 +1,127 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package zap
+
+import (
+ "fmt"
+
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/zapcore"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+// KubeAwareEncoder is a Kubernetes-aware Zap Encoder.
+// Instead of trying to force Kubernetes objects to implement
+// ObjectMarshaller, we just implement a wrapper around a normal
+// ObjectMarshaller that checks for Kubernetes objects.
+type KubeAwareEncoder struct {
+	// Encoder is the zapcore.Encoder that this encoder delegates to
+	zapcore.Encoder
+
+	// Verbose controls whether or not the full object is printed.
+	// If false, only name, namespace, api version, and kind are printed.
+	// Otherwise, the full object is logged via the delegate encoder's
+	// normal Stringer handling.
+	Verbose bool
+}
+
+// namespacedNameWrapper is a zapcore.ObjectMarshaler for Kubernetes
+// NamespacedName values, logging them as structured name/namespace fields
+// instead of the default "namespace/name" Stringer output.
+type namespacedNameWrapper struct {
+	types.NamespacedName
+}
+
+// MarshalLogObject implements zapcore.ObjectMarshaler. The namespace field
+// is omitted when empty (cluster-scoped names).
+func (w namespacedNameWrapper) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+	if w.Namespace != "" {
+		enc.AddString("namespace", w.Namespace)
+	}
+
+	enc.AddString("name", w.Name)
+
+	return nil
+}
+
+// kubeObjectWrapper is a zapcore.ObjectMarshaler for Kubernetes objects,
+// logging only identifying metadata (apiVersion/kind/namespace/name) rather
+// than the full object.
+type kubeObjectWrapper struct {
+	obj runtime.Object
+}
+
+// MarshalLogObject implements zapcore.ObjectMarshaler
+func (w kubeObjectWrapper) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+	// TODO(directxman12): log kind and apiversion if not set explicitly (common case)
+	// -- needs a scheme to convert to the GVK.
+	gvk := w.obj.GetObjectKind().GroupVersionKind()
+	// Only emit apiVersion/kind when the TypeMeta is actually populated;
+	// objects fetched via typed clients commonly have it blank.
+	if gvk.Version != "" {
+		enc.AddString("apiVersion", gvk.GroupVersion().String())
+		enc.AddString("kind", gvk.Kind)
+	}
+
+	objMeta, err := meta.Accessor(w.obj)
+	if err != nil {
+		return fmt.Errorf("got runtime.Object without object metadata: %v", w.obj)
+	}
+
+	// Namespace is omitted for cluster-scoped objects (empty namespace).
+	ns := objMeta.GetNamespace()
+	if ns != "" {
+		enc.AddString("namespace", ns)
+	}
+	enc.AddString("name", objMeta.GetName())
+
+	return nil
+}
+
+// NB(directxman12): can't just override AddReflected, since the encoder calls AddReflected on itself directly
+
+// Clone implements zapcore.Encoder. Zap clones encoders when deriving child
+// loggers (e.g. via With), so the clone must carry over Verbose — otherwise
+// derived loggers silently fall back to terse object logging.
+func (k *KubeAwareEncoder) Clone() zapcore.Encoder {
+	return &KubeAwareEncoder{
+		Encoder: k.Encoder.Clone(),
+		Verbose: k.Verbose,
+	}
+}
+
+// EncodeEntry implements zapcore.Encoder. In non-verbose mode it rewrites
+// stringer-typed fields holding Kubernetes runtime.Objects or NamespacedNames
+// into structured object-marshaler fields before delegating to the wrapped
+// encoder; in verbose mode it delegates unchanged.
+func (k *KubeAwareEncoder) EncodeEntry(entry zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
+	if k.Verbose {
+		// Kubernetes objects implement fmt.Stringer, so if we
+		// want verbose output, just delegate to that.
+		return k.Encoder.EncodeEntry(entry, fields)
+	}
+
+	for i, field := range fields {
+		// intercept stringer fields that happen to be Kubernetes runtime.Object or
+		// types.NamespacedName values (Kubernetes runtime.Objects commonly
+		// implement String, apparently).
+		if field.Type == zapcore.StringerType {
+			switch val := field.Interface.(type) {
+			case runtime.Object:
+				fields[i] = zapcore.Field{
+					Type:      zapcore.ObjectMarshalerType,
+					Key:       field.Key,
+					Interface: kubeObjectWrapper{obj: val},
+				}
+			case types.NamespacedName:
+				fields[i] = zapcore.Field{
+					Type:      zapcore.ObjectMarshalerType,
+					Key:       field.Key,
+					Interface: namespacedNameWrapper{NamespacedName: val},
+				}
+			}
+		}
+	}
+
+	return k.Encoder.EncodeEntry(entry, fields)
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/zap.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/zap.go
new file mode 100644
index 0000000000..5812e85bfb
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/zap.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package zap contains helpers for setting up a new logr.Logger instance
+// using the Zap logging framework.
+package zap
+
+import (
+ "io"
+ "os"
+ "time"
+
+ "github.com/go-logr/logr"
+ "github.com/go-logr/zapr"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+// Logger is a Logger implementation.
+// If development is true, a Zap development config will be used
+// (stacktraces on warnings, no sampling), otherwise a Zap production
+// config will be used (stacktraces on errors, sampling).
+// Output goes to stderr.
+func Logger(development bool) logr.Logger {
+	return LoggerTo(os.Stderr, development)
+}
+
+// LoggerTo returns a new Logger implementation using Zap which logs
+// to the given destination, instead of stderr. It otherwise behaves like
+// Logger.
+func LoggerTo(destWriter io.Writer, development bool) logr.Logger {
+	return zapr.NewLogger(RawLoggerTo(destWriter, development))
+}
+
+// RawLoggerTo returns a new zap.Logger configured with KubeAwareEncoder
+// which logs to a given destination. Additional zap.Options are applied
+// after the mode-specific defaults, so callers can override them.
+func RawLoggerTo(destWriter io.Writer, development bool, opts ...zap.Option) *zap.Logger {
+	// this basically mimics NewConfig, but with a custom sink
+	sink := zapcore.AddSync(destWriter)
+
+	var enc zapcore.Encoder
+	var lvl zap.AtomicLevel
+	if development {
+		// Development: human-readable console output, debug level, stacks on errors.
+		encCfg := zap.NewDevelopmentEncoderConfig()
+		enc = zapcore.NewConsoleEncoder(encCfg)
+		lvl = zap.NewAtomicLevelAt(zap.DebugLevel)
+		opts = append(opts, zap.Development(), zap.AddStacktrace(zap.ErrorLevel))
+	} else {
+		// Production: JSON output, info level, and sampling (first 100 of
+		// identical entries per second, then every 100th) to bound log volume.
+		encCfg := zap.NewProductionEncoderConfig()
+		enc = zapcore.NewJSONEncoder(encCfg)
+		lvl = zap.NewAtomicLevelAt(zap.InfoLevel)
+		opts = append(opts, zap.AddStacktrace(zap.WarnLevel),
+			zap.WrapCore(func(core zapcore.Core) zapcore.Core {
+				return zapcore.NewSampler(core, time.Second, 100, 100)
+			}))
+	}
+	// Skip one caller frame so call sites are reported past the logr wrapper;
+	// internal zap errors also go to the same sink.
+	opts = append(opts, zap.AddCallerSkip(1), zap.ErrorOutput(sink))
+	log := zap.New(zapcore.NewCore(&KubeAwareEncoder{Encoder: enc, Verbose: development}, sink, lvl))
+	log = log.WithOptions(opts...)
+	return log
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/BUILD.bazel b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/BUILD.bazel
new file mode 100644
index 0000000000..fd01f34e73
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/BUILD.bazel
@@ -0,0 +1,14 @@
+# Build rule for the vendored (deprecated) controller-runtime pkg/runtime/log
+# shim. NOTE(review): follows the gazelle go_default_library convention —
+# likely auto-generated; prefer regenerating over hand-editing if confirmed.
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["log.go"],
+    importmap = "sigs.k8s.io/cluster-api-provider-aws/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log",
+    importpath = "sigs.k8s.io/controller-runtime/pkg/runtime/log",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//vendor/github.com/go-logr/logr:go_default_library",
+        "//vendor/sigs.k8s.io/controller-runtime/pkg/log:go_default_library",
+        "//vendor/sigs.k8s.io/controller-runtime/pkg/log/zap:go_default_library",
+    ],
+)
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/log.go
new file mode 100644
index 0000000000..c5ec5a2393
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/log.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package log contains (deprecated) utilities for fetching a new logger when
+// one is not already available.
+//
+// Deprecated: use pkg/log
+package log
+
+import (
+ "github.com/go-logr/logr"
+
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+var (
+	// ZapLogger is a Logger implementation.
+	// If development is true, a Zap development config will be used
+	// (stacktraces on warnings, no sampling), otherwise a Zap production
+	// config will be used (stacktraces on errors, sampling).
+	//
+	// Deprecated: use pkg/log/zap.Logger.
+	ZapLogger = zap.Logger
+
+	// ZapLoggerTo returns a new Logger implementation using Zap which logs
+	// to the given destination, instead of stderr. It otherwise behaves like
+	// ZapLogger.
+	//
+	// Deprecated: use pkg/log/zap.LoggerTo.
+	// NOTE: must alias zap.LoggerTo (writer-taking variant), not zap.Logger,
+	// to match the documented signature.
+	ZapLoggerTo = zap.LoggerTo
+
+	// SetLogger sets a concrete logging implementation for all deferred Loggers.
+	SetLogger = log.SetLogger
+
+	// Log is the base logger used by kubebuilder. It delegates
+	// to another logr.Logger. You *must* call SetLogger to
+	// get any actual logging.
+	Log = log.Log
+
+	// KBLog is a base parent logger for use inside controller-runtime.
+	// Deprecated: don't use this outside controller-runtime
+	// (inside CR, use pkg/internal/log.RuntimeLog)
+	KBLog logr.Logger
+)
+
+func init() {
+	// Name the controller-runtime base logger so all its log lines carry a
+	// stable "controller-runtime" prefix.
+	KBLog = log.Log.WithName("controller-runtime")
+}