From a8d0622f3671f28e1b835fa630fe3050c5739bca Mon Sep 17 00:00:00 2001 From: mykolalosev Date: Mon, 23 Jan 2023 13:38:20 +0200 Subject: [PATCH] issue-151 --- .../v1beta1/zz_customservice_types.go | 128 + apis/monitoring/v1beta1/zz_dashboard_types.go | 94 + .../v1beta1/zz_generated.deepcopy.go | 2490 +++++++++++++++-- .../v1beta1/zz_generated.managed.go | 330 +++ .../v1beta1/zz_generated.managedlist.go | 45 + .../v1beta1/zz_generated.resolvers.go | 107 + .../v1beta1/zz_generated_terraformed.go | 370 +++ apis/monitoring/v1beta1/zz_group_types.go | 125 + .../v1beta1/zz_metricdescriptor_types.go | 186 ++ apis/monitoring/v1beta1/zz_slo_types.go | 666 +++++ .../v1beta1/zz_uptimecheckconfig_types.go | 10 + config/externalname.go | 10 + config/externalnamenottested.go | 19 +- .../monitoring/customservice.yaml | 20 + examples-generated/monitoring/dashboard.yaml | 25 + examples-generated/monitoring/group.yaml | 15 + .../monitoring/metricdescriptor.yaml | 27 + examples-generated/monitoring/slo.yaml | 26 + examples/monitoring/customservice.yaml | 17 + examples/monitoring/dashboard.yaml | 21 + examples/monitoring/group.yaml | 12 + examples/monitoring/metricdescriptor.yaml | 24 + examples/monitoring/slo.yaml | 45 + .../monitoring/customservice/zz_controller.go | 63 + .../monitoring/dashboard/zz_controller.go | 63 + .../monitoring/group/zz_controller.go | 63 + .../metricdescriptor/zz_controller.go | 63 + .../monitoring/slo/zz_controller.go | 63 + internal/controller/zz_setup.go | 10 + ...itoring.gcp.upbound.io_customservices.yaml | 332 +++ .../monitoring.gcp.upbound.io_dashboards.yaml | 306 ++ .../monitoring.gcp.upbound.io_groups.yaml | 396 +++ ...ring.gcp.upbound.io_metricdescriptors.yaml | 421 +++ .../crds/monitoring.gcp.upbound.io_sloes.yaml | 876 ++++++ ...ing.gcp.upbound.io_uptimecheckconfigs.yaml | 78 + 35 files changed, 7254 insertions(+), 292 deletions(-) create mode 100755 apis/monitoring/v1beta1/zz_customservice_types.go create mode 100755 
apis/monitoring/v1beta1/zz_dashboard_types.go create mode 100644 apis/monitoring/v1beta1/zz_generated.resolvers.go create mode 100755 apis/monitoring/v1beta1/zz_group_types.go create mode 100755 apis/monitoring/v1beta1/zz_metricdescriptor_types.go create mode 100755 apis/monitoring/v1beta1/zz_slo_types.go create mode 100644 examples-generated/monitoring/customservice.yaml create mode 100644 examples-generated/monitoring/dashboard.yaml create mode 100644 examples-generated/monitoring/group.yaml create mode 100644 examples-generated/monitoring/metricdescriptor.yaml create mode 100644 examples-generated/monitoring/slo.yaml create mode 100644 examples/monitoring/customservice.yaml create mode 100644 examples/monitoring/dashboard.yaml create mode 100644 examples/monitoring/group.yaml create mode 100644 examples/monitoring/metricdescriptor.yaml create mode 100644 examples/monitoring/slo.yaml create mode 100755 internal/controller/monitoring/customservice/zz_controller.go create mode 100755 internal/controller/monitoring/dashboard/zz_controller.go create mode 100755 internal/controller/monitoring/group/zz_controller.go create mode 100755 internal/controller/monitoring/metricdescriptor/zz_controller.go create mode 100755 internal/controller/monitoring/slo/zz_controller.go create mode 100644 package/crds/monitoring.gcp.upbound.io_customservices.yaml create mode 100644 package/crds/monitoring.gcp.upbound.io_dashboards.yaml create mode 100644 package/crds/monitoring.gcp.upbound.io_groups.yaml create mode 100644 package/crds/monitoring.gcp.upbound.io_metricdescriptors.yaml create mode 100644 package/crds/monitoring.gcp.upbound.io_sloes.yaml diff --git a/apis/monitoring/v1beta1/zz_customservice_types.go b/apis/monitoring/v1beta1/zz_customservice_types.go new file mode 100755 index 000000000..536108aa8 --- /dev/null +++ b/apis/monitoring/v1beta1/zz_customservice_types.go @@ -0,0 +1,128 @@ +/* +Copyright 2021 The Crossplane Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomServiceObservation struct { + + // an identifier for the resource with format {{name}} + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The full resource name for this service. The syntax is: + // projects/[PROJECT_ID]/services/[SERVICE_ID]. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type CustomServiceParameters struct { + + // Name used for UI elements listing this Service. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. + // +kubebuilder:validation:Optional + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // An optional service ID to use. If not given, the server will generate a + // service ID. + // +kubebuilder:validation:Optional + ServiceID *string `json:"serviceId,omitempty" tf:"service_id,omitempty"` + + // Configuration for how to query telemetry on a Service. + // Structure is documented below. 
+ // +kubebuilder:validation:Optional + Telemetry []TelemetryParameters `json:"telemetry,omitempty" tf:"telemetry,omitempty"` + + // Labels which have been used to annotate the service. Label keys must start + // with a letter. Label keys and values may contain lowercase letters, + // numbers, underscores, and dashes. Label keys and values have a maximum + // length of 63 characters, and must be less than 128 bytes in size. Up to 64 + // label entries may be stored. For labels which do not have a semantic value, + // the empty string may be supplied for the label value. + // +kubebuilder:validation:Optional + UserLabels map[string]*string `json:"userLabels,omitempty" tf:"user_labels,omitempty"` +} + +type TelemetryObservation struct { +} + +type TelemetryParameters struct { + + // The full name of the resource that defines this service. + // Formatted as described in + // https://cloud.google.com/apis/design/resource_names. + // +kubebuilder:validation:Optional + ResourceName *string `json:"resourceName,omitempty" tf:"resource_name,omitempty"` +} + +// CustomServiceSpec defines the desired state of CustomService +type CustomServiceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CustomServiceParameters `json:"forProvider"` +} + +// CustomServiceStatus defines the observed state of CustomService. +type CustomServiceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CustomServiceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// CustomService is the Schema for the CustomServices API. A Service is a discrete, autonomous, and network-accessible unit, designed to solve an individual concern (Wikipedia). 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,gcp} +type CustomService struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec CustomServiceSpec `json:"spec"` + Status CustomServiceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CustomServiceList contains a list of CustomServices +type CustomServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CustomService `json:"items"` +} + +// Repository type metadata. +var ( + CustomService_Kind = "CustomService" + CustomService_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CustomService_Kind}.String() + CustomService_KindAPIVersion = CustomService_Kind + "." + CRDGroupVersion.String() + CustomService_GroupVersionKind = CRDGroupVersion.WithKind(CustomService_Kind) +) + +func init() { + SchemeBuilder.Register(&CustomService{}, &CustomServiceList{}) +} diff --git a/apis/monitoring/v1beta1/zz_dashboard_types.go b/apis/monitoring/v1beta1/zz_dashboard_types.go new file mode 100755 index 000000000..befb6fe53 --- /dev/null +++ b/apis/monitoring/v1beta1/zz_dashboard_types.go @@ -0,0 +1,94 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DashboardObservation struct { + + // an identifier for the resource with format projects/{project_id_or_number}/dashboards/{dashboard_id} + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type DashboardParameters struct { + + // The JSON representation of a dashboard, following the format at https://cloud.google.com/monitoring/api/ref_v3/rest/v1/projects.dashboards. + // The representation of an existing dashboard can be found by using the API Explorer + // +kubebuilder:validation:Required + DashboardJSON *string `json:"dashboardJson" tf:"dashboard_json,omitempty"` + + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. + // +kubebuilder:validation:Optional + Project *string `json:"project,omitempty" tf:"project,omitempty"` +} + +// DashboardSpec defines the desired state of Dashboard +type DashboardSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DashboardParameters `json:"forProvider"` +} + +// DashboardStatus defines the observed state of Dashboard. +type DashboardStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DashboardObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// Dashboard is the Schema for the Dashboards API. A Google Stackdriver dashboard. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,gcp} +type Dashboard struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DashboardSpec `json:"spec"` + Status DashboardStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DashboardList contains a list of Dashboards +type DashboardList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Dashboard `json:"items"` +} + +// Repository type metadata. +var ( + Dashboard_Kind = "Dashboard" + Dashboard_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Dashboard_Kind}.String() + Dashboard_KindAPIVersion = Dashboard_Kind + "." + CRDGroupVersion.String() + Dashboard_GroupVersionKind = CRDGroupVersion.WithKind(Dashboard_Kind) +) + +func init() { + SchemeBuilder.Register(&Dashboard{}, &DashboardList{}) +} diff --git a/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/apis/monitoring/v1beta1/zz_generated.deepcopy.go index 2d7b1cacc..baa999096 100644 --- a/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -414,6 +414,265 @@ func (in *AuthInfoParameters) DeepCopy() *AuthInfoParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AvailabilityObservation) DeepCopyInto(out *AvailabilityObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvailabilityObservation. +func (in *AvailabilityObservation) DeepCopy() *AvailabilityObservation { + if in == nil { + return nil + } + out := new(AvailabilityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvailabilityParameters) DeepCopyInto(out *AvailabilityParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvailabilityParameters. +func (in *AvailabilityParameters) DeepCopy() *AvailabilityParameters { + if in == nil { + return nil + } + out := new(AvailabilityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicSliObservation) DeepCopyInto(out *BasicSliObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicSliObservation. +func (in *BasicSliObservation) DeepCopy() *BasicSliObservation { + if in == nil { + return nil + } + out := new(BasicSliObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BasicSliParameters) DeepCopyInto(out *BasicSliParameters) { + *out = *in + if in.Availability != nil { + in, out := &in.Availability, &out.Availability + *out = make([]AvailabilityParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Latency != nil { + in, out := &in.Latency, &out.Latency + *out = make([]LatencyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicSliParameters. +func (in *BasicSliParameters) DeepCopy() *BasicSliParameters { + if in == nil { + return nil + } + out := new(BasicSliParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicSliPerformanceAvailabilityObservation) DeepCopyInto(out *BasicSliPerformanceAvailabilityObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicSliPerformanceAvailabilityObservation. 
+func (in *BasicSliPerformanceAvailabilityObservation) DeepCopy() *BasicSliPerformanceAvailabilityObservation { + if in == nil { + return nil + } + out := new(BasicSliPerformanceAvailabilityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicSliPerformanceAvailabilityParameters) DeepCopyInto(out *BasicSliPerformanceAvailabilityParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicSliPerformanceAvailabilityParameters. +func (in *BasicSliPerformanceAvailabilityParameters) DeepCopy() *BasicSliPerformanceAvailabilityParameters { + if in == nil { + return nil + } + out := new(BasicSliPerformanceAvailabilityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicSliPerformanceLatencyObservation) DeepCopyInto(out *BasicSliPerformanceLatencyObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicSliPerformanceLatencyObservation. +func (in *BasicSliPerformanceLatencyObservation) DeepCopy() *BasicSliPerformanceLatencyObservation { + if in == nil { + return nil + } + out := new(BasicSliPerformanceLatencyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BasicSliPerformanceLatencyParameters) DeepCopyInto(out *BasicSliPerformanceLatencyParameters) { + *out = *in + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicSliPerformanceLatencyParameters. +func (in *BasicSliPerformanceLatencyParameters) DeepCopy() *BasicSliPerformanceLatencyParameters { + if in == nil { + return nil + } + out := new(BasicSliPerformanceLatencyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicSliPerformanceObservation) DeepCopyInto(out *BasicSliPerformanceObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicSliPerformanceObservation. +func (in *BasicSliPerformanceObservation) DeepCopy() *BasicSliPerformanceObservation { + if in == nil { + return nil + } + out := new(BasicSliPerformanceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BasicSliPerformanceParameters) DeepCopyInto(out *BasicSliPerformanceParameters) { + *out = *in + if in.Availability != nil { + in, out := &in.Availability, &out.Availability + *out = make([]BasicSliPerformanceAvailabilityParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Latency != nil { + in, out := &in.Latency, &out.Latency + *out = make([]BasicSliPerformanceLatencyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicSliPerformanceParameters. +func (in *BasicSliPerformanceParameters) DeepCopy() *BasicSliPerformanceParameters { + if in == nil { + return nil + } + out := new(BasicSliPerformanceParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConditionAbsentObservation) DeepCopyInto(out *ConditionAbsentObservation) { *out = *in @@ -943,145 +1202,116 @@ func (in *CreationRecordParameters) DeepCopy() *CreationRecordParameters { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DenominatorAggregationsObservation) DeepCopyInto(out *DenominatorAggregationsObservation) { +func (in *CustomService) DeepCopyInto(out *CustomService) { *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DenominatorAggregationsObservation. -func (in *DenominatorAggregationsObservation) DeepCopy() *DenominatorAggregationsObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomService. +func (in *CustomService) DeepCopy() *CustomService { if in == nil { return nil } - out := new(DenominatorAggregationsObservation) + out := new(CustomService) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DenominatorAggregationsParameters) DeepCopyInto(out *DenominatorAggregationsParameters) { +func (in *CustomServiceList) DeepCopyInto(out *CustomServiceList) { *out = *in - if in.AlignmentPeriod != nil { - in, out := &in.AlignmentPeriod, &out.AlignmentPeriod - *out = new(string) - **out = **in - } - if in.CrossSeriesReducer != nil { - in, out := &in.CrossSeriesReducer, &out.CrossSeriesReducer - *out = new(string) - **out = **in - } - if in.GroupByFields != nil { - in, out := &in.GroupByFields, &out.GroupByFields - *out = make([]*string, len(*in)) + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomService, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.PerSeriesAligner != nil { - in, out := &in.PerSeriesAligner, &out.PerSeriesAligner - *out = new(string) - **out = **in - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DenominatorAggregationsParameters. -func (in *DenominatorAggregationsParameters) DeepCopy() *DenominatorAggregationsParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomServiceList. +func (in *CustomServiceList) DeepCopy() *CustomServiceList { if in == nil { return nil } - out := new(DenominatorAggregationsParameters) + out := new(CustomServiceList) in.DeepCopyInto(out) return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DocumentationObservation) DeepCopyInto(out *DocumentationObservation) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentationObservation. 
-func (in *DocumentationObservation) DeepCopy() *DocumentationObservation { - if in == nil { - return nil +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c } - out := new(DocumentationObservation) - in.DeepCopyInto(out) - return out + return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DocumentationParameters) DeepCopyInto(out *DocumentationParameters) { +func (in *CustomServiceObservation) DeepCopyInto(out *CustomServiceObservation) { *out = *in - if in.Content != nil { - in, out := &in.Content, &out.Content + if in.ID != nil { + in, out := &in.ID, &out.ID *out = new(string) **out = **in } - if in.MimeType != nil { - in, out := &in.MimeType, &out.MimeType + if in.Name != nil { + in, out := &in.Name, &out.Name *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentationParameters. -func (in *DocumentationParameters) DeepCopy() *DocumentationParameters { - if in == nil { - return nil - } - out := new(DocumentationParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPCheckObservation) DeepCopyInto(out *HTTPCheckObservation) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPCheckObservation. -func (in *HTTPCheckObservation) DeepCopy() *HTTPCheckObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomServiceObservation. 
+func (in *CustomServiceObservation) DeepCopy() *CustomServiceObservation { if in == nil { return nil } - out := new(HTTPCheckObservation) + out := new(CustomServiceObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPCheckParameters) DeepCopyInto(out *HTTPCheckParameters) { +func (in *CustomServiceParameters) DeepCopyInto(out *CustomServiceParameters) { *out = *in - if in.AcceptedResponseStatusCodes != nil { - in, out := &in.AcceptedResponseStatusCodes, &out.AcceptedResponseStatusCodes - *out = make([]AcceptedResponseStatusCodesParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AuthInfo != nil { - in, out := &in.AuthInfo, &out.AuthInfo - *out = make([]AuthInfoParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in } - if in.Body != nil { - in, out := &in.Body, &out.Body + if in.Project != nil { + in, out := &in.Project, &out.Project *out = new(string) **out = **in } - if in.ContentType != nil { - in, out := &in.ContentType, &out.ContentType + if in.ServiceID != nil { + in, out := &in.ServiceID, &out.ServiceID *out = new(string) **out = **in } - if in.Headers != nil { - in, out := &in.Headers, &out.Headers + if in.Telemetry != nil { + in, out := &in.Telemetry, &out.Telemetry + *out = make([]TelemetryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UserLabels != nil { + in, out := &in.UserLabels, &out.UserLabels *out = make(map[string]*string, len(*in)) for key, val := range *in { var outVal *string @@ -1095,140 +1325,1808 @@ func (in *HTTPCheckParameters) DeepCopyInto(out *HTTPCheckParameters) { (*out)[key] = outVal } } - if in.MaskHeaders != nil { - in, out := &in.MaskHeaders, &out.MaskHeaders - *out = new(bool) 
- **out = **in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomServiceParameters. +func (in *CustomServiceParameters) DeepCopy() *CustomServiceParameters { + if in == nil { + return nil } - if in.Path != nil { - in, out := &in.Path, &out.Path + out := new(CustomServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomServiceSpec) DeepCopyInto(out *CustomServiceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomServiceSpec. +func (in *CustomServiceSpec) DeepCopy() *CustomServiceSpec { + if in == nil { + return nil + } + out := new(CustomServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomServiceStatus) DeepCopyInto(out *CustomServiceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomServiceStatus. +func (in *CustomServiceStatus) DeepCopy() *CustomServiceStatus { + if in == nil { + return nil + } + out := new(CustomServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Dashboard) DeepCopyInto(out *Dashboard) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dashboard. 
+func (in *Dashboard) DeepCopy() *Dashboard { + if in == nil { + return nil + } + out := new(Dashboard) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Dashboard) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardList) DeepCopyInto(out *DashboardList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Dashboard, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardList. +func (in *DashboardList) DeepCopy() *DashboardList { + if in == nil { + return nil + } + out := new(DashboardList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DashboardList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardObservation) DeepCopyInto(out *DashboardObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID *out = new(string) **out = **in } - if in.Port != nil { - in, out := &in.Port, &out.Port - *out = new(float64) - **out = **in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardObservation. 
+func (in *DashboardObservation) DeepCopy() *DashboardObservation { + if in == nil { + return nil } - if in.RequestMethod != nil { - in, out := &in.RequestMethod, &out.RequestMethod + out := new(DashboardObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardParameters) DeepCopyInto(out *DashboardParameters) { + *out = *in + if in.DashboardJSON != nil { + in, out := &in.DashboardJSON, &out.DashboardJSON *out = new(string) **out = **in } - if in.UseSSL != nil { - in, out := &in.UseSSL, &out.UseSSL - *out = new(bool) + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) **out = **in } - if in.ValidateSSL != nil { - in, out := &in.ValidateSSL, &out.ValidateSSL - *out = new(bool) - **out = **in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardParameters. +func (in *DashboardParameters) DeepCopy() *DashboardParameters { + if in == nil { + return nil } + out := new(DashboardParameters) + in.DeepCopyInto(out) + return out } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPCheckParameters. -func (in *HTTPCheckParameters) DeepCopy() *HTTPCheckParameters { +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardSpec) DeepCopyInto(out *DashboardSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardSpec. +func (in *DashboardSpec) DeepCopy() *DashboardSpec { if in == nil { return nil } - out := new(HTTPCheckParameters) + out := new(DashboardSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *JSONPathMatcherObservation) DeepCopyInto(out *JSONPathMatcherObservation) { +func (in *DashboardStatus) DeepCopyInto(out *DashboardStatus) { *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPathMatcherObservation. -func (in *JSONPathMatcherObservation) DeepCopy() *JSONPathMatcherObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardStatus. +func (in *DashboardStatus) DeepCopy() *DashboardStatus { if in == nil { return nil } - out := new(JSONPathMatcherObservation) + out := new(DashboardStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JSONPathMatcherParameters) DeepCopyInto(out *JSONPathMatcherParameters) { +func (in *DenominatorAggregationsObservation) DeepCopyInto(out *DenominatorAggregationsObservation) { *out = *in - if in.JSONMatcher != nil { - in, out := &in.JSONMatcher, &out.JSONMatcher +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DenominatorAggregationsObservation. +func (in *DenominatorAggregationsObservation) DeepCopy() *DenominatorAggregationsObservation { + if in == nil { + return nil + } + out := new(DenominatorAggregationsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DenominatorAggregationsParameters) DeepCopyInto(out *DenominatorAggregationsParameters) { + *out = *in + if in.AlignmentPeriod != nil { + in, out := &in.AlignmentPeriod, &out.AlignmentPeriod *out = new(string) **out = **in } - if in.JSONPath != nil { - in, out := &in.JSONPath, &out.JSONPath + if in.CrossSeriesReducer != nil { + in, out := &in.CrossSeriesReducer, &out.CrossSeriesReducer + *out = new(string) + **out = **in + } + if in.GroupByFields != nil { + in, out := &in.GroupByFields, &out.GroupByFields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PerSeriesAligner != nil { + in, out := &in.PerSeriesAligner, &out.PerSeriesAligner *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPathMatcherParameters. -func (in *JSONPathMatcherParameters) DeepCopy() *JSONPathMatcherParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DenominatorAggregationsParameters. +func (in *DenominatorAggregationsParameters) DeepCopy() *DenominatorAggregationsParameters { if in == nil { return nil } - out := new(JSONPathMatcherParameters) + out := new(DenominatorAggregationsParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MonitoredResourceObservation) DeepCopyInto(out *MonitoredResourceObservation) { +func (in *DistributionCutObservation) DeepCopyInto(out *DistributionCutObservation) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoredResourceObservation. 
-func (in *MonitoredResourceObservation) DeepCopy() *MonitoredResourceObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionCutObservation. +func (in *DistributionCutObservation) DeepCopy() *DistributionCutObservation { + if in == nil { + return nil + } + out := new(DistributionCutObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DistributionCutParameters) DeepCopyInto(out *DistributionCutParameters) { + *out = *in + if in.DistributionFilter != nil { + in, out := &in.DistributionFilter, &out.DistributionFilter + *out = new(string) + **out = **in + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = make([]RangeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionCutParameters. +func (in *DistributionCutParameters) DeepCopy() *DistributionCutParameters { + if in == nil { + return nil + } + out := new(DistributionCutParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DistributionCutRangeObservation) DeepCopyInto(out *DistributionCutRangeObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionCutRangeObservation. +func (in *DistributionCutRangeObservation) DeepCopy() *DistributionCutRangeObservation { + if in == nil { + return nil + } + out := new(DistributionCutRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DistributionCutRangeParameters) DeepCopyInto(out *DistributionCutRangeParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionCutRangeParameters. +func (in *DistributionCutRangeParameters) DeepCopy() *DistributionCutRangeParameters { + if in == nil { + return nil + } + out := new(DistributionCutRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DocumentationObservation) DeepCopyInto(out *DocumentationObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentationObservation. +func (in *DocumentationObservation) DeepCopy() *DocumentationObservation { + if in == nil { + return nil + } + out := new(DocumentationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DocumentationParameters) DeepCopyInto(out *DocumentationParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.MimeType != nil { + in, out := &in.MimeType, &out.MimeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentationParameters. +func (in *DocumentationParameters) DeepCopy() *DocumentationParameters { + if in == nil { + return nil + } + out := new(DocumentationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GoodTotalRatioObservation) DeepCopyInto(out *GoodTotalRatioObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoodTotalRatioObservation. +func (in *GoodTotalRatioObservation) DeepCopy() *GoodTotalRatioObservation { + if in == nil { + return nil + } + out := new(GoodTotalRatioObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GoodTotalRatioParameters) DeepCopyInto(out *GoodTotalRatioParameters) { + *out = *in + if in.BadServiceFilter != nil { + in, out := &in.BadServiceFilter, &out.BadServiceFilter + *out = new(string) + **out = **in + } + if in.GoodServiceFilter != nil { + in, out := &in.GoodServiceFilter, &out.GoodServiceFilter + *out = new(string) + **out = **in + } + if in.TotalServiceFilter != nil { + in, out := &in.TotalServiceFilter, &out.TotalServiceFilter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoodTotalRatioParameters. +func (in *GoodTotalRatioParameters) DeepCopy() *GoodTotalRatioParameters { + if in == nil { + return nil + } + out := new(GoodTotalRatioParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GoodTotalRatioThresholdObservation) DeepCopyInto(out *GoodTotalRatioThresholdObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoodTotalRatioThresholdObservation. 
+func (in *GoodTotalRatioThresholdObservation) DeepCopy() *GoodTotalRatioThresholdObservation { + if in == nil { + return nil + } + out := new(GoodTotalRatioThresholdObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GoodTotalRatioThresholdParameters) DeepCopyInto(out *GoodTotalRatioThresholdParameters) { + *out = *in + if in.BasicSliPerformance != nil { + in, out := &in.BasicSliPerformance, &out.BasicSliPerformance + *out = make([]BasicSliPerformanceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Performance != nil { + in, out := &in.Performance, &out.Performance + *out = make([]PerformanceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoodTotalRatioThresholdParameters. +func (in *GoodTotalRatioThresholdParameters) DeepCopy() *GoodTotalRatioThresholdParameters { + if in == nil { + return nil + } + out := new(GoodTotalRatioThresholdParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Group) DeepCopyInto(out *Group) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Group. +func (in *Group) DeepCopy() *Group { + if in == nil { + return nil + } + out := new(Group) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Group) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupList) DeepCopyInto(out *GroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Group, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupList. +func (in *GroupList) DeepCopy() *GroupList { + if in == nil { + return nil + } + out := new(GroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupObservation) DeepCopyInto(out *GroupObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupObservation. +func (in *GroupObservation) DeepCopy() *GroupObservation { + if in == nil { + return nil + } + out := new(GroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupParameters) DeepCopyInto(out *GroupParameters) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } + if in.IsCluster != nil { + in, out := &in.IsCluster, &out.IsCluster + *out = new(bool) + **out = **in + } + if in.ParentName != nil { + in, out := &in.ParentName, &out.ParentName + *out = new(string) + **out = **in + } + if in.ParentNameRef != nil { + in, out := &in.ParentNameRef, &out.ParentNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ParentNameSelector != nil { + in, out := &in.ParentNameSelector, &out.ParentNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupParameters. +func (in *GroupParameters) DeepCopy() *GroupParameters { + if in == nil { + return nil + } + out := new(GroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupSpec) DeepCopyInto(out *GroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSpec. +func (in *GroupSpec) DeepCopy() *GroupSpec { + if in == nil { + return nil + } + out := new(GroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupStatus) DeepCopyInto(out *GroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupStatus. +func (in *GroupStatus) DeepCopy() *GroupStatus { + if in == nil { + return nil + } + out := new(GroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPCheckObservation) DeepCopyInto(out *HTTPCheckObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPCheckObservation. +func (in *HTTPCheckObservation) DeepCopy() *HTTPCheckObservation { + if in == nil { + return nil + } + out := new(HTTPCheckObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPCheckParameters) DeepCopyInto(out *HTTPCheckParameters) { + *out = *in + if in.AcceptedResponseStatusCodes != nil { + in, out := &in.AcceptedResponseStatusCodes, &out.AcceptedResponseStatusCodes + *out = make([]AcceptedResponseStatusCodesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AuthInfo != nil { + in, out := &in.AuthInfo, &out.AuthInfo + *out = make([]AuthInfoParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MaskHeaders != nil { + in, out := &in.MaskHeaders, &out.MaskHeaders + *out = new(bool) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.RequestMethod != nil { + in, out := &in.RequestMethod, &out.RequestMethod + *out = new(string) + **out = **in + } + if in.UseSSL != nil { + in, out := &in.UseSSL, &out.UseSSL + *out = new(bool) + **out = **in + } + if in.ValidateSSL != nil { + in, out := &in.ValidateSSL, &out.ValidateSSL + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPCheckParameters. 
+func (in *HTTPCheckParameters) DeepCopy() *HTTPCheckParameters { + if in == nil { + return nil + } + out := new(HTTPCheckParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONPathMatcherObservation) DeepCopyInto(out *JSONPathMatcherObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPathMatcherObservation. +func (in *JSONPathMatcherObservation) DeepCopy() *JSONPathMatcherObservation { + if in == nil { + return nil + } + out := new(JSONPathMatcherObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONPathMatcherParameters) DeepCopyInto(out *JSONPathMatcherParameters) { + *out = *in + if in.JSONMatcher != nil { + in, out := &in.JSONMatcher, &out.JSONMatcher + *out = new(string) + **out = **in + } + if in.JSONPath != nil { + in, out := &in.JSONPath, &out.JSONPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPathMatcherParameters. +func (in *JSONPathMatcherParameters) DeepCopy() *JSONPathMatcherParameters { + if in == nil { + return nil + } + out := new(JSONPathMatcherParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelsObservation) DeepCopyInto(out *LabelsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelsObservation. 
+func (in *LabelsObservation) DeepCopy() *LabelsObservation { + if in == nil { + return nil + } + out := new(LabelsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelsParameters) DeepCopyInto(out *LabelsParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.ValueType != nil { + in, out := &in.ValueType, &out.ValueType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelsParameters. +func (in *LabelsParameters) DeepCopy() *LabelsParameters { + if in == nil { + return nil + } + out := new(LabelsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LatencyObservation) DeepCopyInto(out *LatencyObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LatencyObservation. +func (in *LatencyObservation) DeepCopy() *LatencyObservation { + if in == nil { + return nil + } + out := new(LatencyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LatencyParameters) DeepCopyInto(out *LatencyParameters) { + *out = *in + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LatencyParameters. 
+func (in *LatencyParameters) DeepCopy() *LatencyParameters { + if in == nil { + return nil + } + out := new(LatencyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataObservation) DeepCopyInto(out *MetadataObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataObservation. +func (in *MetadataObservation) DeepCopy() *MetadataObservation { + if in == nil { + return nil + } + out := new(MetadataObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataParameters) DeepCopyInto(out *MetadataParameters) { + *out = *in + if in.IngestDelay != nil { + in, out := &in.IngestDelay, &out.IngestDelay + *out = new(string) + **out = **in + } + if in.SamplePeriod != nil { + in, out := &in.SamplePeriod, &out.SamplePeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataParameters. +func (in *MetadataParameters) DeepCopy() *MetadataParameters { + if in == nil { + return nil + } + out := new(MetadataParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDescriptor) DeepCopyInto(out *MetricDescriptor) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDescriptor. 
+func (in *MetricDescriptor) DeepCopy() *MetricDescriptor { + if in == nil { + return nil + } + out := new(MetricDescriptor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MetricDescriptor) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDescriptorList) DeepCopyInto(out *MetricDescriptorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MetricDescriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDescriptorList. +func (in *MetricDescriptorList) DeepCopy() *MetricDescriptorList { + if in == nil { + return nil + } + out := new(MetricDescriptorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MetricDescriptorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricDescriptorObservation) DeepCopyInto(out *MetricDescriptorObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MonitoredResourceTypes != nil { + in, out := &in.MonitoredResourceTypes, &out.MonitoredResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDescriptorObservation. +func (in *MetricDescriptorObservation) DeepCopy() *MetricDescriptorObservation { + if in == nil { + return nil + } + out := new(MetricDescriptorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDescriptorParameters) DeepCopyInto(out *MetricDescriptorParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]LabelsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LaunchStage != nil { + in, out := &in.LaunchStage, &out.LaunchStage + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make([]MetadataParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricKind != nil { + in, out := &in.MetricKind, &out.MetricKind + *out = new(string) + **out = **in + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + 
**out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.ValueType != nil { + in, out := &in.ValueType, &out.ValueType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDescriptorParameters. +func (in *MetricDescriptorParameters) DeepCopy() *MetricDescriptorParameters { + if in == nil { + return nil + } + out := new(MetricDescriptorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDescriptorSpec) DeepCopyInto(out *MetricDescriptorSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDescriptorSpec. +func (in *MetricDescriptorSpec) DeepCopy() *MetricDescriptorSpec { + if in == nil { + return nil + } + out := new(MetricDescriptorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDescriptorStatus) DeepCopyInto(out *MetricDescriptorStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDescriptorStatus. +func (in *MetricDescriptorStatus) DeepCopy() *MetricDescriptorStatus { + if in == nil { + return nil + } + out := new(MetricDescriptorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricMeanInRangeObservation) DeepCopyInto(out *MetricMeanInRangeObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricMeanInRangeObservation. +func (in *MetricMeanInRangeObservation) DeepCopy() *MetricMeanInRangeObservation { + if in == nil { + return nil + } + out := new(MetricMeanInRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricMeanInRangeParameters) DeepCopyInto(out *MetricMeanInRangeParameters) { + *out = *in + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = make([]MetricMeanInRangeRangeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimeSeries != nil { + in, out := &in.TimeSeries, &out.TimeSeries + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricMeanInRangeParameters. +func (in *MetricMeanInRangeParameters) DeepCopy() *MetricMeanInRangeParameters { + if in == nil { + return nil + } + out := new(MetricMeanInRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricMeanInRangeRangeObservation) DeepCopyInto(out *MetricMeanInRangeRangeObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricMeanInRangeRangeObservation. +func (in *MetricMeanInRangeRangeObservation) DeepCopy() *MetricMeanInRangeRangeObservation { + if in == nil { + return nil + } + out := new(MetricMeanInRangeRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricMeanInRangeRangeParameters) DeepCopyInto(out *MetricMeanInRangeRangeParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricMeanInRangeRangeParameters. +func (in *MetricMeanInRangeRangeParameters) DeepCopy() *MetricMeanInRangeRangeParameters { + if in == nil { + return nil + } + out := new(MetricMeanInRangeRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricSumInRangeObservation) DeepCopyInto(out *MetricSumInRangeObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSumInRangeObservation. +func (in *MetricSumInRangeObservation) DeepCopy() *MetricSumInRangeObservation { + if in == nil { + return nil + } + out := new(MetricSumInRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricSumInRangeParameters) DeepCopyInto(out *MetricSumInRangeParameters) { + *out = *in + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = make([]MetricSumInRangeRangeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimeSeries != nil { + in, out := &in.TimeSeries, &out.TimeSeries + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSumInRangeParameters. 
+func (in *MetricSumInRangeParameters) DeepCopy() *MetricSumInRangeParameters { + if in == nil { + return nil + } + out := new(MetricSumInRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricSumInRangeRangeObservation) DeepCopyInto(out *MetricSumInRangeRangeObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSumInRangeRangeObservation. +func (in *MetricSumInRangeRangeObservation) DeepCopy() *MetricSumInRangeRangeObservation { + if in == nil { + return nil + } + out := new(MetricSumInRangeRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricSumInRangeRangeParameters) DeepCopyInto(out *MetricSumInRangeRangeParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSumInRangeRangeParameters. +func (in *MetricSumInRangeRangeParameters) DeepCopy() *MetricSumInRangeRangeParameters { + if in == nil { + return nil + } + out := new(MetricSumInRangeRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoredResourceObservation) DeepCopyInto(out *MonitoredResourceObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoredResourceObservation. 
+func (in *MonitoredResourceObservation) DeepCopy() *MonitoredResourceObservation { + if in == nil { + return nil + } + out := new(MonitoredResourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoredResourceParameters) DeepCopyInto(out *MonitoredResourceParameters) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoredResourceParameters. +func (in *MonitoredResourceParameters) DeepCopy() *MonitoredResourceParameters { + if in == nil { + return nil + } + out := new(MonitoredResourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationChannel) DeepCopyInto(out *NotificationChannel) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationChannel. +func (in *NotificationChannel) DeepCopy() *NotificationChannel { + if in == nil { + return nil + } + out := new(NotificationChannel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NotificationChannel) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationChannelList) DeepCopyInto(out *NotificationChannelList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NotificationChannel, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationChannelList. +func (in *NotificationChannelList) DeepCopy() *NotificationChannelList { + if in == nil { + return nil + } + out := new(NotificationChannelList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NotificationChannelList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationChannelObservation) DeepCopyInto(out *NotificationChannelObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.VerificationStatus != nil { + in, out := &in.VerificationStatus, &out.VerificationStatus + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationChannelObservation. 
+func (in *NotificationChannelObservation) DeepCopy() *NotificationChannelObservation { + if in == nil { + return nil + } + out := new(NotificationChannelObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationChannelParameters) DeepCopyInto(out *NotificationChannelParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ForceDelete != nil { + in, out := &in.ForceDelete, &out.ForceDelete + *out = new(bool) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.SensitiveLabels != nil { + in, out := &in.SensitiveLabels, &out.SensitiveLabels + *out = make([]SensitiveLabelsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UserLabels != nil { + in, out := &in.UserLabels, &out.UserLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
NotificationChannelParameters. +func (in *NotificationChannelParameters) DeepCopy() *NotificationChannelParameters { + if in == nil { + return nil + } + out := new(NotificationChannelParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationChannelSpec) DeepCopyInto(out *NotificationChannelSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationChannelSpec. +func (in *NotificationChannelSpec) DeepCopy() *NotificationChannelSpec { + if in == nil { + return nil + } + out := new(NotificationChannelSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationChannelStatus) DeepCopyInto(out *NotificationChannelStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationChannelStatus. +func (in *NotificationChannelStatus) DeepCopy() *NotificationChannelStatus { + if in == nil { + return nil + } + out := new(NotificationChannelStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationRateLimitObservation) DeepCopyInto(out *NotificationRateLimitObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationRateLimitObservation. 
+func (in *NotificationRateLimitObservation) DeepCopy() *NotificationRateLimitObservation { + if in == nil { + return nil + } + out := new(NotificationRateLimitObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationRateLimitParameters) DeepCopyInto(out *NotificationRateLimitParameters) { + *out = *in + if in.Period != nil { + in, out := &in.Period, &out.Period + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationRateLimitParameters. +func (in *NotificationRateLimitParameters) DeepCopy() *NotificationRateLimitParameters { + if in == nil { + return nil + } + out := new(NotificationRateLimitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerformanceDistributionCutObservation) DeepCopyInto(out *PerformanceDistributionCutObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceDistributionCutObservation. +func (in *PerformanceDistributionCutObservation) DeepCopy() *PerformanceDistributionCutObservation { + if in == nil { + return nil + } + out := new(PerformanceDistributionCutObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PerformanceDistributionCutParameters) DeepCopyInto(out *PerformanceDistributionCutParameters) { + *out = *in + if in.DistributionFilter != nil { + in, out := &in.DistributionFilter, &out.DistributionFilter + *out = new(string) + **out = **in + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = make([]DistributionCutRangeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceDistributionCutParameters. +func (in *PerformanceDistributionCutParameters) DeepCopy() *PerformanceDistributionCutParameters { + if in == nil { + return nil + } + out := new(PerformanceDistributionCutParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerformanceGoodTotalRatioObservation) DeepCopyInto(out *PerformanceGoodTotalRatioObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceGoodTotalRatioObservation. +func (in *PerformanceGoodTotalRatioObservation) DeepCopy() *PerformanceGoodTotalRatioObservation { + if in == nil { + return nil + } + out := new(PerformanceGoodTotalRatioObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PerformanceGoodTotalRatioParameters) DeepCopyInto(out *PerformanceGoodTotalRatioParameters) { + *out = *in + if in.BadServiceFilter != nil { + in, out := &in.BadServiceFilter, &out.BadServiceFilter + *out = new(string) + **out = **in + } + if in.GoodServiceFilter != nil { + in, out := &in.GoodServiceFilter, &out.GoodServiceFilter + *out = new(string) + **out = **in + } + if in.TotalServiceFilter != nil { + in, out := &in.TotalServiceFilter, &out.TotalServiceFilter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceGoodTotalRatioParameters. +func (in *PerformanceGoodTotalRatioParameters) DeepCopy() *PerformanceGoodTotalRatioParameters { + if in == nil { + return nil + } + out := new(PerformanceGoodTotalRatioParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerformanceObservation) DeepCopyInto(out *PerformanceObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceObservation. +func (in *PerformanceObservation) DeepCopy() *PerformanceObservation { + if in == nil { + return nil + } + out := new(PerformanceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PerformanceParameters) DeepCopyInto(out *PerformanceParameters) { + *out = *in + if in.DistributionCut != nil { + in, out := &in.DistributionCut, &out.DistributionCut + *out = make([]PerformanceDistributionCutParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GoodTotalRatio != nil { + in, out := &in.GoodTotalRatio, &out.GoodTotalRatio + *out = make([]PerformanceGoodTotalRatioParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceParameters. +func (in *PerformanceParameters) DeepCopy() *PerformanceParameters { + if in == nil { + return nil + } + out := new(PerformanceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RangeObservation) DeepCopyInto(out *RangeObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeObservation. +func (in *RangeObservation) DeepCopy() *RangeObservation { + if in == nil { + return nil + } + out := new(RangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RangeParameters) DeepCopyInto(out *RangeParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeParameters. 
+func (in *RangeParameters) DeepCopy() *RangeParameters { + if in == nil { + return nil + } + out := new(RangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestBasedSliObservation) DeepCopyInto(out *RequestBasedSliObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestBasedSliObservation. +func (in *RequestBasedSliObservation) DeepCopy() *RequestBasedSliObservation { + if in == nil { + return nil + } + out := new(RequestBasedSliObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestBasedSliParameters) DeepCopyInto(out *RequestBasedSliParameters) { + *out = *in + if in.DistributionCut != nil { + in, out := &in.DistributionCut, &out.DistributionCut + *out = make([]DistributionCutParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GoodTotalRatio != nil { + in, out := &in.GoodTotalRatio, &out.GoodTotalRatio + *out = make([]GoodTotalRatioParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestBasedSliParameters. +func (in *RequestBasedSliParameters) DeepCopy() *RequestBasedSliParameters { + if in == nil { + return nil + } + out := new(RequestBasedSliParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceGroupObservation) DeepCopyInto(out *ResourceGroupObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupObservation. 
+func (in *ResourceGroupObservation) DeepCopy() *ResourceGroupObservation { if in == nil { return nil } - out := new(MonitoredResourceObservation) + out := new(ResourceGroupObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MonitoredResourceParameters) DeepCopyInto(out *MonitoredResourceParameters) { +func (in *ResourceGroupParameters) DeepCopyInto(out *ResourceGroupParameters) { *out = *in - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]*string, len(*in)) - for key, val := range *in { - var outVal *string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = new(string) - **out = **in - } - (*out)[key] = outVal - } + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(string) + **out = **in } - if in.Type != nil { - in, out := &in.Type, &out.Type + if in.GroupIDRef != nil { + in, out := &in.GroupIDRef, &out.GroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.GroupIDSelector != nil { + in, out := &in.GroupIDSelector, &out.GroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoredResourceParameters. -func (in *MonitoredResourceParameters) DeepCopy() *MonitoredResourceParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupParameters. +func (in *ResourceGroupParameters) DeepCopy() *ResourceGroupParameters { if in == nil { return nil } - out := new(MonitoredResourceParameters) + out := new(ResourceGroupParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *NotificationChannel) DeepCopyInto(out *NotificationChannel) { +func (in *SLO) DeepCopyInto(out *SLO) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -1236,18 +3134,18 @@ func (in *NotificationChannel) DeepCopyInto(out *NotificationChannel) { in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationChannel. -func (in *NotificationChannel) DeepCopy() *NotificationChannel { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SLO. +func (in *SLO) DeepCopy() *SLO { if in == nil { return nil } - out := new(NotificationChannel) + out := new(SLO) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NotificationChannel) DeepCopyObject() runtime.Object { +func (in *SLO) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -1255,31 +3153,31 @@ func (in *NotificationChannel) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NotificationChannelList) DeepCopyInto(out *NotificationChannelList) { +func (in *SLOList) DeepCopyInto(out *SLOList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]NotificationChannel, len(*in)) + *out = make([]SLO, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationChannelList. -func (in *NotificationChannelList) DeepCopy() *NotificationChannelList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SLOList. 
+func (in *SLOList) DeepCopy() *SLOList { if in == nil { return nil } - out := new(NotificationChannelList) + out := new(SLOList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NotificationChannelList) DeepCopyObject() runtime.Object { +func (in *SLOList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -1287,7 +3185,7 @@ func (in *NotificationChannelList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NotificationChannelObservation) DeepCopyInto(out *NotificationChannelObservation) { +func (in *SLOObservation) DeepCopyInto(out *SLOObservation) { *out = *in if in.ID != nil { in, out := &in.ID, &out.ID @@ -1299,28 +3197,30 @@ func (in *NotificationChannelObservation) DeepCopyInto(out *NotificationChannelO *out = new(string) **out = **in } - if in.VerificationStatus != nil { - in, out := &in.VerificationStatus, &out.VerificationStatus - *out = new(string) - **out = **in - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationChannelObservation. -func (in *NotificationChannelObservation) DeepCopy() *NotificationChannelObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SLOObservation. +func (in *SLOObservation) DeepCopy() *SLOObservation { if in == nil { return nil } - out := new(NotificationChannelObservation) + out := new(SLOObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NotificationChannelParameters) DeepCopyInto(out *NotificationChannelParameters) { +func (in *SLOParameters) DeepCopyInto(out *SLOParameters) { *out = *in - if in.Description != nil { - in, out := &in.Description, &out.Description + if in.BasicSli != nil { + in, out := &in.BasicSli, &out.BasicSli + *out = make([]BasicSliParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CalendarPeriod != nil { + in, out := &in.CalendarPeriod, &out.CalendarPeriod *out = new(string) **out = **in } @@ -1329,48 +3229,48 @@ func (in *NotificationChannelParameters) DeepCopyInto(out *NotificationChannelPa *out = new(string) **out = **in } - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.ForceDelete != nil { - in, out := &in.ForceDelete, &out.ForceDelete - *out = new(bool) + if in.Goal != nil { + in, out := &in.Goal, &out.Goal + *out = new(float64) **out = **in } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]*string, len(*in)) - for key, val := range *in { - var outVal *string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = new(string) - **out = **in - } - (*out)[key] = outVal - } - } if in.Project != nil { in, out := &in.Project, &out.Project *out = new(string) **out = **in } - if in.SensitiveLabels != nil { - in, out := &in.SensitiveLabels, &out.SensitiveLabels - *out = make([]SensitiveLabelsParameters, len(*in)) + if in.RequestBasedSli != nil { + in, out := &in.RequestBasedSli, &out.RequestBasedSli + *out = make([]RequestBasedSliParameters, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.Type != nil { - in, out := &in.Type, &out.Type + if in.RollingPeriodDays != nil { + in, out := &in.RollingPeriodDays, &out.RollingPeriodDays + *out = new(float64) + **out = **in + } + if in.SLOID != nil { + in, out := &in.SLOID, &out.SLOID *out = new(string) **out = **in } + if 
in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } + if in.ServiceRef != nil { + in, out := &in.ServiceRef, &out.ServiceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceSelector != nil { + in, out := &in.ServiceSelector, &out.ServiceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } if in.UserLabels != nil { in, out := &in.UserLabels, &out.UserLabels *out = make(map[string]*string, len(*in)) @@ -1386,123 +3286,55 @@ func (in *NotificationChannelParameters) DeepCopyInto(out *NotificationChannelPa (*out)[key] = outVal } } + if in.WindowsBasedSli != nil { + in, out := &in.WindowsBasedSli, &out.WindowsBasedSli + *out = make([]WindowsBasedSliParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationChannelParameters. -func (in *NotificationChannelParameters) DeepCopy() *NotificationChannelParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SLOParameters. +func (in *SLOParameters) DeepCopy() *SLOParameters { if in == nil { return nil } - out := new(NotificationChannelParameters) + out := new(SLOParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NotificationChannelSpec) DeepCopyInto(out *NotificationChannelSpec) { +func (in *SLOSpec) DeepCopyInto(out *SLOSpec) { *out = *in in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) in.ForProvider.DeepCopyInto(&out.ForProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationChannelSpec. -func (in *NotificationChannelSpec) DeepCopy() *NotificationChannelSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SLOSpec. 
+func (in *SLOSpec) DeepCopy() *SLOSpec { if in == nil { return nil } - out := new(NotificationChannelSpec) + out := new(SLOSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NotificationChannelStatus) DeepCopyInto(out *NotificationChannelStatus) { +func (in *SLOStatus) DeepCopyInto(out *SLOStatus) { *out = *in in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) in.AtProvider.DeepCopyInto(&out.AtProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationChannelStatus. -func (in *NotificationChannelStatus) DeepCopy() *NotificationChannelStatus { - if in == nil { - return nil - } - out := new(NotificationChannelStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NotificationRateLimitObservation) DeepCopyInto(out *NotificationRateLimitObservation) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationRateLimitObservation. -func (in *NotificationRateLimitObservation) DeepCopy() *NotificationRateLimitObservation { - if in == nil { - return nil - } - out := new(NotificationRateLimitObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NotificationRateLimitParameters) DeepCopyInto(out *NotificationRateLimitParameters) { - *out = *in - if in.Period != nil { - in, out := &in.Period, &out.Period - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationRateLimitParameters. 
-func (in *NotificationRateLimitParameters) DeepCopy() *NotificationRateLimitParameters { - if in == nil { - return nil - } - out := new(NotificationRateLimitParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceGroupObservation) DeepCopyInto(out *ResourceGroupObservation) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupObservation. -func (in *ResourceGroupObservation) DeepCopy() *ResourceGroupObservation { - if in == nil { - return nil - } - out := new(ResourceGroupObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceGroupParameters) DeepCopyInto(out *ResourceGroupParameters) { - *out = *in - if in.GroupID != nil { - in, out := &in.GroupID, &out.GroupID - *out = new(string) - **out = **in - } - if in.ResourceType != nil { - in, out := &in.ResourceType, &out.ResourceType - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupParameters. -func (in *ResourceGroupParameters) DeepCopy() *ResourceGroupParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SLOStatus. +func (in *SLOStatus) DeepCopy() *SLOStatus { if in == nil { return nil } - out := new(ResourceGroupParameters) + out := new(SLOStatus) in.DeepCopyInto(out) return out } @@ -1587,6 +3419,41 @@ func (in *TCPCheckParameters) DeepCopy() *TCPCheckParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TelemetryObservation) DeepCopyInto(out *TelemetryObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemetryObservation. +func (in *TelemetryObservation) DeepCopy() *TelemetryObservation { + if in == nil { + return nil + } + out := new(TelemetryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TelemetryParameters) DeepCopyInto(out *TelemetryParameters) { + *out = *in + if in.ResourceName != nil { + in, out := &in.ResourceName, &out.ResourceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemetryParameters. +func (in *TelemetryParameters) DeepCopy() *TelemetryParameters { + if in == nil { + return nil + } + out := new(TelemetryParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TriggerObservation) DeepCopyInto(out *TriggerObservation) { *out = *in @@ -1835,3 +3702,64 @@ func (in *UptimeCheckConfigStatus) DeepCopy() *UptimeCheckConfigStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsBasedSliObservation) DeepCopyInto(out *WindowsBasedSliObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsBasedSliObservation. +func (in *WindowsBasedSliObservation) DeepCopy() *WindowsBasedSliObservation { + if in == nil { + return nil + } + out := new(WindowsBasedSliObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsBasedSliParameters) DeepCopyInto(out *WindowsBasedSliParameters) { + *out = *in + if in.GoodBadMetricFilter != nil { + in, out := &in.GoodBadMetricFilter, &out.GoodBadMetricFilter + *out = new(string) + **out = **in + } + if in.GoodTotalRatioThreshold != nil { + in, out := &in.GoodTotalRatioThreshold, &out.GoodTotalRatioThreshold + *out = make([]GoodTotalRatioThresholdParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricMeanInRange != nil { + in, out := &in.MetricMeanInRange, &out.MetricMeanInRange + *out = make([]MetricMeanInRangeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricSumInRange != nil { + in, out := &in.MetricSumInRange, &out.MetricSumInRange + *out = make([]MetricSumInRangeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WindowPeriod != nil { + in, out := &in.WindowPeriod, &out.WindowPeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsBasedSliParameters. +func (in *WindowsBasedSliParameters) DeepCopy() *WindowsBasedSliParameters { + if in == nil { + return nil + } + out := new(WindowsBasedSliParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/monitoring/v1beta1/zz_generated.managed.go b/apis/monitoring/v1beta1/zz_generated.managed.go index 8a159ce99..61e01c4a8 100644 --- a/apis/monitoring/v1beta1/zz_generated.managed.go +++ b/apis/monitoring/v1beta1/zz_generated.managed.go @@ -85,6 +85,270 @@ func (mg *AlertPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReferen mg.Spec.WriteConnectionSecretToReference = r } +// GetCondition of this CustomService. +func (mg *CustomService) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CustomService. 
+func (mg *CustomService) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetProviderConfigReference of this CustomService. +func (mg *CustomService) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this CustomService. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *CustomService) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this CustomService. +func (mg *CustomService) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CustomService. +func (mg *CustomService) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CustomService. +func (mg *CustomService) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CustomService. +func (mg *CustomService) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetProviderConfigReference of this CustomService. +func (mg *CustomService) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this CustomService. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *CustomService) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this CustomService. +func (mg *CustomService) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CustomService. +func (mg *CustomService) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Dashboard. 
+func (mg *Dashboard) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Dashboard. +func (mg *Dashboard) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetProviderConfigReference of this Dashboard. +func (mg *Dashboard) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this Dashboard. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *Dashboard) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this Dashboard. +func (mg *Dashboard) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Dashboard. +func (mg *Dashboard) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Dashboard. +func (mg *Dashboard) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Dashboard. +func (mg *Dashboard) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetProviderConfigReference of this Dashboard. +func (mg *Dashboard) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this Dashboard. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *Dashboard) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this Dashboard. +func (mg *Dashboard) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Dashboard. 
+func (mg *Dashboard) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Group. +func (mg *Group) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Group. +func (mg *Group) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetProviderConfigReference of this Group. +func (mg *Group) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this Group. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *Group) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this Group. +func (mg *Group) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Group. +func (mg *Group) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Group. +func (mg *Group) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Group. +func (mg *Group) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetProviderConfigReference of this Group. +func (mg *Group) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this Group. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *Group) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this Group. +func (mg *Group) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Group. 
+func (mg *Group) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MetricDescriptor. +func (mg *MetricDescriptor) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MetricDescriptor. +func (mg *MetricDescriptor) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetProviderConfigReference of this MetricDescriptor. +func (mg *MetricDescriptor) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this MetricDescriptor. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *MetricDescriptor) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this MetricDescriptor. +func (mg *MetricDescriptor) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MetricDescriptor. +func (mg *MetricDescriptor) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MetricDescriptor. +func (mg *MetricDescriptor) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MetricDescriptor. +func (mg *MetricDescriptor) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetProviderConfigReference of this MetricDescriptor. +func (mg *MetricDescriptor) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this MetricDescriptor. +Deprecated: Use SetProviderConfigReference. 
+*/ +func (mg *MetricDescriptor) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this MetricDescriptor. +func (mg *MetricDescriptor) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MetricDescriptor. +func (mg *MetricDescriptor) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this NotificationChannel. func (mg *NotificationChannel) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) @@ -151,6 +415,72 @@ func (mg *NotificationChannel) SetWriteConnectionSecretToReference(r *xpv1.Secre mg.Spec.WriteConnectionSecretToReference = r } +// GetCondition of this SLO. +func (mg *SLO) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SLO. +func (mg *SLO) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetProviderConfigReference of this SLO. +func (mg *SLO) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this SLO. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *SLO) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this SLO. +func (mg *SLO) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SLO. +func (mg *SLO) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SLO. +func (mg *SLO) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SLO. 
+func (mg *SLO) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetProviderConfigReference of this SLO. +func (mg *SLO) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this SLO. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *SLO) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this SLO. +func (mg *SLO) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SLO. +func (mg *SLO) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this UptimeCheckConfig. func (mg *UptimeCheckConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) diff --git a/apis/monitoring/v1beta1/zz_generated.managedlist.go b/apis/monitoring/v1beta1/zz_generated.managedlist.go index ba3ea5410..8f576ff6b 100644 --- a/apis/monitoring/v1beta1/zz_generated.managedlist.go +++ b/apis/monitoring/v1beta1/zz_generated.managedlist.go @@ -28,6 +28,42 @@ func (l *AlertPolicyList) GetItems() []resource.Managed { return items } +// GetItems of this CustomServiceList. +func (l *CustomServiceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DashboardList. +func (l *DashboardList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this GroupList. 
+func (l *GroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MetricDescriptorList. +func (l *MetricDescriptorList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this NotificationChannelList. func (l *NotificationChannelList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) @@ -37,6 +73,15 @@ func (l *NotificationChannelList) GetItems() []resource.Managed { return items } +// GetItems of this SLOList. +func (l *SLOList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this UptimeCheckConfigList. func (l *UptimeCheckConfigList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) diff --git a/apis/monitoring/v1beta1/zz_generated.resolvers.go b/apis/monitoring/v1beta1/zz_generated.resolvers.go new file mode 100644 index 000000000..95bd46591 --- /dev/null +++ b/apis/monitoring/v1beta1/zz_generated.resolvers.go @@ -0,0 +1,107 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + resource "github.com/upbound/upjet/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Group. +func (mg *Group) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ParentName), + Extract: resource.ExtractParamPath("name", true), + Reference: mg.Spec.ForProvider.ParentNameRef, + Selector: mg.Spec.ForProvider.ParentNameSelector, + To: reference.To{ + List: &GroupList{}, + Managed: &Group{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ParentName") + } + mg.Spec.ForProvider.ParentName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ParentNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SLO. +func (mg *SLO) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Service), + Extract: resource.ExtractParamPath("service_id", false), + Reference: mg.Spec.ForProvider.ServiceRef, + Selector: mg.Spec.ForProvider.ServiceSelector, + To: reference.To{ + List: &CustomServiceList{}, + Managed: &CustomService{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Service") + } + mg.Spec.ForProvider.Service = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this UptimeCheckConfig. 
+func (mg *UptimeCheckConfig) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.ResourceGroup); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroup[i3].GroupID), + Extract: resource.ExtractParamPath("name", true), + Reference: mg.Spec.ForProvider.ResourceGroup[i3].GroupIDRef, + Selector: mg.Spec.ForProvider.ResourceGroup[i3].GroupIDSelector, + To: reference.To{ + List: &GroupList{}, + Managed: &Group{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroup[i3].GroupID") + } + mg.Spec.ForProvider.ResourceGroup[i3].GroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroup[i3].GroupIDRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/monitoring/v1beta1/zz_generated_terraformed.go b/apis/monitoring/v1beta1/zz_generated_terraformed.go index 318419710..b491c380f 100755 --- a/apis/monitoring/v1beta1/zz_generated_terraformed.go +++ b/apis/monitoring/v1beta1/zz_generated_terraformed.go @@ -99,6 +99,302 @@ func (tr *AlertPolicy) GetTerraformSchemaVersion() int { return 0 } +// GetTerraformResourceType returns Terraform resource type for this CustomService +func (mg *CustomService) GetTerraformResourceType() string { + return "google_monitoring_custom_service" +} + +// GetConnectionDetailsMapping for this CustomService +func (tr *CustomService) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CustomService +func (tr *CustomService) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CustomService +func (tr *CustomService) 
SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CustomService +func (tr *CustomService) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CustomService +func (tr *CustomService) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CustomService +func (tr *CustomService) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// LateInitialize this CustomService using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CustomService) LateInitialize(attrs []byte) (bool, error) { + params := &CustomServiceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CustomService) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this Dashboard +func (mg *Dashboard) GetTerraformResourceType() string { + return "google_monitoring_dashboard" +} + +// GetConnectionDetailsMapping for this Dashboard +func (tr *Dashboard) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Dashboard +func (tr *Dashboard) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Dashboard +func (tr *Dashboard) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Dashboard +func (tr *Dashboard) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Dashboard +func (tr *Dashboard) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Dashboard +func (tr *Dashboard) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// LateInitialize this Dashboard using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Dashboard) LateInitialize(attrs []byte) (bool, error) { + params := &DashboardParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Dashboard) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this Group +func (mg *Group) GetTerraformResourceType() string { + return "google_monitoring_group" +} + +// GetConnectionDetailsMapping for this Group +func (tr *Group) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Group +func (tr *Group) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Group +func (tr *Group) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Group +func (tr *Group) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Group +func (tr *Group) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Group +func (tr *Group) 
SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// LateInitialize this Group using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Group) LateInitialize(attrs []byte) (bool, error) { + params := &GroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Group) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this MetricDescriptor +func (mg *MetricDescriptor) GetTerraformResourceType() string { + return "google_monitoring_metric_descriptor" +} + +// GetConnectionDetailsMapping for this MetricDescriptor +func (tr *MetricDescriptor) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MetricDescriptor +func (tr *MetricDescriptor) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MetricDescriptor +func (tr *MetricDescriptor) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MetricDescriptor +func (tr *MetricDescriptor) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MetricDescriptor +func (tr *MetricDescriptor) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MetricDescriptor +func (tr *MetricDescriptor) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// LateInitialize this MetricDescriptor using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MetricDescriptor) LateInitialize(attrs []byte) (bool, error) { + params := &MetricDescriptorParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MetricDescriptor) GetTerraformSchemaVersion() int { + return 0 +} + // GetTerraformResourceType returns Terraform resource type for this NotificationChannel func (mg *NotificationChannel) GetTerraformResourceType() string { return "google_monitoring_notification_channel" @@ -173,6 +469,80 @@ func (tr *NotificationChannel) GetTerraformSchemaVersion() int { return 0 } +// GetTerraformResourceType returns Terraform resource type for this SLO +func (mg *SLO) GetTerraformResourceType() string { + return "google_monitoring_slo" +} + +// GetConnectionDetailsMapping for this SLO +func (tr *SLO) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SLO +func (tr *SLO) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SLO +func (tr *SLO) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SLO +func (tr *SLO) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SLO +func (tr *SLO) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SLO +func (tr *SLO) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + 
+// LateInitialize this SLO using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SLO) LateInitialize(attrs []byte) (bool, error) { + params := &SLOParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SLO) GetTerraformSchemaVersion() int { + return 0 +} + // GetTerraformResourceType returns Terraform resource type for this UptimeCheckConfig func (mg *UptimeCheckConfig) GetTerraformResourceType() string { return "google_monitoring_uptime_check_config" diff --git a/apis/monitoring/v1beta1/zz_group_types.go b/apis/monitoring/v1beta1/zz_group_types.go new file mode 100755 index 000000000..ac0bdcb8f --- /dev/null +++ b/apis/monitoring/v1beta1/zz_group_types.go @@ -0,0 +1,125 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type GroupObservation struct { + + // an identifier for the resource with format {{name}} + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A unique identifier for this group. The format is + // "projects/{project_id_or_number}/groups/{group_id}". + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type GroupParameters struct { + + // A user-assigned name for this group, used only for display + // purposes. + // +kubebuilder:validation:Required + DisplayName *string `json:"displayName" tf:"display_name,omitempty"` + + // The filter used to determine which monitored resources + // belong to this group. + // +kubebuilder:validation:Required + Filter *string `json:"filter" tf:"filter,omitempty"` + + // If true, the members of this group are considered to be a + // cluster. The system can perform additional analysis on + // groups that are clusters. + // +kubebuilder:validation:Optional + IsCluster *bool `json:"isCluster,omitempty" tf:"is_cluster,omitempty"` + + // The name of the group's parent, if it has one. The format is + // "projects/{project_id_or_number}/groups/{group_id}". For + // groups with no parent, parentName is the empty string, "". + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/monitoring/v1beta1.Group + // +crossplane:generate:reference:extractor=github.com/upbound/upjet/pkg/resource.ExtractParamPath("name",true) + // +kubebuilder:validation:Optional + ParentName *string `json:"parentName,omitempty" tf:"parent_name,omitempty"` + + // Reference to a Group in monitoring to populate parentName. + // +kubebuilder:validation:Optional + ParentNameRef *v1.Reference `json:"parentNameRef,omitempty" tf:"-"` + + // Selector for a Group in monitoring to populate parentName. 
+ // +kubebuilder:validation:Optional + ParentNameSelector *v1.Selector `json:"parentNameSelector,omitempty" tf:"-"` + + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. + // +kubebuilder:validation:Optional + Project *string `json:"project,omitempty" tf:"project,omitempty"` +} + +// GroupSpec defines the desired state of Group +type GroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider GroupParameters `json:"forProvider"` +} + +// GroupStatus defines the observed state of Group. +type GroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider GroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// Group is the Schema for the Groups API. The description of a dynamic collection of monitored resources. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,gcp} +type Group struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec GroupSpec `json:"spec"` + Status GroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GroupList contains a list of Groups +type GroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Group `json:"items"` +} + +// Repository type metadata. +var ( + Group_Kind = "Group" + Group_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Group_Kind}.String() + Group_KindAPIVersion = Group_Kind + "." 
+ CRDGroupVersion.String() + Group_GroupVersionKind = CRDGroupVersion.WithKind(Group_Kind) +) + +func init() { + SchemeBuilder.Register(&Group{}, &GroupList{}) +} diff --git a/apis/monitoring/v1beta1/zz_metricdescriptor_types.go b/apis/monitoring/v1beta1/zz_metricdescriptor_types.go new file mode 100755 index 000000000..37fe40378 --- /dev/null +++ b/apis/monitoring/v1beta1/zz_metricdescriptor_types.go @@ -0,0 +1,186 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type LabelsObservation struct { +} + +type LabelsParameters struct { + + // A human-readable description for the label. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The key for this label. The key must not exceed 100 characters. The first character of the key must be an upper- or lower-case letter, the remaining characters must be letters, digits or underscores, and the key must match the regular expression [a-zA-Z][a-zA-Z0-9_]* + // +kubebuilder:validation:Required + Key *string `json:"key" tf:"key,omitempty"` + + // The type of data that can be assigned to the label. + // Default value is STRING. + // Possible values are STRING, BOOL, and INT64. 
+ // +kubebuilder:validation:Optional + ValueType *string `json:"valueType,omitempty" tf:"value_type,omitempty"` +} + +type MetadataObservation struct { +} + +type MetadataParameters struct { + + // The delay of data points caused by ingestion. Data points older than this age are guaranteed to be ingested and available to be read, excluding data loss due to errors. In [duration format](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf?&_ga=2.264881487.1507873253.1593446723-935052455.1591817775#google.protobuf.Duration). + // +kubebuilder:validation:Optional + IngestDelay *string `json:"ingestDelay,omitempty" tf:"ingest_delay,omitempty"` + + // The sampling period of metric data points. For metrics which are written periodically, consecutive data points are stored at this time interval, excluding data loss due to errors. Metrics with a higher granularity have a smaller sampling period. In [duration format](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf?&_ga=2.264881487.1507873253.1593446723-935052455.1591817775#google.protobuf.Duration). + // +kubebuilder:validation:Optional + SamplePeriod *string `json:"samplePeriod,omitempty" tf:"sample_period,omitempty"` +} + +type MetricDescriptorObservation struct { + + // an identifier for the resource with format {{name}} + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // If present, then a time series, which is identified partially by a metric type and a MonitoredResourceDescriptor, that is associated with this metric type can only be associated with one of the monitored resource types listed here. This field allows time series to be associated with the intersection of this metric type and the monitored resource types in this list. + MonitoredResourceTypes []*string `json:"monitoredResourceTypes,omitempty" tf:"monitored_resource_types,omitempty"` + + // The resource name of the metric descriptor. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type MetricDescriptorParameters struct { + + // A detailed description of the metric, which can be used in documentation. + // +kubebuilder:validation:Required + Description *string `json:"description" tf:"description,omitempty"` + + // A concise name for the metric, which can be displayed in user interfaces. Use sentence case without an ending period, for example "Request count". + // +kubebuilder:validation:Required + DisplayName *string `json:"displayName" tf:"display_name,omitempty"` + + // The set of labels that can be used to describe a specific instance of this metric type. In order to delete a label, the entire resource must be deleted, then created with the desired labels. + // Structure is documented below. + // +kubebuilder:validation:Optional + Labels []LabelsParameters `json:"labels,omitempty" tf:"labels,omitempty"` + + // The launch stage of the metric definition. + // Possible values are LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, and DEPRECATED. + // +kubebuilder:validation:Optional + LaunchStage *string `json:"launchStage,omitempty" tf:"launch_stage,omitempty"` + + // Metadata which can be used to guide usage of the metric. + // Structure is documented below. + // +kubebuilder:validation:Optional + Metadata []MetadataParameters `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // Whether the metric records instantaneous values, changes to a value, etc. Some combinations of metricKind and valueType might not be supported. + // Possible values are METRIC_KIND_UNSPECIFIED, GAUGE, DELTA, and CUMULATIVE. + // +kubebuilder:validation:Required + MetricKind *string `json:"metricKind" tf:"metric_kind,omitempty"` + + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. 
+ // +kubebuilder:validation:Optional + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // The metric type, including its DNS name prefix. The type is not URL-encoded. All service defined metrics must be prefixed with the service name, in the format of {service name}/{relative metric name}, such as cloudsql.googleapis.com/database/cpu/utilization. The relative metric name must have only upper and lower-case letters, digits, '/' and underscores '_' are allowed. Additionally, the maximum number of characters allowed for the relative_metric_name is 100. All user-defined metric types have the DNS name custom.googleapis.com, external.googleapis.com, or logging.googleapis.com/user/. + // +kubebuilder:validation:Required + Type *string `json:"type" tf:"type,omitempty"` + + // The units in which the metric value is reported. It is only applicable if the + // valueType is INT64, DOUBLE, or DISTRIBUTION. The unit defines the representation of + // the stored metric values. + // Different systems may scale the values to be more easily displayed (so a value of + // 0.02KBy might be displayed as 20By, and a value of 3523KBy might be displayed as + // 3.5MBy). However, if the unit is KBy, then the value of the metric is always in + // thousands of bytes, no matter how it may be displayed. + // If you want a custom metric to record the exact number of CPU-seconds used by a job, + // you can create an INT64 CUMULATIVE metric whose unit is s{CPU} (or equivalently + // 1s{CPU} or just s). If the job uses 12,005 CPU-seconds, then the value is written as + // 12005. + // Alternatively, if you want a custom metric to record data in a more granular way, you + // can create a DOUBLE CUMULATIVE metric whose unit is ks{CPU}, and then write the value + // 12.005 (which is 12005/1000), or use Kis{CPU} and write 11.723 (which is 12005/1024). + // The supported units are a subset of The Unified Code for Units of Measure standard. 
+ // More info can be found in the API documentation + // (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors). + // +kubebuilder:validation:Optional + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Whether the measurement is an integer, a floating-point number, etc. Some combinations of metricKind and valueType might not be supported. + // Possible values are BOOL, INT64, DOUBLE, STRING, and DISTRIBUTION. + // +kubebuilder:validation:Required + ValueType *string `json:"valueType" tf:"value_type,omitempty"` +} + +// MetricDescriptorSpec defines the desired state of MetricDescriptor +type MetricDescriptorSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MetricDescriptorParameters `json:"forProvider"` +} + +// MetricDescriptorStatus defines the observed state of MetricDescriptor. +type MetricDescriptorStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MetricDescriptorObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// MetricDescriptor is the Schema for the MetricDescriptors API. Defines a metric type and its schema. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,gcp} +type MetricDescriptor struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec MetricDescriptorSpec `json:"spec"` + Status MetricDescriptorStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MetricDescriptorList contains a list of MetricDescriptors +type MetricDescriptorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MetricDescriptor `json:"items"` +} + +// Repository type metadata. +var ( + MetricDescriptor_Kind = "MetricDescriptor" + MetricDescriptor_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MetricDescriptor_Kind}.String() + MetricDescriptor_KindAPIVersion = MetricDescriptor_Kind + "." + CRDGroupVersion.String() + MetricDescriptor_GroupVersionKind = CRDGroupVersion.WithKind(MetricDescriptor_Kind) +) + +func init() { + SchemeBuilder.Register(&MetricDescriptor{}, &MetricDescriptorList{}) +} diff --git a/apis/monitoring/v1beta1/zz_slo_types.go b/apis/monitoring/v1beta1/zz_slo_types.go new file mode 100755 index 000000000..57a38afd8 --- /dev/null +++ b/apis/monitoring/v1beta1/zz_slo_types.go @@ -0,0 +1,666 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by upjet. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type AvailabilityObservation struct {
+}
+
+type AvailabilityParameters struct {
+
+	// Whether an availability SLI is enabled or not. Must be set to true. Defaults to true.
+	// +kubebuilder:validation:Optional
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+}
+
+type BasicSliObservation struct {
+}
+
+type BasicSliParameters struct {
+
+	// Availability based SLI, derived from count of requests made to this service that return successfully.
+	// Structure is documented below.
+	// +kubebuilder:validation:Optional
+	Availability []AvailabilityParameters `json:"availability,omitempty" tf:"availability,omitempty"`
+
+	// Parameters for a latency threshold SLI.
+	// Structure is documented below.
+	// +kubebuilder:validation:Optional
+	Latency []LatencyParameters `json:"latency,omitempty" tf:"latency,omitempty"`
+
+	// An optional set of locations to which this SLI is relevant.
+	// Telemetry from other locations will not be used to calculate
+	// performance for this SLI. If omitted, this SLI applies to all
+	// locations in which the Service has activity. For service types
+	// that don't support breaking down by location, setting this
+	// field will result in an error.
+	// +kubebuilder:validation:Optional
+	Location []*string `json:"location,omitempty" tf:"location,omitempty"`
+
+	// An optional set of RPCs to which this SLI is relevant.
+	// Telemetry from other methods will not be used to calculate
+	// performance for this SLI. If omitted, this SLI applies to all
+	// the Service's methods. For service types that don't support
+	// breaking down by method, setting this field will result in an
+	// error.
+	// +kubebuilder:validation:Optional
+	Method []*string `json:"method,omitempty" tf:"method,omitempty"`
+
+	// The set of API versions to which this SLI is relevant.
+	// Telemetry from other API versions will not be used to
+	// calculate performance for this SLI. If omitted,
+	// this SLI applies to all API versions. For service types
+	// that don't support breaking down by version, setting this
+	// field will result in an error.
+	// +kubebuilder:validation:Optional
+	Version []*string `json:"version,omitempty" tf:"version,omitempty"`
+}
+
+type BasicSliPerformanceAvailabilityObservation struct {
+}
+
+type BasicSliPerformanceAvailabilityParameters struct {
+
+	// Whether an availability SLI is enabled or not. Must be set to true. Defaults to true.
+	// +kubebuilder:validation:Optional
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+}
+
+type BasicSliPerformanceLatencyObservation struct {
+}
+
+type BasicSliPerformanceLatencyParameters struct {
+
+	// A duration string, e.g. 10s.
+	// Good service is defined to be the count of requests made to
+	// this service that return in no more than threshold.
+	// +kubebuilder:validation:Required
+	Threshold *string `json:"threshold" tf:"threshold,omitempty"`
+}
+
+type BasicSliPerformanceObservation struct {
+}
+
+type BasicSliPerformanceParameters struct {
+
+	// Availability based SLI, derived from count of requests made to this service that return successfully.
+	// Structure is documented below.
+ // +kubebuilder:validation:Optional + Availability []BasicSliPerformanceAvailabilityParameters `json:"availability,omitempty" tf:"availability,omitempty"` + + // Parameters for a latency threshold SLI. + // Structure is documented below. + // +kubebuilder:validation:Optional + Latency []BasicSliPerformanceLatencyParameters `json:"latency,omitempty" tf:"latency,omitempty"` + + // An optional set of locations to which this SLI is relevant. + // Telemetry from other locations will not be used to calculate + // performance for this SLI. If omitted, this SLI applies to all + // locations in which the Service has activity. For service types + // that don't support breaking down by location, setting this + // field will result in an error. + // +kubebuilder:validation:Optional + Location []*string `json:"location,omitempty" tf:"location,omitempty"` + + // An optional set of RPCs to which this SLI is relevant. + // Telemetry from other methods will not be used to calculate + // performance for this SLI. If omitted, this SLI applies to all + // the Service's methods. For service types that don't support + // breaking down by method, setting this field will result in an + // error. + // +kubebuilder:validation:Optional + Method []*string `json:"method,omitempty" tf:"method,omitempty"` + + // The set of API versions to which this SLI is relevant. + // Telemetry from other API versions will not be used to + // calculate performance for this SLI. If omitted, + // this SLI applies to all API versions. For service types + // that don't support breaking down by version, setting this + // field will result in an error. + // +kubebuilder:validation:Optional + Version []*string `json:"version,omitempty" tf:"version,omitempty"` +} + +type DistributionCutObservation struct { +} + +type DistributionCutParameters struct { + + // A TimeSeries monitoring filter + // aggregating values to quantify the good service provided. 
+ // Must have ValueType = DISTRIBUTION and + // MetricKind = DELTA or MetricKind = CUMULATIVE. + // +kubebuilder:validation:Required + DistributionFilter *string `json:"distributionFilter" tf:"distribution_filter,omitempty"` + + // Range of numerical values. The computed good_service + // will be the count of values x in the Distribution such + // that range.min <= x <= range.max. inclusive of min and + // max. Open ranges can be defined by setting + // just one of min or max. Summed value X should satisfy + // range.min <= X <= range.max for a good window. + // Structure is documented below. + // +kubebuilder:validation:Required + Range []RangeParameters `json:"range" tf:"range,omitempty"` +} + +type DistributionCutRangeObservation struct { +} + +type DistributionCutRangeParameters struct { + + // max value for the range (inclusive). If not given, + // will be set to "infinity", defining an open range + // ">= range.min" + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Min value for the range (inclusive). If not given, + // will be set to "-infinity", defining an open range + // "< range.max" + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type GoodTotalRatioObservation struct { +} + +type GoodTotalRatioParameters struct { + + // A TimeSeries monitoring filter + // quantifying bad service provided, either demanded service that + // was not provided or demanded service that was of inadequate + // quality. + // Must have ValueType = DOUBLE or ValueType = INT64 and + // must have MetricKind = DELTA or MetricKind = CUMULATIVE. + // Exactly two of good_service_filter,bad_service_filter,total_service_filter + // must be set (good + bad = total is assumed). + // +kubebuilder:validation:Optional + BadServiceFilter *string `json:"badServiceFilter,omitempty" tf:"bad_service_filter,omitempty"` + + // A TimeSeries monitoring filter + // quantifying good service provided. 
+ // Must have ValueType = DOUBLE or ValueType = INT64 and + // must have MetricKind = DELTA or MetricKind = CUMULATIVE. + // Exactly two of good_service_filter,bad_service_filter,total_service_filter + // must be set (good + bad = total is assumed). + // +kubebuilder:validation:Optional + GoodServiceFilter *string `json:"goodServiceFilter,omitempty" tf:"good_service_filter,omitempty"` + + // A TimeSeries monitoring filter + // quantifying total demanded service. + // Must have ValueType = DOUBLE or ValueType = INT64 and + // must have MetricKind = DELTA or MetricKind = CUMULATIVE. + // Exactly two of good_service_filter,bad_service_filter,total_service_filter + // must be set (good + bad = total is assumed). + // +kubebuilder:validation:Optional + TotalServiceFilter *string `json:"totalServiceFilter,omitempty" tf:"total_service_filter,omitempty"` +} + +type GoodTotalRatioThresholdObservation struct { +} + +type GoodTotalRatioThresholdParameters struct { + + // Basic SLI to evaluate to judge window quality. + // Structure is documented below. + // +kubebuilder:validation:Optional + BasicSliPerformance []BasicSliPerformanceParameters `json:"basicSliPerformance,omitempty" tf:"basic_sli_performance,omitempty"` + + // Request-based SLI to evaluate to judge window quality. + // Structure is documented below. + // +kubebuilder:validation:Optional + Performance []PerformanceParameters `json:"performance,omitempty" tf:"performance,omitempty"` + + // A duration string, e.g. 10s. + // Good service is defined to be the count of requests made to + // this service that return in no more than threshold. + // +kubebuilder:validation:Optional + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` +} + +type LatencyObservation struct { +} + +type LatencyParameters struct { + + // A duration string, e.g. 10s. + // Good service is defined to be the count of requests made to + // this service that return in no more than threshold. 
+ // +kubebuilder:validation:Required + Threshold *string `json:"threshold" tf:"threshold,omitempty"` +} + +type MetricMeanInRangeObservation struct { +} + +type MetricMeanInRangeParameters struct { + + // Range of numerical values. The computed good_service + // will be the count of values x in the Distribution such + // that range.min <= x <= range.max. inclusive of min and + // max. Open ranges can be defined by setting + // just one of min or max. Summed value X should satisfy + // range.min <= X <= range.max for a good window. + // Structure is documented below. + // +kubebuilder:validation:Required + Range []MetricMeanInRangeRangeParameters `json:"range" tf:"range,omitempty"` + + // A monitoring filter + // specifying the TimeSeries to use for evaluating window + // quality. The provided TimeSeries must have + // ValueType = INT64 or ValueType = DOUBLE and + // MetricKind = GAUGE. + // Summed value X should satisfy + // range.min <= X <= range.max for a good window. + // +kubebuilder:validation:Required + TimeSeries *string `json:"timeSeries" tf:"time_series,omitempty"` +} + +type MetricMeanInRangeRangeObservation struct { +} + +type MetricMeanInRangeRangeParameters struct { + + // max value for the range (inclusive). If not given, + // will be set to "infinity", defining an open range + // ">= range.min" + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Min value for the range (inclusive). If not given, + // will be set to "-infinity", defining an open range + // "< range.max" + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type MetricSumInRangeObservation struct { +} + +type MetricSumInRangeParameters struct { + + // Range of numerical values. The computed good_service + // will be the count of values x in the Distribution such + // that range.min <= x <= range.max. inclusive of min and + // max. 
Open ranges can be defined by setting + // just one of min or max. Summed value X should satisfy + // range.min <= X <= range.max for a good window. + // Structure is documented below. + // +kubebuilder:validation:Required + Range []MetricSumInRangeRangeParameters `json:"range" tf:"range,omitempty"` + + // A monitoring filter + // specifying the TimeSeries to use for evaluating window + // quality. The provided TimeSeries must have + // ValueType = INT64 or ValueType = DOUBLE and + // MetricKind = GAUGE. + // Summed value X should satisfy + // range.min <= X <= range.max for a good window. + // +kubebuilder:validation:Required + TimeSeries *string `json:"timeSeries" tf:"time_series,omitempty"` +} + +type MetricSumInRangeRangeObservation struct { +} + +type MetricSumInRangeRangeParameters struct { + + // max value for the range (inclusive). If not given, + // will be set to "infinity", defining an open range + // ">= range.min" + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Min value for the range (inclusive). If not given, + // will be set to "-infinity", defining an open range + // "< range.max" + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type PerformanceDistributionCutObservation struct { +} + +type PerformanceDistributionCutParameters struct { + + // A TimeSeries monitoring filter + // aggregating values to quantify the good service provided. + // Must have ValueType = DISTRIBUTION and + // MetricKind = DELTA or MetricKind = CUMULATIVE. + // +kubebuilder:validation:Required + DistributionFilter *string `json:"distributionFilter" tf:"distribution_filter,omitempty"` + + // Range of numerical values. The computed good_service + // will be the count of values x in the Distribution such + // that range.min <= x <= range.max. inclusive of min and + // max. Open ranges can be defined by setting + // just one of min or max. 
Summed value X should satisfy + // range.min <= X <= range.max for a good window. + // Structure is documented below. + // +kubebuilder:validation:Required + Range []DistributionCutRangeParameters `json:"range" tf:"range,omitempty"` +} + +type PerformanceGoodTotalRatioObservation struct { +} + +type PerformanceGoodTotalRatioParameters struct { + + // A TimeSeries monitoring filter + // quantifying bad service provided, either demanded service that + // was not provided or demanded service that was of inadequate + // quality. + // Must have ValueType = DOUBLE or ValueType = INT64 and + // must have MetricKind = DELTA or MetricKind = CUMULATIVE. + // Exactly two of good_service_filter,bad_service_filter,total_service_filter + // must be set (good + bad = total is assumed). + // +kubebuilder:validation:Optional + BadServiceFilter *string `json:"badServiceFilter,omitempty" tf:"bad_service_filter,omitempty"` + + // A TimeSeries monitoring filter + // quantifying good service provided. + // Must have ValueType = DOUBLE or ValueType = INT64 and + // must have MetricKind = DELTA or MetricKind = CUMULATIVE. + // Exactly two of good_service_filter,bad_service_filter,total_service_filter + // must be set (good + bad = total is assumed). + // +kubebuilder:validation:Optional + GoodServiceFilter *string `json:"goodServiceFilter,omitempty" tf:"good_service_filter,omitempty"` + + // A TimeSeries monitoring filter + // quantifying total demanded service. + // Must have ValueType = DOUBLE or ValueType = INT64 and + // must have MetricKind = DELTA or MetricKind = CUMULATIVE. + // Exactly two of good_service_filter,bad_service_filter,total_service_filter + // must be set (good + bad = total is assumed). 
+ // +kubebuilder:validation:Optional + TotalServiceFilter *string `json:"totalServiceFilter,omitempty" tf:"total_service_filter,omitempty"` +} + +type PerformanceObservation struct { +} + +type PerformanceParameters struct { + + // Used when good_service is defined by a count of values aggregated in a + // Distribution that fall into a good range. The total_service is the + // total count of all values aggregated in the Distribution. + // Defines a distribution TimeSeries filter and thresholds used for + // measuring good service and total service. + // Exactly one of distribution_cut or good_total_ratio can be set. + // Structure is documented below. + // +kubebuilder:validation:Optional + DistributionCut []PerformanceDistributionCutParameters `json:"distributionCut,omitempty" tf:"distribution_cut,omitempty"` + + // A means to compute a ratio of good_service to total_service. + // Defines computing this ratio with two TimeSeries monitoring filters + // Must specify exactly two of good, bad, and total service filters. + // The relationship good_service + bad_service = total_service + // will be assumed. + // Exactly one of distribution_cut or good_total_ratio can be set. + // Structure is documented below. + // +kubebuilder:validation:Optional + GoodTotalRatio []PerformanceGoodTotalRatioParameters `json:"goodTotalRatio,omitempty" tf:"good_total_ratio,omitempty"` +} + +type RangeObservation struct { +} + +type RangeParameters struct { + + // max value for the range (inclusive). If not given, + // will be set to "infinity", defining an open range + // ">= range.min" + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Min value for the range (inclusive). 
If not given, + // will be set to "-infinity", defining an open range + // "< range.max" + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type RequestBasedSliObservation struct { +} + +type RequestBasedSliParameters struct { + + // Used when good_service is defined by a count of values aggregated in a + // Distribution that fall into a good range. The total_service is the + // total count of all values aggregated in the Distribution. + // Defines a distribution TimeSeries filter and thresholds used for + // measuring good service and total service. + // Exactly one of distribution_cut or good_total_ratio can be set. + // Structure is documented below. + // +kubebuilder:validation:Optional + DistributionCut []DistributionCutParameters `json:"distributionCut,omitempty" tf:"distribution_cut,omitempty"` + + // A means to compute a ratio of good_service to total_service. + // Defines computing this ratio with two TimeSeries monitoring filters + // Must specify exactly two of good, bad, and total service filters. + // The relationship good_service + bad_service = total_service + // will be assumed. + // Exactly one of distribution_cut or good_total_ratio can be set. + // Structure is documented below. + // +kubebuilder:validation:Optional + GoodTotalRatio []GoodTotalRatioParameters `json:"goodTotalRatio,omitempty" tf:"good_total_ratio,omitempty"` +} + +type SLOObservation struct { + + // an identifier for the resource with format {{name}} + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The full resource name for this service. The syntax is: + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type SLOParameters struct { + + // Basic Service-Level Indicator (SLI) on a well-known service type. + // Performance will be computed on the basis of pre-defined metrics. 
+ // SLIs are used to measure and calculate the quality of the Service's + // performance with respect to a single aspect of service quality. + // Exactly one of the following must be set: + // basic_sli, request_based_sli, windows_based_sli + // Structure is documented below. + // +kubebuilder:validation:Optional + BasicSli []BasicSliParameters `json:"basicSli,omitempty" tf:"basic_sli,omitempty"` + + // A calendar period, semantically "since the start of the current + // ". + // Possible values are DAY, WEEK, FORTNIGHT, and MONTH. + // +kubebuilder:validation:Optional + CalendarPeriod *string `json:"calendarPeriod,omitempty" tf:"calendar_period,omitempty"` + + // Name used for UI elements listing this SLO. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // The fraction of service that must be good in order for this objective + // to be met. 0 < goal <= 0.999 + // +kubebuilder:validation:Required + Goal *float64 `json:"goal" tf:"goal,omitempty"` + + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. + // +kubebuilder:validation:Optional + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // A request-based SLI defines a SLI for which atomic units of + // service are counted directly. + // A SLI describes a good service. + // It is used to measure and calculate the quality of the Service's + // performance with respect to a single aspect of service quality. + // Exactly one of the following must be set: + // basic_sli, request_based_sli, windows_based_sli + // Structure is documented below. + // +kubebuilder:validation:Optional + RequestBasedSli []RequestBasedSliParameters `json:"requestBasedSli,omitempty" tf:"request_based_sli,omitempty"` + + // A rolling time period, semantically "in the past X days". + // Must be between 1 to 30 days, inclusive. 
+ // +kubebuilder:validation:Optional + RollingPeriodDays *float64 `json:"rollingPeriodDays,omitempty" tf:"rolling_period_days,omitempty"` + + // The id to use for this ServiceLevelObjective. If omitted, an id will be generated instead. + // +kubebuilder:validation:Optional + SLOID *string `json:"sloId,omitempty" tf:"slo_id,omitempty"` + + // ID of the service to which this SLO belongs. + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/monitoring/v1beta1.CustomService + // +crossplane:generate:reference:extractor=github.com/upbound/upjet/pkg/resource.ExtractParamPath("service_id",false) + // +kubebuilder:validation:Optional + Service *string `json:"service,omitempty" tf:"service,omitempty"` + + // Reference to a CustomService in monitoring to populate service. + // +kubebuilder:validation:Optional + ServiceRef *v1.Reference `json:"serviceRef,omitempty" tf:"-"` + + // Selector for a CustomService in monitoring to populate service. + // +kubebuilder:validation:Optional + ServiceSelector *v1.Selector `json:"serviceSelector,omitempty" tf:"-"` + + // This field is intended to be used for organizing and identifying the AlertPolicy + // objects.The field can contain up to 64 entries. Each key and value is limited + // to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values + // can contain only lowercase letters, numerals, underscores, and dashes. Keys + // must begin with a letter. + // +kubebuilder:validation:Optional + UserLabels map[string]*string `json:"userLabels,omitempty" tf:"user_labels,omitempty"` + + // A windows-based SLI defines the criteria for time windows. + // good_service is defined based off the count of these time windows + // for which the provided service was of good quality. + // A SLI describes a good service. It is used to measure and calculate + // the quality of the Service's performance with respect to a single + // aspect of service quality. 
+ // Exactly one of the following must be set: + // basic_sli, request_based_sli, windows_based_sli + // Structure is documented below. + // +kubebuilder:validation:Optional + WindowsBasedSli []WindowsBasedSliParameters `json:"windowsBasedSli,omitempty" tf:"windows_based_sli,omitempty"` +} + +type WindowsBasedSliObservation struct { +} + +type WindowsBasedSliParameters struct { + + // A TimeSeries monitoring filter + // with ValueType = BOOL. The window is good if any true values + // appear in the window. One of good_bad_metric_filter, + // good_total_ratio_threshold, metric_mean_in_range, + // metric_sum_in_range must be set for windows_based_sli. + // +kubebuilder:validation:Optional + GoodBadMetricFilter *string `json:"goodBadMetricFilter,omitempty" tf:"good_bad_metric_filter,omitempty"` + + // Criterion that describes a window as good if its performance is + // high enough. One of good_bad_metric_filter, + // good_total_ratio_threshold, metric_mean_in_range, + // metric_sum_in_range must be set for windows_based_sli. + // Structure is documented below. + // +kubebuilder:validation:Optional + GoodTotalRatioThreshold []GoodTotalRatioThresholdParameters `json:"goodTotalRatioThreshold,omitempty" tf:"good_total_ratio_threshold,omitempty"` + + // Criterion that describes a window as good if the metric's value + // is in a good range, averaged across returned streams. + // One of good_bad_metric_filter, + // good_total_ratio_threshold, metric_mean_in_range, + // metric_sum_in_range must be set for windows_based_sli. + // Average value X of time_series should satisfy + // range.min <= X <= range.max for a good window. + // Structure is documented below. + // +kubebuilder:validation:Optional + MetricMeanInRange []MetricMeanInRangeParameters `json:"metricMeanInRange,omitempty" tf:"metric_mean_in_range,omitempty"` + + // Criterion that describes a window as good if the metric's value + // is in a good range, summed across returned streams. 
+ // Summed value X of time_series should satisfy + // range.min <= X <= range.max for a good window. + // One of good_bad_metric_filter, + // good_total_ratio_threshold, metric_mean_in_range, + // metric_sum_in_range must be set for windows_based_sli. + // Structure is documented below. + // +kubebuilder:validation:Optional + MetricSumInRange []MetricSumInRangeParameters `json:"metricSumInRange,omitempty" tf:"metric_sum_in_range,omitempty"` + + // Duration over which window quality is evaluated, given as a + // duration string "{X}s" representing X seconds. Must be an + // integer fraction of a day and at least 60s. + // +kubebuilder:validation:Optional + WindowPeriod *string `json:"windowPeriod,omitempty" tf:"window_period,omitempty"` +} + +// SLOSpec defines the desired state of SLO +type SLOSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SLOParameters `json:"forProvider"` +} + +// SLOStatus defines the observed state of SLO. +type SLOStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SLOObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// SLO is the Schema for the SLOs API. A Service-Level Objective (SLO) describes the level of desired good service. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,gcp} +type SLO struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec SLOSpec `json:"spec"` + Status SLOStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SLOList contains a list of SLOs +type SLOList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SLO `json:"items"` +} + +// Repository type metadata. +var ( + SLO_Kind = "SLO" + SLO_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SLO_Kind}.String() + SLO_KindAPIVersion = SLO_Kind + "." + CRDGroupVersion.String() + SLO_GroupVersionKind = CRDGroupVersion.WithKind(SLO_Kind) +) + +func init() { + SchemeBuilder.Register(&SLO{}, &SLOList{}) +} diff --git a/apis/monitoring/v1beta1/zz_uptimecheckconfig_types.go b/apis/monitoring/v1beta1/zz_uptimecheckconfig_types.go index 46e467fcc..f6930674d 100755 --- a/apis/monitoring/v1beta1/zz_uptimecheckconfig_types.go +++ b/apis/monitoring/v1beta1/zz_uptimecheckconfig_types.go @@ -167,9 +167,19 @@ type ResourceGroupObservation struct { type ResourceGroupParameters struct { // The group of resources being monitored. 
Should be the name of a group + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/monitoring/v1beta1.Group + // +crossplane:generate:reference:extractor=github.com/upbound/upjet/pkg/resource.ExtractParamPath("name",true) // +kubebuilder:validation:Optional GroupID *string `json:"groupId,omitempty" tf:"group_id,omitempty"` + // Reference to a Group in monitoring to populate groupId. + // +kubebuilder:validation:Optional + GroupIDRef *v1.Reference `json:"groupIdRef,omitempty" tf:"-"` + + // Selector for a Group in monitoring to populate groupId. + // +kubebuilder:validation:Optional + GroupIDSelector *v1.Selector `json:"groupIdSelector,omitempty" tf:"-"` + // The resource type of the group members. // Possible values are RESOURCE_TYPE_UNSPECIFIED, INSTANCE, and AWS_ELB_LOAD_BALANCER. // +kubebuilder:validation:Optional diff --git a/config/externalname.go b/config/externalname.go index b159246b8..7f8c72242 100644 --- a/config/externalname.go +++ b/config/externalname.go @@ -457,6 +457,16 @@ var externalNameConfigs = map[string]config.ExternalName{ "google_monitoring_notification_channel": config.IdentifierFromProvider, // Imported by using the following format: {{name}} "google_monitoring_uptime_check_config": config.IdentifierFromProvider, + // Service can be imported using Name + "google_monitoring_custom_service": config.IdentifierFromProvider, + // Dashboard can be imported using dashboard_id + "google_monitoring_dashboard": config.IdentifierFromProvider, + // Group can be imported using Name + "google_monitoring_group": config.IdentifierFromProvider, + // MetricDescriptor can be imported using Name + "google_monitoring_metric_descriptor": config.IdentifierFromProvider, + // Slo can be imported using Name + "google_monitoring_slo": config.IdentifierFromProvider, // notebooks // diff --git a/config/externalnamenottested.go b/config/externalnamenottested.go index 41754a9db..d56f1d4fb 100644 --- a/config/externalnamenottested.go +++ 
b/config/externalnamenottested.go @@ -255,18 +255,15 @@ var ExternalNameNotTestedConfigs = map[string]config.ExternalName{ // projects/my-project/sinks/my-sink "google_logging_project_sink": config.TemplatedStringAsIdentifier("name", "projects/{{ .setup.configuration.project }}/sinks/{{ .external_name }}"), - // monitoring + // memcache // - // Service can be imported using Name - "google_monitoring_custom_service": config.NameAsIdentifier, - // Dashboard can be imported using dashboard_id - "google_monitoring_dashboard": config.IdentifierFromProvider, - // Group can be imported using Name - "google_monitoring_group": config.NameAsIdentifier, - // MetricDescriptor can be imported using Name - "google_monitoring_metric_descriptor": config.NameAsIdentifier, - // Slo can be imported using Name - "google_monitoring_slo": config.NameAsIdentifier, + // Instance can be imported using Name + "google_memcache_instance": config.NameAsIdentifier, + + // mlengine + // + // Model can be imported using Name + "google_ml_engine_model": config.NameAsIdentifier, // network // diff --git a/examples-generated/monitoring/customservice.yaml b/examples-generated/monitoring/customservice.yaml new file mode 100644 index 000000000..b4fa343bc --- /dev/null +++ b/examples-generated/monitoring/customservice.yaml @@ -0,0 +1,20 @@ +apiVersion: monitoring.gcp.upbound.io/v1beta1 +kind: CustomService +metadata: + annotations: + meta.upbound.io/example-id: monitoring/v1beta1/customservice + labels: + testing.upbound.io/example-name: custom + name: custom +spec: + forProvider: + displayName: My Custom Service custom-srv + serviceId: custom-srv + telemetry: + - resourceName: //product.googleapis.com/foo/foo/services/test + userLabels: + my_key: my_value + my_other_key: my_other_value + +--- + diff --git a/examples-generated/monitoring/dashboard.yaml b/examples-generated/monitoring/dashboard.yaml new file mode 100644 index 000000000..43c8bcf24 --- /dev/null +++ 
b/examples-generated/monitoring/dashboard.yaml @@ -0,0 +1,25 @@ +apiVersion: monitoring.gcp.upbound.io/v1beta1 +kind: Dashboard +metadata: + annotations: + meta.upbound.io/example-id: monitoring/v1beta1/dashboard + labels: + testing.upbound.io/example-name: dashboard + name: dashboard +spec: + forProvider: + dashboardJson: |+ + { + "displayName": "Demo Dashboard", + "gridLayout": { + "widgets": [ + { + "blank": {} + } + ] + } + } + + +--- + diff --git a/examples-generated/monitoring/group.yaml b/examples-generated/monitoring/group.yaml new file mode 100644 index 000000000..b931acdff --- /dev/null +++ b/examples-generated/monitoring/group.yaml @@ -0,0 +1,15 @@ +apiVersion: monitoring.gcp.upbound.io/v1beta1 +kind: Group +metadata: + annotations: + meta.upbound.io/example-id: monitoring/v1beta1/group + labels: + testing.upbound.io/example-name: basic + name: basic +spec: + forProvider: + displayName: tf-test MonitoringGroup + filter: resource.metadata.region="europe-west2" + +--- + diff --git a/examples-generated/monitoring/metricdescriptor.yaml b/examples-generated/monitoring/metricdescriptor.yaml new file mode 100644 index 000000000..f7c0f7e97 --- /dev/null +++ b/examples-generated/monitoring/metricdescriptor.yaml @@ -0,0 +1,27 @@ +apiVersion: monitoring.gcp.upbound.io/v1beta1 +kind: MetricDescriptor +metadata: + annotations: + meta.upbound.io/example-id: monitoring/v1beta1/metricdescriptor + labels: + testing.upbound.io/example-name: basic + name: basic +spec: + forProvider: + description: Daily sales records from all branch stores. + displayName: metric-descriptor + labels: + - description: The ID of the store. 
+ key: store_id + valueType: STRING + launchStage: BETA + metadata: + - ingestDelay: 30s + samplePeriod: 60s + metricKind: GAUGE + type: custom.googleapis.com/stores/daily_sales + unit: '{USD}' + valueType: DOUBLE + +--- + diff --git a/examples-generated/monitoring/slo.yaml b/examples-generated/monitoring/slo.yaml new file mode 100644 index 000000000..9779a3066 --- /dev/null +++ b/examples-generated/monitoring/slo.yaml @@ -0,0 +1,26 @@ +apiVersion: monitoring.gcp.upbound.io/v1beta1 +kind: SLO +metadata: + annotations: + meta.upbound.io/example-id: monitoring/v1beta1/slo + labels: + testing.upbound.io/example-name: appeng_slo + name: appeng-slo +spec: + forProvider: + basicSli: + - latency: + - threshold: 1s + calendarPeriod: DAY + displayName: Terraform Test SLO for App Engine + goal: 0.9 + serviceSelector: + matchLabels: + testing.upbound.io/example-name: google_monitoring_app_engine_service + sloId: ae-slo + userLabels: + my_key: my_value + my_other_key: my_other_value + +--- + diff --git a/examples/monitoring/customservice.yaml b/examples/monitoring/customservice.yaml new file mode 100644 index 000000000..9d8a5bb81 --- /dev/null +++ b/examples/monitoring/customservice.yaml @@ -0,0 +1,17 @@ +apiVersion: monitoring.gcp.upbound.io/v1beta1 +kind: CustomService +metadata: + annotations: + meta.upbound.io/example-id: monitoring/v1beta1/customservice + labels: + testing.upbound.io/example-name: custom + name: custom +spec: + forProvider: + displayName: My Custom Service custom-srv + serviceId: custom-srv + telemetry: + - resourceName: //product.googleapis.com/foo/foo/services/test + userLabels: + my_key: my_value + my_other_key: my_other_value \ No newline at end of file diff --git a/examples/monitoring/dashboard.yaml b/examples/monitoring/dashboard.yaml new file mode 100644 index 000000000..93e50d012 --- /dev/null +++ b/examples/monitoring/dashboard.yaml @@ -0,0 +1,21 @@ +apiVersion: monitoring.gcp.upbound.io/v1beta1 +kind: Dashboard +metadata: + annotations: + 
meta.upbound.io/example-id: monitoring/v1beta1/dashboard + labels: + testing.upbound.io/example-name: dashboard + name: dashboard +spec: + forProvider: + dashboardJson: |+ + { + "displayName": "Demo Dashboard", + "gridLayout": { + "widgets": [ + { + "blank": {} + } + ] + } + } \ No newline at end of file diff --git a/examples/monitoring/group.yaml b/examples/monitoring/group.yaml new file mode 100644 index 000000000..425a48272 --- /dev/null +++ b/examples/monitoring/group.yaml @@ -0,0 +1,12 @@ +apiVersion: monitoring.gcp.upbound.io/v1beta1 +kind: Group +metadata: + annotations: + meta.upbound.io/example-id: monitoring/v1beta1/group + labels: + testing.upbound.io/example-name: basic + name: basic +spec: + forProvider: + displayName: tf-test MonitoringGroup + filter: resource.metadata.region="europe-west2" \ No newline at end of file diff --git a/examples/monitoring/metricdescriptor.yaml b/examples/monitoring/metricdescriptor.yaml new file mode 100644 index 000000000..cb0dd6d98 --- /dev/null +++ b/examples/monitoring/metricdescriptor.yaml @@ -0,0 +1,24 @@ +apiVersion: monitoring.gcp.upbound.io/v1beta1 +kind: MetricDescriptor +metadata: + annotations: + meta.upbound.io/example-id: monitoring/v1beta1/metricdescriptor + labels: + testing.upbound.io/example-name: basic + name: basic +spec: + forProvider: + description: Daily sales records from all branch stores. + displayName: metric-descriptor + labels: + - description: The ID of the store. 
+ key: store_id + valueType: STRING + launchStage: BETA + metadata: + - ingestDelay: 30s + samplePeriod: 60s + metricKind: GAUGE + type: custom.googleapis.com/stores/daily_sales + unit: '{USD}' + valueType: DOUBLE \ No newline at end of file diff --git a/examples/monitoring/slo.yaml b/examples/monitoring/slo.yaml new file mode 100644 index 000000000..8390cd985 --- /dev/null +++ b/examples/monitoring/slo.yaml @@ -0,0 +1,45 @@ +apiVersion: monitoring.gcp.upbound.io/v1beta1 +kind: SLO +metadata: + annotations: + meta.upbound.io/example-id: monitoring/v1beta1/slo + labels: + testing.upbound.io/example-name: appeng_slo + name: appeng-slo +spec: + forProvider: + requestBasedSli: + - distributionCut: + - distributionFilter: "metric.type=\"serviceruntime.googleapis.com/api/request_latencies\" resource.type=\"api\"" + range: + - max: 0.5 + calendarPeriod: DAY + displayName: Terraform Test SLO for App Engine + goal: 0.9 + serviceSelector: + matchLabels: + testing.upbound.io/example-name: google_monitoring_app_engine_service + sloId: ae-slo + userLabels: + my_key: my_value + my_other_key: my_other_value + +--- + +apiVersion: monitoring.gcp.upbound.io/v1beta1 +kind: CustomService +metadata: + annotations: + meta.upbound.io/example-id: monitoring/v1beta1/slo + labels: + testing.upbound.io/example-name: google_monitoring_app_engine_service + name: google-monitoring-app-engine-service +spec: + forProvider: + displayName: My Custom Service custom-srv + serviceId: custom-srv + telemetry: + - resourceName: //product.googleapis.com/foo/foo/services/test + userLabels: + my_key: my_value + my_other_key: my_other_value \ No newline at end of file diff --git a/internal/controller/monitoring/customservice/zz_controller.go b/internal/controller/monitoring/customservice/zz_controller.go new file mode 100755 index 000000000..4a07349b6 --- /dev/null +++ b/internal/controller/monitoring/customservice/zz_controller.go @@ -0,0 +1,63 @@ +/* +Copyright 2021 The Crossplane Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package customservice + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1beta1 "github.com/upbound/provider-gcp/apis/monitoring/v1beta1" +) + +// Setup adds a controller that reconciles CustomService managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1beta1.CustomService_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + r := managed.NewReconciler(mgr, + xpresource.ManagedKind(v1beta1.CustomService_GroupVersionKind), + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["google_monitoring_custom_service"], + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1beta1.CustomService_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3*time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + ) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1beta1.CustomService{}). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/monitoring/dashboard/zz_controller.go b/internal/controller/monitoring/dashboard/zz_controller.go new file mode 100755 index 000000000..62c98661d --- /dev/null +++ b/internal/controller/monitoring/dashboard/zz_controller.go @@ -0,0 +1,63 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package dashboard + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1beta1 "github.com/upbound/provider-gcp/apis/monitoring/v1beta1" +) + +// Setup adds a controller that reconciles Dashboard managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1beta1.Dashboard_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + r := managed.NewReconciler(mgr, + xpresource.ManagedKind(v1beta1.Dashboard_GroupVersionKind), + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["google_monitoring_dashboard"], + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1beta1.Dashboard_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3*time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + ) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1beta1.Dashboard{}). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/monitoring/group/zz_controller.go b/internal/controller/monitoring/group/zz_controller.go new file mode 100755 index 000000000..964675084 --- /dev/null +++ b/internal/controller/monitoring/group/zz_controller.go @@ -0,0 +1,63 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package group + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1beta1 "github.com/upbound/provider-gcp/apis/monitoring/v1beta1" +) + +// Setup adds a controller that reconciles Group managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1beta1.Group_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + r := managed.NewReconciler(mgr, + xpresource.ManagedKind(v1beta1.Group_GroupVersionKind), + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["google_monitoring_group"], + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1beta1.Group_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3*time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + ) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1beta1.Group{}). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/monitoring/metricdescriptor/zz_controller.go b/internal/controller/monitoring/metricdescriptor/zz_controller.go new file mode 100755 index 000000000..09ff799f8 --- /dev/null +++ b/internal/controller/monitoring/metricdescriptor/zz_controller.go @@ -0,0 +1,63 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package metricdescriptor + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1beta1 "github.com/upbound/provider-gcp/apis/monitoring/v1beta1" +) + +// Setup adds a controller that reconciles MetricDescriptor managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1beta1.MetricDescriptor_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + r := managed.NewReconciler(mgr, + xpresource.ManagedKind(v1beta1.MetricDescriptor_GroupVersionKind), + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["google_monitoring_metric_descriptor"], + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1beta1.MetricDescriptor_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3*time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + ) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1beta1.MetricDescriptor{}). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/monitoring/slo/zz_controller.go b/internal/controller/monitoring/slo/zz_controller.go new file mode 100755 index 000000000..47612461e --- /dev/null +++ b/internal/controller/monitoring/slo/zz_controller.go @@ -0,0 +1,63 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package slo + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1beta1 "github.com/upbound/provider-gcp/apis/monitoring/v1beta1" +) + +// Setup adds a controller that reconciles SLO managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1beta1.SLO_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + r := managed.NewReconciler(mgr, + xpresource.ManagedKind(v1beta1.SLO_GroupVersionKind), + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["google_monitoring_slo"], + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1beta1.SLO_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3*time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + ) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1beta1.SLO{}). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/zz_setup.go b/internal/controller/zz_setup.go index 9ccf9b579..1dd8d017c 100755 --- a/internal/controller/zz_setup.go +++ b/internal/controller/zz_setup.go @@ -210,7 +210,12 @@ import ( instancememcache "github.com/upbound/provider-gcp/internal/controller/memcache/instance" model "github.com/upbound/provider-gcp/internal/controller/mlengine/model" alertpolicy "github.com/upbound/provider-gcp/internal/controller/monitoring/alertpolicy" + customservice "github.com/upbound/provider-gcp/internal/controller/monitoring/customservice" + dashboard "github.com/upbound/provider-gcp/internal/controller/monitoring/dashboard" + group "github.com/upbound/provider-gcp/internal/controller/monitoring/group" + metricdescriptor "github.com/upbound/provider-gcp/internal/controller/monitoring/metricdescriptor" notificationchannel "github.com/upbound/provider-gcp/internal/controller/monitoring/notificationchannel" + slo "github.com/upbound/provider-gcp/internal/controller/monitoring/slo" uptimecheckconfig "github.com/upbound/provider-gcp/internal/controller/monitoring/uptimecheckconfig" connectivitytest "github.com/upbound/provider-gcp/internal/controller/networkmanagement/connectivitytest" environmentnotebooks "github.com/upbound/provider-gcp/internal/controller/notebooks/environment" @@ -466,7 +471,12 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { instancememcache.Setup, model.Setup, alertpolicy.Setup, + customservice.Setup, + dashboard.Setup, + group.Setup, + metricdescriptor.Setup, notificationchannel.Setup, + slo.Setup, uptimecheckconfig.Setup, connectivitytest.Setup, environmentnotebooks.Setup, diff --git a/package/crds/monitoring.gcp.upbound.io_customservices.yaml b/package/crds/monitoring.gcp.upbound.io_customservices.yaml new file mode 100644 index 000000000..268ec56ac --- /dev/null +++ b/package/crds/monitoring.gcp.upbound.io_customservices.yaml @@ -0,0 +1,332 
@@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: customservices.monitoring.gcp.upbound.io +spec: + group: monitoring.gcp.upbound.io + names: + categories: + - crossplane + - managed + - gcp + kind: CustomService + listKind: CustomServiceList + plural: customservices + singular: customservice + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: CustomService is the Schema for the CustomServices API. A Service + is a discrete, autonomous, and network-accessible unit, designed to solve + an individual concern (Wikipedia). + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CustomServiceSpec defines the desired state of CustomService + properties: + deletionPolicy: + default: Delete + description: DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + displayName: + description: Name used for UI elements listing this Service. + type: string + project: + description: The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + type: string + serviceId: + description: An optional service ID to use. If not given, the + server will generate a service ID. + type: string + telemetry: + description: Configuration for how to query telemetry on a Service. + Structure is documented below. + items: + properties: + resourceName: + description: The full name of the resource that defines + this service. Formatted as described in https://cloud.google.com/apis/design/resource_names. + type: string + type: object + type: array + userLabels: + additionalProperties: + type: string + description: Labels which have been used to annotate the service. + Label keys must start with a letter. Label keys and values may + contain lowercase letters, numbers, underscores, and dashes. + Label keys and values have a maximum length of 63 characters, + and must be less than 128 bytes in size. Up to 64 label entries + may be stored. For labels which do not have a semantic value, + the empty string may be supplied for the label value. 
+ type: object + type: object + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. 
The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. 
- For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: CustomServiceStatus defines the observed state of CustomService. + properties: + atProvider: + properties: + id: + description: an identifier for the resource with format {{name}} + type: string + name: + description: 'The full resource name for this service. The syntax + is: projects/[PROJECT_ID]/services/[SERVICE_ID].' + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/package/crds/monitoring.gcp.upbound.io_dashboards.yaml b/package/crds/monitoring.gcp.upbound.io_dashboards.yaml new file mode 100644 index 000000000..712b446cd --- /dev/null +++ b/package/crds/monitoring.gcp.upbound.io_dashboards.yaml @@ -0,0 +1,306 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: dashboards.monitoring.gcp.upbound.io +spec: + group: monitoring.gcp.upbound.io + names: + categories: + - crossplane + - managed + - gcp + kind: Dashboard + listKind: DashboardList + plural: dashboards + singular: dashboard + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: 
string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Dashboard is the Schema for the Dashboards API. A Google Stackdriver + dashboard. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DashboardSpec defines the desired state of Dashboard + properties: + deletionPolicy: + default: Delete + description: DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + dashboardJson: + description: The JSON representation of a dashboard, following + the format at https://cloud.google.com/monitoring/api/ref_v3/rest/v1/projects.dashboards. + The representation of an existing dashboard can be found by + using the API Explorer + type: string + project: + description: The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + type: string + required: + - dashboardJson + type: object + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DashboardStatus defines the observed state of Dashboard. + properties: + atProvider: + properties: + id: + description: an identifier for the resource with format projects/{project_id_or_number}/dashboards/{dashboard_id} + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. 
+ format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/package/crds/monitoring.gcp.upbound.io_groups.yaml b/package/crds/monitoring.gcp.upbound.io_groups.yaml new file mode 100644 index 000000000..cd429b22c --- /dev/null +++ b/package/crds/monitoring.gcp.upbound.io_groups.yaml @@ -0,0 +1,396 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: groups.monitoring.gcp.upbound.io +spec: + group: monitoring.gcp.upbound.io + names: + categories: + - crossplane + - managed + - gcp + kind: Group + listKind: GroupList + plural: groups + singular: group + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Group is the Schema for the Groups API. 
The description of a + dynamic collection of monitored resources. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GroupSpec defines the desired state of Group + properties: + deletionPolicy: + default: Delete + description: DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + displayName: + description: A user-assigned name for this group, used only for + display purposes. + type: string + filter: + description: The filter used to determine which monitored resources + belong to this group. + type: string + isCluster: + description: If true, the members of this group are considered + to be a cluster. The system can perform additional analysis + on groups that are clusters. + type: boolean + parentName: + description: The name of the group's parent, if it has one. The + format is "projects/{project_id_or_number}/groups/{group_id}". + For groups with no parent, parentName is the empty string, "". + type: string + parentNameRef: + description: Reference to a Group in monitoring to populate parentName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + parentNameSelector: + description: Selector for a Group in monitoring to populate parentName. + properties: + matchControllerRef: + description: MatchControllerRef ensures an object with the + same controller reference as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + project: + description: The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + type: string + required: + - displayName + - filter + type: object + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 
'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: GroupStatus defines the observed state of Group. 
+ properties: + atProvider: + properties: + id: + description: an identifier for the resource with format {{name}} + type: string + name: + description: A unique identifier for this group. The format is + "projects/{project_id_or_number}/groups/{group_id}". + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/package/crds/monitoring.gcp.upbound.io_metricdescriptors.yaml b/package/crds/monitoring.gcp.upbound.io_metricdescriptors.yaml new file mode 100644 index 000000000..d0e58e2ab --- /dev/null +++ b/package/crds/monitoring.gcp.upbound.io_metricdescriptors.yaml @@ -0,0 +1,421 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: metricdescriptors.monitoring.gcp.upbound.io +spec: + group: monitoring.gcp.upbound.io + names: + categories: + - crossplane + - managed + - gcp + kind: MetricDescriptor + listKind: MetricDescriptorList + plural: metricdescriptors + singular: metricdescriptor + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: MetricDescriptor is the Schema for the MetricDescriptors API. + Defines a metric type and its schema. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MetricDescriptorSpec defines the desired state of MetricDescriptor + properties: + deletionPolicy: + default: Delete + description: DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A detailed description of the metric, which can be + used in documentation. + type: string + displayName: + description: A concise name for the metric, which can be displayed + in user interfaces. Use sentence case without an ending period, + for example "Request count". + type: string + labels: + description: The set of labels that can be used to describe a + specific instance of this metric type. In order to delete a + label, the entire resource must be deleted, then created with + the desired labels. Structure is documented below. + items: + properties: + description: + description: A human-readable description for the label. + type: string + key: + description: The key for this label. The key must not exceed + 100 characters. The first character of the key must be + an upper- or lower-case letter, the remaining characters + must be letters, digits or underscores, and the key must + match the regular expression [a-zA-Z][a-zA-Z0-9_]* + type: string + valueType: + description: The type of data that can be assigned to the + label. Default value is STRING. 
Possible values are STRING, + BOOL, and INT64. + type: string + required: + - key + type: object + type: array + launchStage: + description: The launch stage of the metric definition. Possible + values are LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED, PRELAUNCH, + EARLY_ACCESS, ALPHA, BETA, GA, and DEPRECATED. + type: string + metadata: + description: Metadata which can be used to guide usage of the + metric. Structure is documented below. + items: + properties: + ingestDelay: + description: The delay of data points caused by ingestion. + Data points older than this age are guaranteed to be ingested + and available to be read, excluding data loss due to errors. + In [duration format](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf?&_ga=2.264881487.1507873253.1593446723-935052455.1591817775#google.protobuf.Duration). + type: string + samplePeriod: + description: The sampling period of metric data points. + For metrics which are written periodically, consecutive + data points are stored at this time interval, excluding + data loss due to errors. Metrics with a higher granularity + have a smaller sampling period. In [duration format](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf?&_ga=2.264881487.1507873253.1593446723-935052455.1591817775#google.protobuf.Duration). + type: string + type: object + type: array + metricKind: + description: Whether the metric records instantaneous values, + changes to a value, etc. Some combinations of metricKind and + valueType might not be supported. Possible values are METRIC_KIND_UNSPECIFIED, + GAUGE, DELTA, and CUMULATIVE. + type: string + project: + description: The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + type: string + type: + description: The metric type, including its DNS name prefix. The + type is not URL-encoded. 
All service defined metrics must be + prefixed with the service name, in the format of {service name}/{relative + metric name}, such as cloudsql.googleapis.com/database/cpu/utilization. + The relative metric name must have only upper and lower-case + letters, digits, '/' and underscores '_' are allowed. Additionally, + the maximum number of characters allowed for the relative_metric_name + is 100. All user-defined metric types have the DNS name custom.googleapis.com, + external.googleapis.com, or logging.googleapis.com/user/. + type: string + unit: + description: The units in which the metric value is reported. + It is only applicable if the valueType is INT64, DOUBLE, or + DISTRIBUTION. The unit defines the representation of the stored + metric values. Different systems may scale the values to be + more easily displayed (so a value of 0.02KBy might be displayed + as 20By, and a value of 3523KBy might be displayed as 3.5MBy). + However, if the unit is KBy, then the value of the metric is + always in thousands of bytes, no matter how it may be displayed. + If you want a custom metric to record the exact number of CPU-seconds + used by a job, you can create an INT64 CUMULATIVE metric whose + unit is s{CPU} (or equivalently 1s{CPU} or just s). If the job + uses 12,005 CPU-seconds, then the value is written as 12005. + Alternatively, if you want a custom metric to record data in + a more granular way, you can create a DOUBLE CUMULATIVE metric + whose unit is ks{CPU}, and then write the value 12.005 (which + is 12005/1000), or use Kis{CPU} and write 11.723 (which is 12005/1024). + The supported units are a subset of The Unified Code for Units + of Measure standard. More info can be found in the API documentation + (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors). + type: string + valueType: + description: Whether the measurement is an integer, a floating-point + number, etc. 
Some combinations of metricKind and valueType might + not be supported. Possible values are BOOL, INT64, DOUBLE, STRING, + and DISTRIBUTION. + type: string + required: + - description + - displayName + - metricKind + - type + - valueType + type: object + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 
'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: MetricDescriptorStatus defines the observed state of MetricDescriptor. 
+ properties: + atProvider: + properties: + id: + description: an identifier for the resource with format {{name}} + type: string + monitoredResourceTypes: + description: If present, then a time series, which is identified + partially by a metric type and a MonitoredResourceDescriptor, + that is associated with this metric type can only be associated + with one of the monitored resource types listed here. This field + allows time series to be associated with the intersection of + this metric type and the monitored resource types in this list. + items: + type: string + type: array + name: + description: The resource name of the metric descriptor. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/package/crds/monitoring.gcp.upbound.io_sloes.yaml b/package/crds/monitoring.gcp.upbound.io_sloes.yaml new file mode 100644 index 000000000..8d019710c --- /dev/null +++ b/package/crds/monitoring.gcp.upbound.io_sloes.yaml @@ -0,0 +1,876 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: sloes.monitoring.gcp.upbound.io +spec: + group: monitoring.gcp.upbound.io + names: + categories: + - crossplane + - managed + - gcp + kind: SLO + listKind: SLOList + plural: sloes + singular: slo + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: SLO is the Schema for the SLOs API. A Service-Level Objective + (SLO) describes the level of desired good service. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: SLOSpec defines the desired state of SLO + properties: + deletionPolicy: + default: Delete + description: DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + basicSli: + description: 'Basic Service-Level Indicator (SLI) on a well-known + service type. Performance will be computed on the basis of pre-defined + metrics. SLIs are used to measure and calculate the quality + of the Service''s performance with respect to a single aspect + of service quality. Exactly one of the following must be set: + basic_sli, request_based_sli, windows_based_sli Structure is + documented below.' + items: + properties: + availability: + description: Availability based SLI, dervied from count + of requests made to this service that return successfully. + Structure is documented below. + items: + properties: + enabled: + description: Whether an availability SLI is enabled + or not. Must be set to true. Defaults to true. + type: boolean + type: object + type: array + latency: + description: Parameters for a latency threshold SLI. Structure + is documented below. + items: + properties: + threshold: + description: A duration string, e.g. 10s. Good service + is defined to be the count of requests made to this + service that return in no more than threshold. + type: string + required: + - threshold + type: object + type: array + location: + description: An optional set of locations to which this + SLI is relevant. Telemetry from other locations will not + be used to calculate performance for this SLI. 
If omitted, + this SLI applies to all locations in which the Service + has activity. For service types that don't support breaking + down by location, setting this field will result in an + error. + items: + type: string + type: array + method: + description: An optional set of RPCs to which this SLI is + relevant. Telemetry from other methods will not be used + to calculate performance for this SLI. If omitted, this + SLI applies to all the Service's methods. For service + types that don't support breaking down by method, setting + this field will result in an error. + items: + type: string + type: array + version: + description: The set of API versions to which this SLI is + relevant. Telemetry from other API versions will not be + used to calculate performance for this SLI. If omitted, + this SLI applies to all API versions. For service types + that don't support breaking down by version, setting this + field will result in an error. + items: + type: string + type: array + type: object + type: array + calendarPeriod: + description: A calendar period, semantically "since the start + of the current ". Possible values are DAY, WEEK, FORTNIGHT, + and MONTH. + type: string + displayName: + description: Name used for UI elements listing this SLO. + type: string + goal: + description: The fraction of service that must be good in order + for this objective to be met. 0 < goal <= 0.999 + type: number + project: + description: The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + type: string + requestBasedSli: + description: 'A request-based SLI defines a SLI for which atomic + units of service are counted directly. A SLI describes a good + service. It is used to measure and calculate the quality of + the Service''s performance with respect to a single aspect of + service quality. Exactly one of the following must be set: basic_sli, + request_based_sli, windows_based_sli Structure is documented + below.' 
+ items: + properties: + distributionCut: + description: Used when good_service is defined by a count + of values aggregated in a Distribution that fall into + a good range. The total_service is the total count of + all values aggregated in the Distribution. Defines a distribution + TimeSeries filter and thresholds used for measuring good + service and total service. Exactly one of distribution_cut + or good_total_ratio can be set. Structure is documented + below. + items: + properties: + distributionFilter: + description: A TimeSeries monitoring filter aggregating + values to quantify the good service provided. Must + have ValueType = DISTRIBUTION and MetricKind = DELTA + or MetricKind = CUMULATIVE. + type: string + range: + description: Range of numerical values. The computed + good_service will be the count of values x in the + Distribution such that range.min <= x <= range.max. + inclusive of min and max. Open ranges can be defined + by setting just one of min or max. Summed value + X should satisfy range.min <= X <= range.max for + a good window. Structure is documented below. + items: + properties: + max: + description: max value for the range (inclusive). + If not given, will be set to "infinity", defining + an open range ">= range.min" + type: number + min: + description: Min value for the range (inclusive). + If not given, will be set to "-infinity", + defining an open range "< range.max" + type: number + type: object + type: array + required: + - distributionFilter + - range + type: object + type: array + goodTotalRatio: + description: A means to compute a ratio of good_service + to total_service. Defines computing this ratio with two + TimeSeries monitoring filters Must specify exactly two + of good, bad, and total service filters. The relationship + good_service + bad_service = total_service will be assumed. + Exactly one of distribution_cut or good_total_ratio can + be set. Structure is documented below. 
+ items: + properties: + badServiceFilter: + description: A TimeSeries monitoring filter quantifying + bad service provided, either demanded service that + was not provided or demanded service that was of + inadequate quality. Must have ValueType = DOUBLE + or ValueType = INT64 and must have MetricKind = + DELTA or MetricKind = CUMULATIVE. Exactly two of + good_service_filter,bad_service_filter,total_service_filter + must be set (good + bad = total is assumed). + type: string + goodServiceFilter: + description: A TimeSeries monitoring filter quantifying + good service provided. Must have ValueType = DOUBLE + or ValueType = INT64 and must have MetricKind = + DELTA or MetricKind = CUMULATIVE. Exactly two of + good_service_filter,bad_service_filter,total_service_filter + must be set (good + bad = total is assumed). + type: string + totalServiceFilter: + description: A TimeSeries monitoring filter quantifying + total demanded service. Must have ValueType = DOUBLE + or ValueType = INT64 and must have MetricKind = + DELTA or MetricKind = CUMULATIVE. Exactly two of + good_service_filter,bad_service_filter,total_service_filter + must be set (good + bad = total is assumed). + type: string + type: object + type: array + type: object + type: array + rollingPeriodDays: + description: A rolling time period, semantically "in the past + X days". Must be between 1 to 30 days, inclusive. + type: number + service: + description: ID of the service to which this SLO belongs. + type: string + serviceRef: + description: Reference to a CustomService in monitoring to populate + service. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 
'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceSelector: + description: Selector for a CustomService in monitoring to populate + service. + properties: + matchControllerRef: + description: MatchControllerRef ensures an object with the + same controller reference as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sloId: + description: The id to use for this ServiceLevelObjective. If + omitted, an id will be generated instead. 
+ type: string + userLabels: + additionalProperties: + type: string + description: This field is intended to be used for organizing + and identifying the AlertPolicy objects.The field can contain + up to 64 entries. Each key and value is limited to 63 Unicode + characters or 128 bytes, whichever is smaller. Labels and values + can contain only lowercase letters, numerals, underscores, and + dashes. Keys must begin with a letter. + type: object + windowsBasedSli: + description: 'A windows-based SLI defines the criteria for time + windows. good_service is defined based off the count of these + time windows for which the provided service was of good quality. + A SLI describes a good service. It is used to measure and calculate + the quality of the Service''s performance with respect to a + single aspect of service quality. Exactly one of the following + must be set: basic_sli, request_based_sli, windows_based_sli + Structure is documented below.' + items: + properties: + goodBadMetricFilter: + description: A TimeSeries monitoring filter with ValueType + = BOOL. The window is good if any true values appear in + the window. One of good_bad_metric_filter, good_total_ratio_threshold, + metric_mean_in_range, metric_sum_in_range must be set + for windows_based_sli. + type: string + goodTotalRatioThreshold: + description: Criterion that describes a window as good if + its performance is high enough. One of good_bad_metric_filter, + good_total_ratio_threshold, metric_mean_in_range, metric_sum_in_range + must be set for windows_based_sli. Structure is documented + below. + items: + properties: + basicSliPerformance: + description: Basic SLI to evaluate to judge window + quality. Structure is documented below. + items: + properties: + availability: + description: Availability based SLI, dervied + from count of requests made to this service + that return successfully. Structure is documented + below. 
+ items: + properties: + enabled: + description: Whether an availability SLI + is enabled or not. Must be set to true. + Defaults to true. + type: boolean + type: object + type: array + latency: + description: Parameters for a latency threshold + SLI. Structure is documented below. + items: + properties: + threshold: + description: A duration string, e.g. 10s. + Good service is defined to be the count + of requests made to this service that + return in no more than threshold. + type: string + required: + - threshold + type: object + type: array + location: + description: An optional set of locations to + which this SLI is relevant. Telemetry from + other locations will not be used to calculate + performance for this SLI. If omitted, this + SLI applies to all locations in which the + Service has activity. For service types that + don't support breaking down by location, setting + this field will result in an error. + items: + type: string + type: array + method: + description: An optional set of RPCs to which + this SLI is relevant. Telemetry from other + methods will not be used to calculate performance + for this SLI. If omitted, this SLI applies + to all the Service's methods. For service + types that don't support breaking down by + method, setting this field will result in + an error. + items: + type: string + type: array + version: + description: The set of API versions to which + this SLI is relevant. Telemetry from other + API versions will not be used to calculate + performance for this SLI. If omitted, this + SLI applies to all API versions. For service + types that don't support breaking down by + version, setting this field will result in + an error. + items: + type: string + type: array + type: object + type: array + performance: + description: Request-based SLI to evaluate to judge + window quality. Structure is documented below. 
+ items: + properties: + distributionCut: + description: Used when good_service is defined + by a count of values aggregated in a Distribution + that fall into a good range. The total_service + is the total count of all values aggregated + in the Distribution. Defines a distribution + TimeSeries filter and thresholds used for + measuring good service and total service. + Exactly one of distribution_cut or good_total_ratio + can be set. Structure is documented below. + items: + properties: + distributionFilter: + description: A TimeSeries monitoring filter + aggregating values to quantify the good + service provided. Must have ValueType + = DISTRIBUTION and MetricKind = DELTA + or MetricKind = CUMULATIVE. + type: string + range: + description: Range of numerical values. + The computed good_service will be the + count of values x in the Distribution + such that range.min <= x <= range.max. + inclusive of min and max. Open ranges + can be defined by setting just one of + min or max. Summed value X should satisfy + range.min <= X <= range.max for a good + window. Structure is documented below. + items: + properties: + max: + description: max value for the range + (inclusive). If not given, will + be set to "infinity", defining + an open range ">= range.min" + type: number + min: + description: Min value for the range + (inclusive). If not given, will + be set to "-infinity", defining + an open range "< range.max" + type: number + type: object + type: array + required: + - distributionFilter + - range + type: object + type: array + goodTotalRatio: + description: A means to compute a ratio of good_service + to total_service. Defines computing this ratio + with two TimeSeries monitoring filters Must + specify exactly two of good, bad, and total + service filters. The relationship good_service + + bad_service = total_service will be assumed. + Exactly one of distribution_cut or good_total_ratio + can be set. Structure is documented below. 
+ items: + properties: + badServiceFilter: + description: A TimeSeries monitoring filter + quantifying bad service provided, either + demanded service that was not provided + or demanded service that was of inadequate + quality. Must have ValueType = DOUBLE + or ValueType = INT64 and must have MetricKind + = DELTA or MetricKind = CUMULATIVE. + Exactly two of good_service_filter,bad_service_filter,total_service_filter + must be set (good + bad = total is assumed). + type: string + goodServiceFilter: + description: A TimeSeries monitoring filter + quantifying good service provided. Must + have ValueType = DOUBLE or ValueType + = INT64 and must have MetricKind = DELTA + or MetricKind = CUMULATIVE. Exactly + two of good_service_filter,bad_service_filter,total_service_filter + must be set (good + bad = total is assumed). + type: string + totalServiceFilter: + description: A TimeSeries monitoring filter + quantifying total demanded service. + Must have ValueType = DOUBLE or ValueType + = INT64 and must have MetricKind = DELTA + or MetricKind = CUMULATIVE. Exactly + two of good_service_filter,bad_service_filter,total_service_filter + must be set (good + bad = total is assumed). + type: string + type: object + type: array + type: object + type: array + threshold: + description: A duration string, e.g. 10s. Good service + is defined to be the count of requests made to this + service that return in no more than threshold. + type: number + type: object + type: array + metricMeanInRange: + description: Criterion that describes a window as good if + the metric's value is in a good range, averaged across + returned streams. One of good_bad_metric_filter, good_total_ratio_threshold, + metric_mean_in_range, metric_sum_in_range must be set + for windows_based_sli. Average value X of time_series + should satisfy range.min <= X <= range.max for a good + window. Structure is documented below. + items: + properties: + range: + description: Range of numerical values. 
The computed + good_service will be the count of values x in the + Distribution such that range.min <= x <= range.max. + inclusive of min and max. Open ranges can be defined + by setting just one of min or max. Summed value + X should satisfy range.min <= X <= range.max for + a good window. Structure is documented below. + items: + properties: + max: + description: max value for the range (inclusive). + If not given, will be set to "infinity", defining + an open range ">= range.min" + type: number + min: + description: Min value for the range (inclusive). + If not given, will be set to "-infinity", + defining an open range "< range.max" + type: number + type: object + type: array + timeSeries: + description: A monitoring filter specifying the TimeSeries + to use for evaluating window quality. The provided + TimeSeries must have ValueType = INT64 or ValueType + = DOUBLE and MetricKind = GAUGE. Summed value X + should satisfy range.min <= X <= range.max for a + good window. + type: string + required: + - range + - timeSeries + type: object + type: array + metricSumInRange: + description: Criterion that describes a window as good if + the metric's value is in a good range, summed across returned + streams. Summed value X of time_series should satisfy + range.min <= X <= range.max for a good window. One of + good_bad_metric_filter, good_total_ratio_threshold, metric_mean_in_range, + metric_sum_in_range must be set for windows_based_sli. + Structure is documented below. + items: + properties: + range: + description: Range of numerical values. The computed + good_service will be the count of values x in the + Distribution such that range.min <= x <= range.max. + inclusive of min and max. Open ranges can be defined + by setting just one of min or max. Summed value + X should satisfy range.min <= X <= range.max for + a good window. Structure is documented below. + items: + properties: + max: + description: max value for the range (inclusive). 
+ If not given, will be set to "infinity", defining + an open range ">= range.min" + type: number + min: + description: Min value for the range (inclusive). + If not given, will be set to "-infinity", + defining an open range "< range.max" + type: number + type: object + type: array + timeSeries: + description: A monitoring filter specifying the TimeSeries + to use for evaluating window quality. The provided + TimeSeries must have ValueType = INT64 or ValueType + = DOUBLE and MetricKind = GAUGE. Summed value X + should satisfy range.min <= X <= range.max for a + good window. + type: string + required: + - range + - timeSeries + type: object + type: array + windowPeriod: + description: Duration over which window quality is evaluated, + given as a duration string "{X}s" representing X seconds. + Must be an integer fraction of a day and at least 60s. + type: string + type: object + type: array + required: + - goal + type: object + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. 
This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SLOStatus defines the observed state of SLO. + properties: + atProvider: + properties: + id: + description: an identifier for the resource with format {{name}} + type: string + name: + description: 'The full resource name for this service. The syntax + is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]' + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/package/crds/monitoring.gcp.upbound.io_uptimecheckconfigs.yaml b/package/crds/monitoring.gcp.upbound.io_uptimecheckconfigs.yaml index d29685354..e0e8e5462 100644 --- a/package/crds/monitoring.gcp.upbound.io_uptimecheckconfigs.yaml +++ b/package/crds/monitoring.gcp.upbound.io_uptimecheckconfigs.yaml @@ -294,6 +294,84 @@ spec: description: The group of resources being monitored. Should be the name of a group type: string + groupIdRef: + description: Reference to a Group in monitoring to populate + groupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution + of this reference is required. The default is + 'Required', which means the reconcile will fail + if the reference cannot be resolved. 'Optional' + means this reference will be a no-op if it cannot + be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference + should be resolved. The default is 'IfNotPresent', + which will attempt to resolve the reference only + when the corresponding field is not present. Use + 'Always' to resolve the reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + groupIdSelector: + description: Selector for a Group in monitoring to populate + groupId. + properties: + matchControllerRef: + description: MatchControllerRef ensures an object with + the same controller reference as the selecting object + is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution + of this reference is required. The default is + 'Required', which means the reconcile will fail + if the reference cannot be resolved. 'Optional' + means this reference will be a no-op if it cannot + be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference + should be resolved. The default is 'IfNotPresent', + which will attempt to resolve the reference only + when the corresponding field is not present. Use + 'Always' to resolve the reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object resourceType: description: The resource type of the group members. Possible values are RESOURCE_TYPE_UNSPECIFIED, INSTANCE, and AWS_ELB_LOAD_BALANCER.