From c23ad319e87d6408f597eb07df53ec20ea3d2ae7 Mon Sep 17 00:00:00 2001 From: Gemma Hou Date: Tue, 18 Jun 2024 20:02:52 +0000 Subject: [PATCH 001/101] Fix value template for global ComputeTargetHTTPSProxy --- ...utetargethttpsproxies.compute.cnrm.cloud.google.com.yaml | 6 +++--- config/servicemappings/compute.yaml | 6 +++--- .../resource-docs/compute/computetargethttpsproxy.md | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computetargethttpsproxies.compute.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computetargethttpsproxies.compute.cnrm.cloud.google.com.yaml index 1667554778..7c7606ef03 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computetargethttpsproxies.compute.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computetargethttpsproxies.compute.cnrm.cloud.google.com.yaml @@ -80,7 +80,7 @@ spec: - external properties: external: - description: 'Allowed value: string of the format `projects/{{project}}/locations/{{location}}/certificates/{{value}}`, + description: 'Allowed value: string of the format `projects/{{project}}/locations/global/certificates/{{value}}`, where {{value}} is the `name` field of a `CertificateManagerCertificate` resource.' type: string @@ -113,7 +113,7 @@ spec: - external properties: external: - description: 'Allowed value: string of the format `//certificatemanager.googleapis.com/projects/{{project}}/locations/{{location}}/certificateMaps/{{value}}`, + description: 'Allowed value: string of the format `//certificatemanager.googleapis.com/projects/{{project}}/locations/global/certificateMaps/{{value}}`, where {{value}} is the `name` field of a `CertificateManagerCertificateMap` resource.' type: string @@ -185,7 +185,7 @@ spec: - external properties: external: - description: 'Allowed value: string of the format `projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{value}}`, + description: 'Allowed value: string of the format `projects/{{project}}/locations/global/serverTlsPolicies/{{value}}`, where {{value}} is the `name` field of a `NetworkSecurityServerTLSPolicy` resource.' 
type: string diff --git a/config/servicemappings/compute.yaml b/config/servicemappings/compute.yaml index 2f63370596..420f4c8e02 100644 --- a/config/servicemappings/compute.yaml +++ b/config/servicemappings/compute.yaml @@ -2446,7 +2446,7 @@ spec: kind: CertificateManagerCertificate version: v1beta1 group: certificatemanager.cnrm.cloud.google.com - valueTemplate: projects/{{project}}/locations/{{location}}/certificates/{{value}} + valueTemplate: projects/{{project}}/locations/global/certificates/{{value}} - tfField: ssl_certificates description: |- A list of ComputeSSLCertificate resources that are used to @@ -2479,7 +2479,7 @@ spec: kind: CertificateManagerCertificateMap version: v1beta1 group: certificatemanager.cnrm.cloud.google.com - valueTemplate: "//certificatemanager.googleapis.com/projects/{{project}}/locations/{{location}}/certificateMaps/{{value}}" + valueTemplate: "//certificatemanager.googleapis.com/projects/{{project}}/locations/global/certificateMaps/{{value}}" - key: serverTlsPolicyRef tfField: server_tls_policy description: |- @@ -2496,7 +2496,7 @@ spec: kind: NetworkSecurityServerTLSPolicy version: v1beta1 group: networksecurity.cnrm.cloud.google.com - valueTemplate: "projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{value}}" + valueTemplate: "projects/{{project}}/locations/global/serverTlsPolicies/{{value}}" dclBasedResource: true containers: - type: project diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computetargethttpsproxy.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computetargethttpsproxy.md index 81505f6375..e2814ac409 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computetargethttpsproxy.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computetargethttpsproxy.md @@ -148,7 +148,7 @@ sslCertificates and certificateManagerCertificates fields can not be defined tog

string

-{% verbatim %}Allowed value: string of the format `projects/{{project}}/locations/{{location}}/certificates/{{value}}`, where {{value}} is the `name` field of a `CertificateManagerCertificate` resource.{% endverbatim %}

+{% verbatim %}Allowed value: string of the format `projects/{{project}}/locations/global/certificates/{{value}}`, where {{value}} is the `name` field of a `CertificateManagerCertificate` resource.{% endverbatim %}

@@ -190,7 +190,7 @@ can only be set for global target proxies.{% endverbatim %}

string

-{% verbatim %}Allowed value: string of the format `//certificatemanager.googleapis.com/projects/{{project}}/locations/{{location}}/certificateMaps/{{value}}`, where {{value}} is the `name` field of a `CertificateManagerCertificateMap` resource.{% endverbatim %}

+{% verbatim %}Allowed value: string of the format `//certificatemanager.googleapis.com/projects/{{project}}/locations/global/certificateMaps/{{value}}`, where {{value}} is the `name` field of a `CertificateManagerCertificateMap` resource.{% endverbatim %}

@@ -307,7 +307,7 @@ If left blank, communications are not encrypted.{% endverbatim %}

string

-{% verbatim %}Allowed value: string of the format `projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{value}}`, where {{value}} is the `name` field of a `NetworkSecurityServerTLSPolicy` resource.{% endverbatim %}

+{% verbatim %}Allowed value: string of the format `projects/{{project}}/locations/global/serverTlsPolicies/{{value}}`, where {{value}} is the `name` field of a `NetworkSecurityServerTLSPolicy` resource.{% endverbatim %}
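To illustrate the corrected value template above, here is a minimal sketch of a ComputeTargetHTTPSProxy manifest that references a CertificateManagerCertificate by name. The resource names, apiVersion, and urlMapRef wiring are illustrative assumptions; only the resolved reference format comes from this patch.

apiVersion: compute.cnrm.cloud.google.com/v1beta1
kind: ComputeTargetHTTPSProxy
metadata:
  name: computetargethttpsproxy-sample   # hypothetical name
spec:
  urlMapRef:
    name: computeurlmap-sample            # hypothetical name
  certificateManagerCertificates:
  - name: certificatemanagercertificate-sample
    # With the corrected template, the controller resolves this reference to:
    #   projects/<project-id>/locations/global/certificates/certificatemanagercertificate-sample
    # rather than the previous locations/{{location}} form.

The same locations/global substitution applies to the certificateMapRef and serverTlsPolicyRef templates changed above.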

From 1d44491beb54dfe846a6919a2491be9737815105 Mon Sep 17 00:00:00 2001 From: justinsb Date: Thu, 8 Feb 2024 12:58:30 -0500 Subject: [PATCH 002/101] mockgcp: Support for MonitoringUptimeCheckConfig Introduce a fieldmask helper also. --- config/tests/samples/create/harness.go | 3 +- mockgcp/mockmonitoring/dashboard.go | 3 + mockgcp/mockmonitoring/fieldmaskutils.go | 91 + mockgcp/mockmonitoring/service.go | 4 +- mockgcp/mockmonitoring/uptimecheck.go | 194 ++ .../v1beta1/monitoringdashboard/_http.log | 1612 +++++++++++++++++ ...d_object_httpuptimecheckconfig.golden.yaml | 62 + .../httpuptimecheckconfig/_http.log | 356 ++++ .../httpuptimecheckconfig/update.yaml | 4 +- tests/e2e/normalize.go | 9 + tests/e2e/unified_test.go | 2 + 11 files changed, 2336 insertions(+), 4 deletions(-) create mode 100644 mockgcp/mockmonitoring/fieldmaskutils.go create mode 100644 mockgcp/mockmonitoring/uptimecheck.go create mode 100644 pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/_http.log create mode 100644 pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringuptimecheckconfig/httpuptimecheckconfig/_generated_object_httpuptimecheckconfig.golden.yaml create mode 100644 pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringuptimecheckconfig/httpuptimecheckconfig/_http.log diff --git a/config/tests/samples/create/harness.go b/config/tests/samples/create/harness.go index 195f741829..c0b2aa1f65 100644 --- a/config/tests/samples/create/harness.go +++ b/config/tests/samples/create/harness.go @@ -678,8 +678,9 @@ func MaybeSkip(t *testing.T, name string, resources []*unstructured.Unstructured case schema.GroupKind{Group: "logging.cnrm.cloud.google.com", Kind: "LoggingLogBucket"}: case schema.GroupKind{Group: "monitoring.cnrm.cloud.google.com", Kind: "MonitoringAlertPolicy"}: - case schema.GroupKind{Group: "monitoring.cnrm.cloud.google.com", Kind: "MonitoringNotificationChannel"}: case schema.GroupKind{Group: "monitoring.cnrm.cloud.google.com", Kind: "MonitoringDashboard"}: + case schema.GroupKind{Group: "monitoring.cnrm.cloud.google.com", Kind: "MonitoringNotificationChannel"}: + case schema.GroupKind{Group: "monitoring.cnrm.cloud.google.com", Kind: "MonitoringUptimeCheckConfig"}: case schema.GroupKind{Group: "networkservices.cnrm.cloud.google.com", Kind: "NetworkServicesMesh"}: diff --git a/mockgcp/mockmonitoring/dashboard.go b/mockgcp/mockmonitoring/dashboard.go index cfe97b9726..fd733ab4c1 100644 --- a/mockgcp/mockmonitoring/dashboard.go +++ b/mockgcp/mockmonitoring/dashboard.go @@ -43,6 +43,9 @@ func (s *DashboardsService) GetDashboard(ctx context.Context, req *pb.GetDashboa obj := &pb.Dashboard{} if err := s.storage.Get(ctx, fqn, obj); err != nil { + if status.Code(err) == codes.NotFound { + return nil, status.Errorf(codes.NotFound, "Requested entity was not found.") + } return nil, err } diff --git a/mockgcp/mockmonitoring/fieldmaskutils.go b/mockgcp/mockmonitoring/fieldmaskutils.go new file mode 100644 index 0000000000..517221a2e1 --- /dev/null +++ b/mockgcp/mockmonitoring/fieldmaskutils.go @@ -0,0 +1,91 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mockmonitoring + +import ( + "fmt" + "strings" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func setField(dest proto.Message, src proto.Message, path string) error { + tokens := strings.Split(path, ".") + if len(tokens) == 0 { + return fmt.Errorf("path is not valid: %q", path) + } + + val, err := getFieldValue(src.ProtoReflect(), tokens) + if err != nil { + return err + } + + if err := setFieldValue(dest.ProtoReflect(), tokens, val); err != nil { + return err + } + return nil +} + +func getFieldValue(src protoreflect.Message, path []string) (protoreflect.Value, error) { + remainder := path + for { + token := remainder[0] + remainder = remainder[1:] + + srcType := src.Descriptor() + srcField := srcType.Fields().ByJSONName(token) + if srcField == nil { + return protoreflect.Value{}, fmt.Errorf("field %q not found in path %q", token, strings.Join(path, ".")) + } + srcValue := src.Get(srcField) + if len(remainder) == 0 { + return srcValue, nil + } + switch srcValue := srcValue.Interface().(type) { + case protoreflect.Message: + src = srcValue + + default: + return protoreflect.Value{}, fmt.Errorf("unhandled type %T", srcValue) + } + } +} + +func setFieldValue(dest protoreflect.Message, path []string, val protoreflect.Value) error { + remainder := path + for { + token := remainder[0] + remainder = remainder[1:] + + destType := dest.Descriptor() + destField := destType.Fields().ByJSONName(token) + if destField == nil { + return fmt.Errorf("field %q not found in path %q", token, strings.Join(path, ".")) + } + if len(remainder) == 0 { + dest.Set(destField, val) + return nil + } + destValue := dest.Mutable(destField) + switch destValue := destValue.Interface().(type) { + case protoreflect.Message: + dest = destValue + + default: + return fmt.Errorf("unhandled type %T", destValue) + } + } +} diff --git a/mockgcp/mockmonitoring/service.go b/mockgcp/mockmonitoring/service.go index 0cf6645380..1843e46221 100644 --- a/mockgcp/mockmonitoring/service.go +++ b/mockgcp/mockmonitoring/service.go @@ -52,6 +52,8 @@ func (s *MockService) ExpectedHost() string { func (s *MockService) Register(grpcServer *grpc.Server) { monitoringpb.RegisterAlertPolicyServiceServer(grpcServer, &AlertPolicyService{MockService: s}) monitoringpb.RegisterNotificationChannelServiceServer(grpcServer, &NotificationChannelService{MockService: s}) + monitoringpb.RegisterUptimeCheckServiceServer(grpcServer, &UptimeCheckService{MockService: s}) + dashboardpb.RegisterDashboardsServiceServer(grpcServer, &DashboardsService{MockService: s}) } @@ -59,6 +61,7 @@ func (s *MockService) NewHTTPMux(ctx context.Context, conn *grpc.ClientConn) (ht mux, err := httpmux.NewServeMux(ctx, conn, httpmux.Options{}, monitoringpb.RegisterAlertPolicyServiceHandler, monitoringpb.RegisterNotificationChannelServiceHandler, + monitoringpb.RegisterUptimeCheckServiceHandler, dashboardpb.RegisterDashboardsServiceHandler) if err != nil { return nil, err @@ -67,7 +70,6 @@ func (s *MockService) NewHTTPMux(ctx context.Context, conn *grpc.ClientConn) (ht // Returns slightly non-standard errors 
mux.RewriteError = func(ctx context.Context, error *httpmux.ErrorResponse) { if error.Code == 404 { - error.Message = "Requested entity was not found." error.Errors = nil } } diff --git a/mockgcp/mockmonitoring/uptimecheck.go b/mockgcp/mockmonitoring/uptimecheck.go new file mode 100644 index 0000000000..8b15faa4c6 --- /dev/null +++ b/mockgcp/mockmonitoring/uptimecheck.go @@ -0,0 +1,194 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mockmonitoring + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "github.com/GoogleCloudPlatform/k8s-config-connector/mockgcp/common/projects" + pb "github.com/GoogleCloudPlatform/k8s-config-connector/mockgcp/generated/mockgcp/monitoring/v3" + "github.com/golang/protobuf/ptypes/empty" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +type UptimeCheckService struct { + *MockService + pb.UnimplementedUptimeCheckServiceServer +} + +func (s *UptimeCheckService) GetUptimeCheckConfig(ctx context.Context, req *pb.GetUptimeCheckConfigRequest) (*pb.UptimeCheckConfig, error) { + name, err := s.parseUptimeCheckConfigName(req.GetName()) + if err != nil { + return nil, err + } + + fqn := name.String() + + obj := &pb.UptimeCheckConfig{} + if err := s.storage.Get(ctx, fqn, obj); err != nil { + if status.Code(err) == codes.NotFound { + return nil, status.Errorf(codes.NotFound, "Config not found for check %s in project %s", name.Name, name.Project.ID) + } + return nil, err + } + + return redactUptimeCheckConfig(obj), nil +} + +func redactUptimeCheckConfig(obj *pb.UptimeCheckConfig) *pb.UptimeCheckConfig { + // Fields containing sensitive information like authentication tokens or contact info are only partially populated on retrieval. + redacted := proto.Clone(obj).(*pb.UptimeCheckConfig) + if authInfo := redacted.GetHttpCheck().GetAuthInfo(); authInfo != nil { + authInfo.Password = strings.Repeat("*", 6) + } + if headers := redacted.GetHttpCheck().GetHeaders(); headers != nil { + for k := range headers { + headers[k] = "******" + } + } + return redacted +} + +func populateDefaultsForUptimeCheckConfig(obj *pb.UptimeCheckConfig) { + if obj.CheckerType == pb.UptimeCheckConfig_CHECKER_TYPE_UNSPECIFIED { + obj.CheckerType = pb.UptimeCheckConfig_STATIC_IP_CHECKERS + } + + if httpCheck := obj.GetHttpCheck(); httpCheck != nil { + if httpCheck.Body != nil { + // Users can provide a `Content-Length` header via the `headers` field or the API will do so. 
+ if httpCheck.Headers == nil { + httpCheck.Headers = make(map[string]string) + } + foundContentLength := false + for k := range httpCheck.Headers { + if strings.ToLower(k) == "content-length" { + foundContentLength = true + } + } + if !foundContentLength { + httpCheck.Headers["Content-Length"] = strconv.Itoa(len(httpCheck.Body)) + } + } + } +} +func (s *UptimeCheckService) CreateUptimeCheckConfig(ctx context.Context, req *pb.CreateUptimeCheckConfigRequest) (*pb.UptimeCheckConfig, error) { + now := time.Now() + + uptimeCheckConfigID := fmt.Sprintf("%d", now.UnixNano()) + + reqName := req.GetParent() + "/uptimeCheckConfigs/" + uptimeCheckConfigID + name, err := s.parseUptimeCheckConfigName(reqName) + if err != nil { + return nil, err + } + + fqn := name.String() + + obj := proto.Clone(req.UptimeCheckConfig).(*pb.UptimeCheckConfig) + obj.Name = fqn + + populateDefaultsForUptimeCheckConfig(obj) + + if err := s.storage.Create(ctx, fqn, obj); err != nil { + return nil, status.Errorf(codes.Internal, "error creating uptimeCheckConfig: %v", err) + } + + return redactUptimeCheckConfig(obj), nil +} + +func (s *UptimeCheckService) UpdateUptimeCheckConfig(ctx context.Context, req *pb.UpdateUptimeCheckConfigRequest) (*pb.UptimeCheckConfig, error) { + name, err := s.parseUptimeCheckConfigName(req.GetUptimeCheckConfig().GetName()) + if err != nil { + return nil, err + } + + fqn := name.String() + + existing := &pb.UptimeCheckConfig{} + if err := s.storage.Get(ctx, fqn, existing); err != nil { + return nil, err + } + + updated := proto.Clone(existing).(*pb.UptimeCheckConfig) + + for _, path := range req.GetUpdateMask().GetPaths() { + // TODO: Validate path? + if err := setField(updated, req.GetUptimeCheckConfig(), path); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "error setting field %q: %v", path, err) + } + } + + populateDefaultsForUptimeCheckConfig(updated) + + if err := s.storage.Update(ctx, fqn, updated); err != nil { + return nil, status.Errorf(codes.Internal, "error updating uptimeCheckConfig: %v", err) + } + + return redactUptimeCheckConfig(updated), nil +} + +func (s *UptimeCheckService) DeleteUptimeCheckConfig(ctx context.Context, req *pb.DeleteUptimeCheckConfigRequest) (*empty.Empty, error) { + name, err := s.parseUptimeCheckConfigName(req.Name) + if err != nil { + return nil, err + } + + fqn := name.String() + + deleted := &pb.UptimeCheckConfig{} + if err := s.storage.Delete(ctx, fqn, deleted); err != nil { + return nil, err + } + + return &empty.Empty{}, nil +} + +type uptimeCheckConfigName struct { + Project *projects.ProjectData + Name string +} + +func (n *uptimeCheckConfigName) String() string { + return "projects/" + n.Project.ID + "/uptimeCheckConfigs/" + n.Name +} + +// parseUptimeCheckConfigName parses a string into a uptimeCheckConfigName. 
+// The expected form is projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] +func (s *MockService) parseUptimeCheckConfigName(name string) (*uptimeCheckConfigName, error) { + tokens := strings.Split(name, "/") + + if len(tokens) == 4 && tokens[0] == "projects" && tokens[2] == "uptimeCheckConfigs" { + project, err := s.Projects.GetProjectByID(tokens[1]) + if err != nil { + return nil, err + } + + name := &uptimeCheckConfigName{ + Project: project, + Name: tokens[3], + } + + return name, nil + } else { + return nil, status.Errorf(codes.InvalidArgument, "name %q is not valid", name) + } +} diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/_http.log new file mode 100644 index 0000000000..5b9ac5fec2 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/_http.log @@ -0,0 +1,1612 @@ +POST https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +{ + "columnLayout": { + "columns": [ + { + "weight": 2, + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN" + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN" + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" 
+ }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN" + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN" + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + 
}, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN" + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN" + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + 
"timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN" + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN" + } + }, + { + "title": "Widget 3", + 
"xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN" + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { 
+ "content": "Widget 2", + "format": "MARKDOWN" + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN" + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": 
"0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN" + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +PATCH https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +{ + "columnLayout": { + "columns": [ + { + "weight": 2, + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "thresholds": [], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN" + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "thresholds": [], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-${uniqueId}", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN" + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": 
"metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-${uniqueId}", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN" + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-${uniqueId}", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN" + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": 
"metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-${uniqueId}", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN" + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-${uniqueId}", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +DELETE https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{} \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringuptimecheckconfig/httpuptimecheckconfig/_generated_object_httpuptimecheckconfig.golden.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringuptimecheckconfig/httpuptimecheckconfig/_generated_object_httpuptimecheckconfig.golden.yaml new file mode 100644 index 0000000000..7442d3ba93 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringuptimecheckconfig/httpuptimecheckconfig/_generated_object_httpuptimecheckconfig.golden.yaml @@ -0,0 +1,62 @@ +apiVersion: monitoring.cnrm.cloud.google.com/v1beta1 +kind: MonitoringUptimeCheckConfig +metadata: + annotations: + cnrm.cloud.google.com/management-conflict-prevention-policy: none + 
cnrm.cloud.google.com/mutable-but-unreadable-fields: '{"spec":{"httpCheck":{"authInfo":{"password":{"valueFrom":{"secretKeyRef":{"key":"password","name":"secret-2-${uniqueId}"}}}},"headers":{"header-one":"value-one","header-two":"value-two"}}}}' + cnrm.cloud.google.com/observed-secret-versions: (removed) + cnrm.cloud.google.com/state-into-spec: merge + finalizers: + - cnrm.cloud.google.com/finalizer + - cnrm.cloud.google.com/deletion-defender + generation: 3 + labels: + cnrm-test: "true" + name: monitoringuptimecheckconfig-${uniqueId} + namespace: ${uniqueId} +spec: + contentMatchers: + - content: .* + matcher: MATCHES_REGEX + displayName: updated-http-uptime-check + httpCheck: + authInfo: + password: + valueFrom: + secretKeyRef: + key: password + name: secret-2-${uniqueId} + username: new-name + body: bmV3LXN0cmluZwo= + contentType: URL_ENCODED + headers: + header-one: value-one + header-two: value-two + maskHeaders: true + path: /other + port: 81 + requestMethod: POST + useSsl: true + validateSsl: true + monitoredResource: + filterLabels: + host: 192.168.1.1 + project_id: ${projectId} + type: uptime_url + period: 300s + projectRef: + external: projects/${projectId} + resourceID: ${uptimeCheckConfigId} + selectedRegions: + - EUROPE + - SOUTH_AMERICA + - ASIA_PACIFIC + timeout: 30s +status: + conditions: + - lastTransitionTime: "1970-01-01T00:00:00Z" + message: The resource is up to date + reason: UpToDate + status: "True" + type: Ready + observedGeneration: 3 diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringuptimecheckconfig/httpuptimecheckconfig/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringuptimecheckconfig/httpuptimecheckconfig/_http.log new file mode 100644 index 0000000000..bce4cf0c7f --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringuptimecheckconfig/httpuptimecheckconfig/_http.log @@ -0,0 +1,356 @@ +POST https://monitoring.googleapis.com/v3/projects/${projectId}/uptimeCheckConfigs?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +{ + "contentMatchers": [ + { + "content": ".*", + "matcher": "MATCHES_REGEX" + } + ], + "displayName": "http-uptime-check", + "httpCheck": { + "authInfo": { + "password": "testABC", + "username": "name" + }, + "body": "c3RyaW5nCg==", + "contentType": "URL_ENCODED", + "headers": { + "header-one": "value-one" + }, + "maskHeaders": true, + "path": "/main", + "port": 80, + "requestMethod": "POST", + "useSsl": true, + "validateSsl": false + }, + "monitoredResource": { + "labels": { + "host": "192.168.1.1", + "project_id": "${projectId}" + }, + "type": "uptime_url" + }, + "period": "60s", + "selectedRegions": [ + "USA" + ], + "timeout": "30s" +} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "checkerType": "STATIC_IP_CHECKERS", + "contentMatchers": [ + { + "content": ".*", + "matcher": "MATCHES_REGEX" + } + ], + "displayName": "http-uptime-check", + "httpCheck": { + "authInfo": { + "password": "******", + "username": "name" + }, + "body": "c3RyaW5nCg==", + "contentType": "URL_ENCODED", + "headers": { + "Content-Length": "******", + "header-one": "******" + }, + "maskHeaders": true, + "path": "/main", + "port": 80, + "requestMethod": "POST", + "useSsl": true + }, + "monitoredResource": { + "labels": { + "host": 
"192.168.1.1", + "project_id": "${projectId}" + }, + "type": "uptime_url" + }, + "name": "projects/${projectId}/uptimeCheckConfigs/${uptimeCheckConfigId}", + "period": "60s", + "selectedRegions": [ + "USA" + ], + "timeout": "30s" +} + +--- + +GET https://monitoring.googleapis.com/v3/projects/${projectId}/uptimeCheckConfigs/${uptimeCheckConfigId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "checkerType": "STATIC_IP_CHECKERS", + "contentMatchers": [ + { + "content": ".*", + "matcher": "MATCHES_REGEX" + } + ], + "displayName": "http-uptime-check", + "httpCheck": { + "authInfo": { + "password": "******", + "username": "name" + }, + "body": "c3RyaW5nCg==", + "contentType": "URL_ENCODED", + "headers": { + "Content-Length": "******", + "header-one": "******" + }, + "maskHeaders": true, + "path": "/main", + "port": 80, + "requestMethod": "POST", + "useSsl": true + }, + "monitoredResource": { + "labels": { + "host": "192.168.1.1", + "project_id": "${projectId}" + }, + "type": "uptime_url" + }, + "name": "projects/${projectId}/uptimeCheckConfigs/${uptimeCheckConfigId}", + "period": "60s", + "selectedRegions": [ + "USA" + ], + "timeout": "30s" +} + +--- + +PATCH https://monitoring.googleapis.com/v3/projects/${projectId}/uptimeCheckConfigs/${uptimeCheckConfigId}?alt=json&updateMask=displayName%2ChttpCheck.authInfo.password%2ChttpCheck.authInfo.username%2ChttpCheck.body%2ChttpCheck.headers%2ChttpCheck.path%2ChttpCheck.port%2ChttpCheck.validateSsl%2Cperiod%2CselectedRegions +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +{ + "contentMatchers": [ + { + "content": ".*", + "matcher": "MATCHES_REGEX" + } + ], + "displayName": "updated-http-uptime-check", + "httpCheck": { + "authInfo": { + "password": "testXYZ", + "username": "new-name" + }, + "body": "bmV3LXN0cmluZwo=", + "contentType": "URL_ENCODED", + "headers": { + "header-one": "value-one", + "header-two": "value-two" + }, + "maskHeaders": true, + "path": "/other", + "port": 81, + "requestMethod": "POST", + "useSsl": true, + "validateSsl": true + }, + "name": "projects/projects/${projectId}/uptimeCheckConfigs/${uptimeCheckConfigId}", + "period": "300s", + "selectedRegions": [ + "EUROPE", + "SOUTH_AMERICA", + "ASIA_PACIFIC" + ], + "timeout": "30s" +} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "checkerType": "STATIC_IP_CHECKERS", + "contentMatchers": [ + { + "content": ".*", + "matcher": "MATCHES_REGEX" + } + ], + "displayName": "updated-http-uptime-check", + "httpCheck": { + "authInfo": { + "password": "******", + "username": "new-name" + }, + "body": "bmV3LXN0cmluZwo=", + "contentType": "URL_ENCODED", + "headers": { + "Content-Length": "******", + "header-one": "******", + "header-two": "******" + }, + "maskHeaders": true, + "path": "/other", + "port": 81, + "requestMethod": "POST", + "useSsl": true, + "validateSsl": true + }, + "monitoredResource": { + "labels": { + "host": "192.168.1.1", + "project_id": "${projectId}" + }, + "type": "uptime_url" + }, + "name": "projects/${projectId}/uptimeCheckConfigs/${uptimeCheckConfigId}", + 
"period": "300s", + "selectedRegions": [ + "EUROPE", + "SOUTH_AMERICA", + "ASIA_PACIFIC" + ], + "timeout": "30s" +} + +--- + +GET https://monitoring.googleapis.com/v3/projects/${projectId}/uptimeCheckConfigs/${uptimeCheckConfigId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "checkerType": "STATIC_IP_CHECKERS", + "contentMatchers": [ + { + "content": ".*", + "matcher": "MATCHES_REGEX" + } + ], + "displayName": "updated-http-uptime-check", + "httpCheck": { + "authInfo": { + "password": "******", + "username": "new-name" + }, + "body": "bmV3LXN0cmluZwo=", + "contentType": "URL_ENCODED", + "headers": { + "Content-Length": "******", + "header-one": "******", + "header-two": "******" + }, + "maskHeaders": true, + "path": "/other", + "port": 81, + "requestMethod": "POST", + "useSsl": true, + "validateSsl": true + }, + "monitoredResource": { + "labels": { + "host": "192.168.1.1", + "project_id": "${projectId}" + }, + "type": "uptime_url" + }, + "name": "projects/${projectId}/uptimeCheckConfigs/${uptimeCheckConfigId}", + "period": "300s", + "selectedRegions": [ + "EUROPE", + "SOUTH_AMERICA", + "ASIA_PACIFIC" + ], + "timeout": "30s" +} + +--- + +DELETE https://monitoring.googleapis.com/v3/projects/${projectId}/uptimeCheckConfigs/${uptimeCheckConfigId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{} + +--- + +GET https://monitoring.googleapis.com/v3/projects/${projectId}/uptimeCheckConfigs/${uptimeCheckConfigId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +404 Not Found +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "error": { + "code": 404, + "message": "Config not found for check ${uptimeCheckConfigId} in project ${projectId}", + "status": "NOT_FOUND" + } +} \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringuptimecheckconfig/httpuptimecheckconfig/update.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringuptimecheckconfig/httpuptimecheckconfig/update.yaml index df8caf64d8..a06f216cca 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringuptimecheckconfig/httpuptimecheckconfig/update.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringuptimecheckconfig/httpuptimecheckconfig/update.yaml @@ -23,8 +23,8 @@ spec: period: 300s timeout: 30s contentMatchers: - - content: "" - matcher: "CONTAINS_STRING" + - content: ".*" + matcher: "MATCHES_REGEX" selectedRegions: - EUROPE - SOUTH_AMERICA diff --git a/tests/e2e/normalize.go b/tests/e2e/normalize.go index d393cc4bcb..b96341553b 100644 --- a/tests/e2e/normalize.go +++ b/tests/e2e/normalize.go @@ -27,6 +27,7 @@ import ( "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/test" testgcp 
"github.com/GoogleCloudPlatform/k8s-config-connector/pkg/test/gcp" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" ) @@ -114,6 +115,7 @@ func normalizeKRMObject(u *unstructured.Unstructured, project testgcp.GCPProject if name == "" { name, _, _ = unstructured.NestedString(u.Object, "status", "name") } + resourceID, _, _ := unstructured.NestedString(u.Object, "spec", "resourceID") tokens := strings.Split(name, "/") if len(tokens) > 2 { typeName := tokens[len(tokens)-2] @@ -129,6 +131,13 @@ func normalizeKRMObject(u *unstructured.Unstructured, project testgcp.GCPProject }) } } + + switch u.GroupVersionKind() { + case schema.GroupVersionKind{Group: "monitoring.cnrm.cloud.google.com", Version: "v1beta1", Kind: "MonitoringUptimeCheckConfig"}: + visitor.stringTransforms = append(visitor.stringTransforms, func(path string, s string) string { + return strings.ReplaceAll(s, resourceID, "${uptimeCheckConfigId}") + }) + } } return visitor.VisitUnstructued(u) diff --git a/tests/e2e/unified_test.go b/tests/e2e/unified_test.go index 28ee6d6fc3..3f5955f1da 100644 --- a/tests/e2e/unified_test.go +++ b/tests/e2e/unified_test.go @@ -349,6 +349,8 @@ func runScenario(ctx context.Context, t *testing.T, testPause bool, fixture reso pathIDs[id] = "${alertPolicyID}" case "conditions": pathIDs[id] = "${conditionID}" + case "uptimeCheckConfigs": + pathIDs[id] = "${uptimeCheckConfigId}" case "operations": operationIDs[id] = true pathIDs[id] = "${operationID}" From b307fdc6a3d6ae98def6ac8621fea7bedda62506 Mon Sep 17 00:00:00 2001 From: justinsb Date: Mon, 24 Jun 2024 13:45:37 -0400 Subject: [PATCH 003/101] refactor: move IAMServiceAccount reference resolution into API package This makes it easier to consume. --- .../v1beta1/gkehubfeaturemembership_types.go | 4 +- apis/gkehub/v1beta1/zz_generated.deepcopy.go | 4 +- apis/refs/v1beta1/gcpserviceaccountref.go | 99 ++++++++++++++++++- .../gkehub/featuremembership_controller.go | 44 ++------- pkg/controller/direct/gkehub/references.go | 57 ----------- 5 files changed, 108 insertions(+), 100 deletions(-) diff --git a/apis/gkehub/v1beta1/gkehubfeaturemembership_types.go b/apis/gkehub/v1beta1/gkehubfeaturemembership_types.go index b775b7844b..d4d5167ee3 100644 --- a/apis/gkehub/v1beta1/gkehubfeaturemembership_types.go +++ b/apis/gkehub/v1beta1/gkehubfeaturemembership_types.go @@ -85,7 +85,7 @@ type FeaturemembershipConfigmanagement struct { type FeaturemembershipGit struct { // +optional - GcpServiceAccountRef *refs.GcpServiceAccountRef `json:"gcpServiceAccountRef,omitempty"` + GcpServiceAccountRef *refs.IAMServiceAccountRef `json:"gcpServiceAccountRef,omitempty"` /* URL for the HTTPS proxy to be used when communicating with the Git repo. */ // +optional @@ -148,7 +148,7 @@ type FeaturemembershipMonitoring struct { type FeaturemembershipOci struct { // +optional - GcpServiceAccountRef *refs.GcpServiceAccountRef `json:"gcpServiceAccountRef,omitempty"` + GcpServiceAccountRef *refs.IAMServiceAccountRef `json:"gcpServiceAccountRef,omitempty"` /* The absolute path of the directory that contains the local resources. Default: the root directory of the image. 
*/ // +optional diff --git a/apis/gkehub/v1beta1/zz_generated.deepcopy.go b/apis/gkehub/v1beta1/zz_generated.deepcopy.go index 6b303c8051..21ea24f0ee 100644 --- a/apis/gkehub/v1beta1/zz_generated.deepcopy.go +++ b/apis/gkehub/v1beta1/zz_generated.deepcopy.go @@ -115,7 +115,7 @@ func (in *FeaturemembershipGit) DeepCopyInto(out *FeaturemembershipGit) { *out = *in if in.GcpServiceAccountRef != nil { in, out := &in.GcpServiceAccountRef, &out.GcpServiceAccountRef - *out = new(refsv1beta1.GcpServiceAccountRef) + *out = new(refsv1beta1.IAMServiceAccountRef) **out = **in } if in.HttpsProxy != nil { @@ -245,7 +245,7 @@ func (in *FeaturemembershipOci) DeepCopyInto(out *FeaturemembershipOci) { *out = *in if in.GcpServiceAccountRef != nil { in, out := &in.GcpServiceAccountRef, &out.GcpServiceAccountRef - *out = new(refsv1beta1.GcpServiceAccountRef) + *out = new(refsv1beta1.IAMServiceAccountRef) **out = **in } if in.PolicyDir != nil { diff --git a/apis/refs/v1beta1/gcpserviceaccountref.go b/apis/refs/v1beta1/gcpserviceaccountref.go index fb6eb7e07b..992950b038 100644 --- a/apis/refs/v1beta1/gcpserviceaccountref.go +++ b/apis/refs/v1beta1/gcpserviceaccountref.go @@ -14,8 +14,20 @@ package v1beta1 -type MetricsGcpServiceAccountRef struct { - /* The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring. The GSA should have the Monitoring Metric Writer(roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Allowed value: The `email` field of an `IAMServiceAccount` resource. */ +import ( + "context" + "fmt" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type IAMServiceAccountRef struct { + /* The GCP Service Account Email used for auth when secretType is gcpServiceAccount. Allowed value: The `email` field of an `IAMServiceAccount` resource. */ External string `json:"external,omitempty"` /* Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names */ Name string `json:"name,omitempty"` @@ -23,11 +35,90 @@ type MetricsGcpServiceAccountRef struct { Namespace string `json:"namespace,omitempty"` } -type GcpServiceAccountRef struct { - /* The GCP Service Account Email used for auth when secretType is gcpServiceAccount. Allowed value: The `email` field of an `IAMServiceAccount` resource. */ +type MetricsGcpServiceAccountRef struct { + /* The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring. The GSA should have the Monitoring Metric Writer(roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Allowed value: The `email` field of an `IAMServiceAccount` resource. */ External string `json:"external,omitempty"` /* Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names */ Name string `json:"name,omitempty"` /* Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ */ Namespace string `json:"namespace,omitempty"` } + +func (r *MetricsGcpServiceAccountRef) Resolve(ctx context.Context, reader client.Reader, src client.Object) error { + if r == nil { + return nil + } + + serviceAccountInfo, err := resolveServiceAccount(ctx, reader, src, r.Name, r.Namespace, r.External) + if err != nil { + return err + } + *r = MetricsGcpServiceAccountRef{External: serviceAccountInfo.External} + return nil +} + +func (r *IAMServiceAccountRef) Resolve(ctx context.Context, reader client.Reader, src client.Object) error { + if r == nil { + return nil + } + + serviceAccountInfo, err := resolveServiceAccount(ctx, reader, src, r.Name, r.Namespace, r.External) + if err != nil { + return err + } + *r = IAMServiceAccountRef{External: serviceAccountInfo.External} + return nil +} + +type serviceAccountInfo struct { + External string +} + +func resolveServiceAccount(ctx context.Context, reader client.Reader, src client.Object, name, namespace, external string) (*serviceAccountInfo, error) { + if external != "" { + if name != "" { + return nil, fmt.Errorf("cannot specify both name and external on an IAMServiceAccount reference") + } + + if strings.Contains(external, "@") { + return &serviceAccountInfo{External: external}, nil + } + return nil, fmt.Errorf("format of IAMServiceAccount reference external=%q was not known (use email address)", external) + } + + if name == "" { + return nil, fmt.Errorf("must specify either name or external on an IAMServiceAccount reference") + } + + key := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + if key.Namespace == "" { + key.Namespace = src.GetNamespace() + } + + computenetwork := &unstructured.Unstructured{} + computenetwork.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "iam.cnrm.cloud.google.com", + Version: "v1beta1", + Kind: "IAMServiceAccount", + }) + if err := reader.Get(ctx, key, computenetwork); err != nil { + if apierrors.IsNotFound(err) { + return nil, fmt.Errorf("referenced IAMServiceAccount %v not found", key) + } + return nil, fmt.Errorf("error reading referenced IAMServiceAccount %v: %w", key, err) + } + + email, _, err := unstructured.NestedString(computenetwork.Object, "status", "email") + if err != nil { + return nil, fmt.Errorf("reading status.email from IAMServiceAccount %v: %w", key, err) + } + // if the status.email not populated, should we construct the email from spec.resourceID or metadata.name. 
+ if email == "" { + return nil, fmt.Errorf("status.email is empty from IAMServiceAccount %v, expected not-empty", key) + } + + return &serviceAccountInfo{External: email}, nil +} diff --git a/pkg/controller/direct/gkehub/featuremembership_controller.go b/pkg/controller/direct/gkehub/featuremembership_controller.go index becfa08f4e..7063abd897 100644 --- a/pkg/controller/direct/gkehub/featuremembership_controller.go +++ b/pkg/controller/direct/gkehub/featuremembership_controller.go @@ -100,11 +100,10 @@ func (m *gkeHubModel) AdapterForObject(ctx context.Context, reader client.Reader if err != nil { return nil, err } - apiObj, err := featureMembershipSpecKRMtoMembershipFeatureSpecAPI(&obj.Spec) - if err != nil { + if err := resolveIAMReferences(ctx, reader, obj); err != nil { return nil, err } - err = setIAMReferences(ctx, reader, obj, apiObj) + apiObj, err := featureMembershipSpecKRMtoMembershipFeatureSpecAPI(&obj.Spec) if err != nil { return nil, err } @@ -118,45 +117,20 @@ func (m *gkeHubModel) AdapterForObject(ctx context.Context, reader client.Reader }, nil } -func setIAMReferences(ctx context.Context, reader client.Reader, obj *krm.GKEHubFeatureMembership, apiObj *featureapi.MembershipFeatureSpec) error { +func resolveIAMReferences(ctx context.Context, reader client.Reader, obj *krm.GKEHubFeatureMembership) error { spec := obj.Spec if spec.Configmanagement != nil && spec.Configmanagement.ConfigSync != nil { - if spec.Configmanagement.ConfigSync.MetricsGcpServiceAccountRef != nil { - val, err := resolveMetricsGcpServiceAccountRef(ctx, reader, spec.Configmanagement.ConfigSync.MetricsGcpServiceAccountRef, obj.GetNamespace()) - if err != nil { - return err - } - // play it safe here to check apiObj ref path exists. The path should be initialized in featureMembershipSpecKRMtoMembershipFeatureSpecAPI if the KRM fields not empty. 
- if apiObj.Configmanagement != nil && apiObj.Configmanagement.ConfigSync != nil { - apiObj.Configmanagement.ConfigSync.MetricsGcpServiceAccountEmail = val - } else { - return fmt.Errorf("apiObj is not initialized properly, expected to see apiObj.Configmanagement.ConfigSync not nil") - } + if err := spec.Configmanagement.ConfigSync.MetricsGcpServiceAccountRef.Resolve(ctx, reader, obj); err != nil { + return err } if spec.Configmanagement.ConfigSync.Git != nil { - if spec.Configmanagement.ConfigSync.Git.GcpServiceAccountRef != nil { - val, err := resolveGcpServiceAccountRef(ctx, reader, spec.Configmanagement.ConfigSync.Git.GcpServiceAccountRef, obj.GetNamespace()) - if err != nil { - return err - } - if apiObj.Configmanagement != nil && apiObj.Configmanagement.ConfigSync != nil && apiObj.Configmanagement.ConfigSync.Git != nil { - apiObj.Configmanagement.ConfigSync.Git.GcpServiceAccountEmail = val - } else { - return fmt.Errorf("apiObj is not initialized properly, expected to see apiObj.Configmanagement.ConfigSync.Git not nil") - } + if err := spec.Configmanagement.ConfigSync.Git.GcpServiceAccountRef.Resolve(ctx, reader, obj); err != nil { + return err } } if spec.Configmanagement.ConfigSync.Oci != nil { - if spec.Configmanagement.ConfigSync.Oci.GcpServiceAccountRef != nil { - val, err := resolveGcpServiceAccountRef(ctx, reader, spec.Configmanagement.ConfigSync.Oci.GcpServiceAccountRef, obj.GetNamespace()) - if err != nil { - return err - } - if apiObj.Configmanagement != nil && apiObj.Configmanagement.ConfigSync != nil && apiObj.Configmanagement.ConfigSync.Oci != nil { - apiObj.Configmanagement.ConfigSync.Oci.GcpServiceAccountEmail = val - } else { - return fmt.Errorf("apiObj is not initialized properly, expected to see apiObj.Configmanagement.ConfigSync.Oci not nil") - } + if err := spec.Configmanagement.ConfigSync.Oci.GcpServiceAccountRef.Resolve(ctx, reader, obj); err != nil { + return err } } } diff --git a/pkg/controller/direct/gkehub/references.go b/pkg/controller/direct/gkehub/references.go index 11bfabdd02..4222b0c8fd 100644 --- a/pkg/controller/direct/gkehub/references.go +++ b/pkg/controller/direct/gkehub/references.go @@ -20,7 +20,6 @@ import ( "strings" krm "github.com/GoogleCloudPlatform/k8s-config-connector/apis/gkehub/v1beta1" - "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -154,59 +153,3 @@ func resolveFeatureRef(ctx context.Context, reader client.Reader, obj *krm.GKEHu id: fmt.Sprintf("projects/%s/locations/%s/features/%s", projectID, featureLocation, featureName), }, nil } - -func resolveMetricsGcpServiceAccountRef(ctx context.Context, reader client.Reader, r *v1beta1.MetricsGcpServiceAccountRef, resourceNamespace string) (string, error) { - name := r.Name - namespace := r.Namespace - external := r.External - return getSAEmailWith(ctx, reader, name, namespace, external, resourceNamespace) -} - -func resolveGcpServiceAccountRef(ctx context.Context, reader client.Reader, r *v1beta1.GcpServiceAccountRef, resourceNamespace string) (string, error) { - name := r.Name - namespace := r.Namespace - external := r.External - return getSAEmailWith(ctx, reader, name, namespace, external, resourceNamespace) - -} - -func getSAEmailWith(ctx context.Context, reader client.Reader, name, namespace, external, resourceNamespace string) (string, error) { - if external != "" { - if name != "" { - return "", fmt.Errorf("cannot 
specify both name and external on IAMServiceAccount reference") - } - return external, nil - } - if name == "" { - return "", fmt.Errorf("must specify either name or external on IAMServiceAccount reference") - } - if namespace == "" { - namespace = resourceNamespace - } - - key := types.NamespacedName{ - Namespace: namespace, - Name: name, - } - sa := &unstructured.Unstructured{} - sa.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "iam.cnrm.cloud.google.com", - Version: "v1beta1", - Kind: "IAMServiceAccount", - }) - if err := reader.Get(ctx, key, sa); err != nil { - if apierrors.IsNotFound(err) { - return "", fmt.Errorf("referenced IAMServiceAccount %v not found", key) - } - return "", fmt.Errorf("error reading referenced IAMServiceAccount %v: %w", key, err) - } - email, _, err := unstructured.NestedString(sa.Object, "status", "email") - if err != nil { - return "", fmt.Errorf("reading status.email from IAMServiceAccount %v: %w", key, err) - } - // if the status.email not populated, should we construct the email from spec.resourceID or metadata.name. - if email == "" { - return "", fmt.Errorf("status.email is empty from IAMServiceAccount %v, expected not-empty", key) - } - return email, nil -} From 3533b93e2664b2ee6110b1a3557f43d16215dce3 Mon Sep 17 00:00:00 2001 From: justinsb Date: Fri, 21 Jun 2024 11:31:22 -0400 Subject: [PATCH 004/101] tests: add tests for monitoringalertpolicy severity --- .../_generated_object_monitoringalertpolicy.golden.yaml | 1 + .../monitoring/v1beta1/monitoringalertpolicy/_http.log | 8 +++++++- .../monitoring/v1beta1/monitoringalertpolicy/create.yaml | 1 + .../monitoring/v1beta1/monitoringalertpolicy/update.yaml | 1 + 4 files changed, 10 insertions(+), 1 deletion(-) diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/_generated_object_monitoringalertpolicy.golden.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/_generated_object_monitoringalertpolicy.golden.yaml index de1458f779..abfb6c9fa3 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/_generated_object_monitoringalertpolicy.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/_generated_object_monitoringalertpolicy.golden.yaml @@ -51,6 +51,7 @@ spec: - name: monitoringnotificationchannel3-${uniqueId} - name: monitoringnotificationchannel1-${uniqueId} resourceID: ${alertPolicyId} + severity: ERROR status: conditions: - lastTransitionTime: "1970-01-01T00:00:00Z" diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/_http.log index 5c4b07b353..b472939747 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/_http.log @@ -313,6 +313,7 @@ User-Agent: Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 t "projects/${projectId}/notificationChannels/${notificationChannelID}", "projects/${projectId}/notificationChannels/${notificationChannelID}" ], + "severity": "WARNING", "userLabels": { "cnrm-test": "true", "managed-by-cnrm": "true" @@ -378,6 +379,7 @@ X-Xss-Protection: 0 "projects/${projectId}/notificationChannels/${notificationChannelID}", "projects/${projectId}/notificationChannels/${notificationChannelID}" ], + "severity": "WARNING", "userLabels": { 
"cnrm-test": "true", "managed-by-cnrm": "true" @@ -449,6 +451,7 @@ X-Xss-Protection: 0 "projects/${projectId}/notificationChannels/${notificationChannelID}", "projects/${projectId}/notificationChannels/${notificationChannelID}" ], + "severity": "WARNING", "userLabels": { "cnrm-test": "true", "managed-by-cnrm": "true" @@ -457,7 +460,7 @@ X-Xss-Protection: 0 --- -PATCH https://monitoring.googleapis.com/v3/projects/${projectId}/alertPolicies/${alertPolicyID}?alt=json&updateMask=displayName%2Ccombiner%2Cenabled%2Cconditions%2CnotificationChannels%2Cdocumentation +PATCH https://monitoring.googleapis.com/v3/projects/${projectId}/alertPolicies/${alertPolicyID}?alt=json&updateMask=displayName%2Ccombiner%2Cenabled%2Cconditions%2CnotificationChannels%2Cdocumentation%2Cseverity Content-Type: application/json User-Agent: Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager @@ -500,6 +503,7 @@ User-Agent: Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 t "projects/${projectId}/notificationChannels/${notificationChannelID}", "projects/${projectId}/notificationChannels/${notificationChannelID}" ], + "severity": "ERROR", "userLabels": { "cnrm-test": "true", "managed-by-cnrm": "true" @@ -565,6 +569,7 @@ X-Xss-Protection: 0 "projects/${projectId}/notificationChannels/${notificationChannelID}", "projects/${projectId}/notificationChannels/${notificationChannelID}" ], + "severity": "ERROR", "userLabels": { "cnrm-test": "true", "managed-by-cnrm": "true" @@ -636,6 +641,7 @@ X-Xss-Protection: 0 "projects/${projectId}/notificationChannels/${notificationChannelID}", "projects/${projectId}/notificationChannels/${notificationChannelID}" ], + "severity": "ERROR", "userLabels": { "cnrm-test": "true", "managed-by-cnrm": "true" diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/create.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/create.yaml index 17f7f6db95..b77e4ea778 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/create.yaml @@ -19,6 +19,7 @@ metadata: spec: displayName: Test Alert Policy enabled: true + severity: WARNING notificationChannels: - name: monitoringnotificationchannel1-${uniqueId} - name: monitoringnotificationchannel2-${uniqueId} diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/update.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/update.yaml index 91223d46ba..277ea709ef 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/update.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/update.yaml @@ -19,6 +19,7 @@ metadata: spec: displayName: Updated Test Alert Policy enabled: false + severity: ERROR notificationChannels: - name: monitoringnotificationchannel3-${uniqueId} - name: monitoringnotificationchannel1-${uniqueId} From 74b8ee10212e215d078e31f4a2bd7381763af8ce Mon Sep 17 00:00:00 2001 From: justinsb Date: Mon, 24 Jun 2024 13:56:33 -0400 Subject: [PATCH 005/101] chore: update vcr tests for severity test change --- .../_vcr_cassettes/tf.yaml | 3194 +++++++++-------- 1 file changed, 1601 insertions(+), 1593 deletions(-) diff --git 
a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/_vcr_cassettes/tf.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/_vcr_cassettes/tf.yaml index 7a9c69304e..c74dbff79f 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/_vcr_cassettes/tf.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringalertpolicy/_vcr_cassettes/tf.yaml @@ -15,1654 +15,1662 @@ --- version: 2 interactions: - - id: 0 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 196 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: | - {"displayName":"monitoringnotificationchannel2-41w3iydhydd0","enabled":true,"labels":{"email_address":"dev@example.com"},"type":"email","userLabels":{"cnrm-test":"true","managed-by-cnrm":"true"}} - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels?alt=json - method: POST - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "type": "email", - "displayName": "monitoringnotificationchannel2-41w3iydhydd0", - "labels": { - "email_address": "dev@example.com" - }, - "name": "projects/example-project/notificationChannels/3903573194057951866", - "userLabels": { - "managed-by-cnrm": "true", - "cnrm-test": "true" - }, - "enabled": true, - "creationRecord": { - "mutateTime": "2024-04-25T01:50:04.477480023Z" - }, - "mutationRecords": [ + - id: 0 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 196 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: | + {"displayName":"monitoringnotificationchannel3-41w3iydhydd0","enabled":true,"labels":{"email_address":"dev@example.com"},"type":"email","userLabels":{"cnrm-test":"true","managed-by-cnrm":"true"}} + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels?alt=json + method: POST + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "mutateTime": "2024-04-25T01:50:04.477480023Z" + "type": "email", + "displayName": "monitoringnotificationchannel3-41w3iydhydd0", + "labels": { + "email_address": "dev@example.com" + }, + "name": "projects/example-project/notificationChannels/7726733045005565233", + "userLabels": { + "managed-by-cnrm": "true", + "cnrm-test": "true" + }, + "enabled": true, + "creationRecord": { + "mutateTime": "2024-06-24T17:53:10.350354958Z" + }, + "mutationRecords": [ + { + "mutateTime": "2024-06-24T17:53:10.350354958Z" + } + ] } - ] - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 1.139551922s - - id: 1 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/3903573194057951866?alt=json - method: GET - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - 
trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "type": "email", - "displayName": "monitoringnotificationchannel2-41w3iydhydd0", - "labels": { - "email_address": "dev@example.com" - }, - "name": "projects/example-project/notificationChannels/3903573194057951866", - "userLabels": { - "managed-by-cnrm": "true", - "cnrm-test": "true" - }, - "enabled": true, - "creationRecord": { - "mutateTime": "2024-04-25T01:50:04.477480023Z" - }, - "mutationRecords": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 1.326530731s + - id: 1 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/7726733045005565233?alt=json + method: GET + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "mutateTime": "2024-04-25T01:50:04.477480023Z" + "type": "email", + "displayName": "monitoringnotificationchannel3-41w3iydhydd0", + "labels": { + "email_address": "dev@example.com" + }, + "name": "projects/example-project/notificationChannels/7726733045005565233", + "userLabels": { + "cnrm-test": "true", + "managed-by-cnrm": "true" + }, + "enabled": true, + "creationRecord": { + "mutateTime": "2024-06-24T17:53:10.350354958Z" + }, + "mutationRecords": [ + { + "mutateTime": "2024-06-24T17:53:10.350354958Z" + } + ] } - ] - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 153.770546ms - - id: 2 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/3903573194057951866?alt=json - method: GET - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "type": "email", - "displayName": "monitoringnotificationchannel2-41w3iydhydd0", - "labels": { - "email_address": "dev@example.com" - }, - "name": "projects/example-project/notificationChannels/3903573194057951866", - "userLabels": { - "managed-by-cnrm": "true", - "cnrm-test": "true" - }, - "enabled": true, - "creationRecord": { - "mutateTime": "2024-04-25T01:50:04.477480023Z" - }, - "mutationRecords": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 151.437665ms + - id: 2 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/7726733045005565233?alt=json + method: GET + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "mutateTime": "2024-04-25T01:50:04.477480023Z" + "type": "email", + "displayName": 
"monitoringnotificationchannel3-41w3iydhydd0", + "labels": { + "email_address": "dev@example.com" + }, + "name": "projects/example-project/notificationChannels/7726733045005565233", + "userLabels": { + "managed-by-cnrm": "true", + "cnrm-test": "true" + }, + "enabled": true, + "creationRecord": { + "mutateTime": "2024-06-24T17:53:10.350354958Z" + }, + "mutationRecords": [ + { + "mutateTime": "2024-06-24T17:53:10.350354958Z" + } + ] } - ] - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 142.278104ms - - id: 3 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 196 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: | - {"displayName":"monitoringnotificationchannel1-41w3iydhydd0","enabled":true,"labels":{"email_address":"dev@example.com"},"type":"email","userLabels":{"cnrm-test":"true","managed-by-cnrm":"true"}} - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels?alt=json - method: POST - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "type": "email", - "displayName": "monitoringnotificationchannel1-41w3iydhydd0", - "labels": { - "email_address": "dev@example.com" - }, - "name": "projects/example-project/notificationChannels/3903573194057950299", - "userLabels": { - "cnrm-test": "true", - "managed-by-cnrm": "true" - }, - "enabled": true, - "creationRecord": { - "mutateTime": "2024-04-25T01:50:07.024284365Z" - }, - "mutationRecords": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 159.082224ms + - id: 3 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 196 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: | + {"displayName":"monitoringnotificationchannel2-41w3iydhydd0","enabled":true,"labels":{"email_address":"dev@example.com"},"type":"email","userLabels":{"cnrm-test":"true","managed-by-cnrm":"true"}} + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels?alt=json + method: POST + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "mutateTime": "2024-04-25T01:50:07.024284365Z" + "type": "email", + "displayName": "monitoringnotificationchannel2-41w3iydhydd0", + "labels": { + "email_address": "dev@example.com" + }, + "name": "projects/example-project/notificationChannels/2866813153676774169", + "userLabels": { + "managed-by-cnrm": "true", + "cnrm-test": "true" + }, + "enabled": true, + "creationRecord": { + "mutateTime": "2024-06-24T17:53:13.229632439Z" + }, + "mutationRecords": [ + { + "mutateTime": "2024-06-24T17:53:13.229632439Z" + } + ] } - ] - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 1.066897047s - - id: 4 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: 
https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/3903573194057950299?alt=json - method: GET - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "type": "email", - "displayName": "monitoringnotificationchannel1-41w3iydhydd0", - "labels": { - "email_address": "dev@example.com" - }, - "name": "projects/example-project/notificationChannels/3903573194057950299", - "userLabels": { - "managed-by-cnrm": "true", - "cnrm-test": "true" - }, - "enabled": true, - "creationRecord": { - "mutateTime": "2024-04-25T01:50:07.024284365Z" - }, - "mutationRecords": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 1.372780394s + - id: 4 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/2866813153676774169?alt=json + method: GET + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "mutateTime": "2024-04-25T01:50:07.024284365Z" + "type": "email", + "displayName": "monitoringnotificationchannel2-41w3iydhydd0", + "labels": { + "email_address": "dev@example.com" + }, + "name": "projects/example-project/notificationChannels/2866813153676774169", + "userLabels": { + "cnrm-test": "true", + "managed-by-cnrm": "true" + }, + "enabled": true, + "creationRecord": { + "mutateTime": "2024-06-24T17:53:13.229632439Z" + }, + "mutationRecords": [ + { + "mutateTime": "2024-06-24T17:53:13.229632439Z" + } + ] } - ] - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 163.778818ms - - id: 5 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/3903573194057950299?alt=json - method: GET - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "type": "email", - "displayName": "monitoringnotificationchannel1-41w3iydhydd0", - "labels": { - "email_address": "dev@example.com" - }, - "name": "projects/example-project/notificationChannels/3903573194057950299", - "userLabels": { - "managed-by-cnrm": "true", - "cnrm-test": "true" - }, - "enabled": true, - "creationRecord": { - "mutateTime": "2024-04-25T01:50:07.024284365Z" - }, - "mutationRecords": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 155.296135ms + - id: 5 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/2866813153676774169?alt=json + method: GET + response: + proto: HTTP/2.0 + 
proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "mutateTime": "2024-04-25T01:50:07.024284365Z" + "type": "email", + "displayName": "monitoringnotificationchannel2-41w3iydhydd0", + "labels": { + "email_address": "dev@example.com" + }, + "name": "projects/example-project/notificationChannels/2866813153676774169", + "userLabels": { + "cnrm-test": "true", + "managed-by-cnrm": "true" + }, + "enabled": true, + "creationRecord": { + "mutateTime": "2024-06-24T17:53:13.229632439Z" + }, + "mutationRecords": [ + { + "mutateTime": "2024-06-24T17:53:13.229632439Z" + } + ] } - ] - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 126.664097ms - - id: 6 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 196 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: | - {"displayName":"monitoringnotificationchannel3-41w3iydhydd0","enabled":true,"labels":{"email_address":"dev@example.com"},"type":"email","userLabels":{"cnrm-test":"true","managed-by-cnrm":"true"}} - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels?alt=json - method: POST - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "type": "email", - "displayName": "monitoringnotificationchannel3-41w3iydhydd0", - "labels": { - "email_address": "dev@example.com" - }, - "name": "projects/example-project/notificationChannels/3527810009930327309", - "userLabels": { - "managed-by-cnrm": "true", - "cnrm-test": "true" - }, - "enabled": true, - "creationRecord": { - "mutateTime": "2024-04-25T01:50:09.581882038Z" - }, - "mutationRecords": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 164.254463ms + - id: 6 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 196 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: | + {"displayName":"monitoringnotificationchannel1-41w3iydhydd0","enabled":true,"labels":{"email_address":"dev@example.com"},"type":"email","userLabels":{"cnrm-test":"true","managed-by-cnrm":"true"}} + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels?alt=json + method: POST + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "mutateTime": "2024-04-25T01:50:09.581882038Z" + "type": "email", + "displayName": "monitoringnotificationchannel1-41w3iydhydd0", + "labels": { + "email_address": "dev@example.com" + }, + "name": "projects/example-project/notificationChannels/2866813153676774505", + "userLabels": { + "managed-by-cnrm": "true", + "cnrm-test": "true" + }, + "enabled": true, + "creationRecord": { + "mutateTime": "2024-06-24T17:53:16.282199812Z" + }, + "mutationRecords": [ + { + "mutateTime": "2024-06-24T17:53:16.282199812Z" + } + ] } - ] - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 1.580690722s - - id: 7 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 2904 - transfer_encoding: [] - 
trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: | - {"combiner":"AND_WITH_MATCHING_RESOURCE","conditions":[{"conditionThreshold":{"aggregations":[{"alignmentPeriod":"60s","crossSeriesReducer":"REDUCE_MEAN","groupByFields":["project","resource.label.instance_id","resource.label.zone"],"perSeriesAligner":"ALIGN_MAX"}],"comparison":"COMPARISON_GT","duration":"900s","filter":"metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"","thresholdValue":0.9,"trigger":{"count":1}},"displayName":"Very high CPU usage"}],"displayName":"Test Alert Policy","documentation":{"content":"Introduction to alerting\nAlerting gives timely awareness to problems in your cloud applications so you can resolve the problems quickly.\n\nTo create an alerting policy, you must describe the circumstances under which you want to be alerted and how you want to be notified. This page provides an overview of alerting policies and the concepts behind them.\n\nFor a practical introduction to alerting, try one of these quickstarts:\n\nQuickstart for GCP\nQuickstart for AWS\nFor an alerting policy that monitors usage and alerts you when you approach the threshold for billing, see Alerting on monthly log ingestion and Alerting on monthly trace span ingestion.\n\nHow does alerting work?\nYou can create and manage alerting policies with the Google Cloud Console, the Cloud Monitoring API, and Cloud SDK.\n\nEach alerting policy specifies the following:\n\nConditions that identify when a resource or a group of resources is in a state that requires you to take action. The conditions for an alerting policy are continuously monitored. You cannot configure the conditions to be monitored only for certain time periods.\n\nNotifications that are sent through email, SMS, or other channels to let your support team know when the conditions have been met. Configuring notifications is optional. For information on the available notification channels, see Notification options.\n\nDocumentation that can be included in some types of notifications to help your support team resolve the issue. Configuring documentation is optional.\n\nWhen the conditions of an alerting policy are met, Cloud Monitoring creates and displays an incident in the Google Cloud Console. If you set up notifications, Cloud Monitoring also sends notifications to people or third-party notification services. 
Responders can acknowledge receipt of the notification, but the incident remains open until the conditions that triggered the incident are no longer true.\n\nFor information and viewing and managing incidents by using the Google Cloud Console, see Incidents and events.","mimeType":"text/markdown"},"enabled":true,"notificationChannels":["projects/example-project/notificationChannels/3903573194057950299","projects/example-project/notificationChannels/3903573194057951866"],"userLabels":{"cnrm-test":"true","managed-by-cnrm":"true"}} - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies?alt=json - method: POST - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "name": "projects/example-project/alertPolicies/2196633403401485933", - "displayName": "Test Alert Policy", - "combiner": "AND_WITH_MATCHING_RESOURCE", - "creationRecord": { - "mutateTime": "2024-04-25T01:50:09.751333324Z", - "mutatedBy": "integration-test@example-project.iam.gserviceaccount.com" - }, - "mutationRecord": { - "mutateTime": "2024-04-25T01:50:09.751333324Z", - "mutatedBy": "integration-test@example-project.iam.gserviceaccount.com" - }, - "conditions": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 1.354518258s + - id: 7 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/2866813153676774505?alt=json + method: GET + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "conditionThreshold": { - "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", - "comparison": "COMPARISON_GT", - "thresholdValue": 0.9, - "duration": "900s", - "trigger": { - "count": 1 - }, - "aggregations": [ - { - "alignmentPeriod": "60s", - "perSeriesAligner": "ALIGN_MAX", - "crossSeriesReducer": "REDUCE_MEAN", - "groupByFields": [ - "project", - "resource.label.instance_id", - "resource.label.zone" - ] - } - ] + "type": "email", + "displayName": "monitoringnotificationchannel1-41w3iydhydd0", + "labels": { + "email_address": "dev@example.com" + }, + "name": "projects/example-project/notificationChannels/2866813153676774505", + "userLabels": { + "cnrm-test": "true", + "managed-by-cnrm": "true" }, - "displayName": "Very high CPU usage", - "name": "projects/example-project/alertPolicies/2196633403401485933/conditions/2196633403401483150" + "enabled": true, + "creationRecord": { + "mutateTime": "2024-06-24T17:53:16.282199812Z" + }, + "mutationRecords": [ + { + "mutateTime": "2024-06-24T17:53:16.282199812Z" + } + ] } - ], - "documentation": { - "content": "Introduction to alerting\nAlerting gives timely awareness to problems in your cloud applications so you can resolve the problems quickly.\n\nTo create an alerting policy, you must describe the circumstances under which you want to be alerted and how you want to be notified. 
This page provides an overview of alerting policies and the concepts behind them.\n\nFor a practical introduction to alerting, try one of these quickstarts:\n\nQuickstart for GCP\nQuickstart for AWS\nFor an alerting policy that monitors usage and alerts you when you approach the threshold for billing, see Alerting on monthly log ingestion and Alerting on monthly trace span ingestion.\n\nHow does alerting work?\nYou can create and manage alerting policies with the Google Cloud Console, the Cloud Monitoring API, and Cloud SDK.\n\nEach alerting policy specifies the following:\n\nConditions that identify when a resource or a group of resources is in a state that requires you to take action. The conditions for an alerting policy are continuously monitored. You cannot configure the conditions to be monitored only for certain time periods.\n\nNotifications that are sent through email, SMS, or other channels to let your support team know when the conditions have been met. Configuring notifications is optional. For information on the available notification channels, see Notification options.\n\nDocumentation that can be included in some types of notifications to help your support team resolve the issue. Configuring documentation is optional.\n\nWhen the conditions of an alerting policy are met, Cloud Monitoring creates and displays an incident in the Google Cloud Console. If you set up notifications, Cloud Monitoring also sends notifications to people or third-party notification services. Responders can acknowledge receipt of the notification, but the incident remains open until the conditions that triggered the incident are no longer true.\n\nFor information and viewing and managing incidents by using the Google Cloud Console, see Incidents and events.", - "mimeType": "text/markdown" - }, - "notificationChannels": [ - "projects/example-project/notificationChannels/3903573194057950299", - "projects/example-project/notificationChannels/3903573194057951866" - ], - "userLabels": { - "cnrm-test": "true", - "managed-by-cnrm": "true" - }, - "enabled": true - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 2.219920348s - - id: 8 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/3527810009930327309?alt=json - method: GET - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "type": "email", - "displayName": "monitoringnotificationchannel3-41w3iydhydd0", - "labels": { - "email_address": "dev@example.com" - }, - "name": "projects/example-project/notificationChannels/3527810009930327309", - "userLabels": { - "managed-by-cnrm": "true", - "cnrm-test": "true" - }, - "enabled": true, - "creationRecord": { - "mutateTime": "2024-04-25T01:50:09.581882038Z" - }, - "mutationRecords": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 161.134144ms + - id: 8 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - 
application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/2866813153676774505?alt=json + method: GET + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "mutateTime": "2024-04-25T01:50:09.581882038Z" + "type": "email", + "displayName": "monitoringnotificationchannel1-41w3iydhydd0", + "labels": { + "email_address": "dev@example.com" + }, + "name": "projects/example-project/notificationChannels/2866813153676774505", + "userLabels": { + "managed-by-cnrm": "true", + "cnrm-test": "true" + }, + "enabled": true, + "creationRecord": { + "mutateTime": "2024-06-24T17:53:16.282199812Z" + }, + "mutationRecords": [ + { + "mutateTime": "2024-06-24T17:53:16.282199812Z" + } + ] } - ] - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 137.292348ms - - id: 9 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/3527810009930327309?alt=json - method: GET - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "type": "email", - "displayName": "monitoringnotificationchannel3-41w3iydhydd0", - "labels": { - "email_address": "dev@example.com" - }, - "name": "projects/example-project/notificationChannels/3527810009930327309", - "userLabels": { - "cnrm-test": "true", - "managed-by-cnrm": "true" - }, - "enabled": true, - "creationRecord": { - "mutateTime": "2024-04-25T01:50:09.581882038Z" - }, - "mutationRecords": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 162.055084ms + - id: 9 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 2917 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: | + {"combiner":"AND_WITH_MATCHING_RESOURCE","conditions":[{"conditionThreshold":{"aggregations":[{"alignmentPeriod":"60s","crossSeriesReducer":"REDUCE_MEAN","groupByFields":["project","resource.label.instance_id","resource.label.zone"],"perSeriesAligner":"ALIGN_MAX"}],"comparison":"COMPARISON_GT","duration":"900s","filter":"metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"","thresholdValue":0.9,"trigger":{"count":1}},"displayName":"Very high CPU usage"}],"displayName":"Test Alert Policy","documentation":{"content":"Introduction to alerting\nAlerting gives timely awareness to problems in your cloud applications so you can resolve the problems quickly.\n\nTo create an alerting policy, you must describe the circumstances under which you want to be alerted and how you want to be notified. 
This page provides an overview of alerting policies and the concepts behind them.\n\nFor a practical introduction to alerting, try one of these quickstarts:\n\nQuickstart for GCP\nQuickstart for AWS\nFor an alerting policy that monitors usage and alerts you when you approach the threshold for billing, see Alerting on monthly log ingestion and Alerting on monthly trace span ingestion.\n\nHow does alerting work?\nYou can create and manage alerting policies with the Google Cloud Console, the Cloud Monitoring API, and Cloud SDK.\n\nEach alerting policy specifies the following:\n\nConditions that identify when a resource or a group of resources is in a state that requires you to take action. The conditions for an alerting policy are continuously monitored. You cannot configure the conditions to be monitored only for certain time periods.\n\nNotifications that are sent through email, SMS, or other channels to let your support team know when the conditions have been met. Configuring notifications is optional. For information on the available notification channels, see Notification options.\n\nDocumentation that can be included in some types of notifications to help your support team resolve the issue. Configuring documentation is optional.\n\nWhen the conditions of an alerting policy are met, Cloud Monitoring creates and displays an incident in the Google Cloud Console. If you set up notifications, Cloud Monitoring also sends notifications to people or third-party notification services. Responders can acknowledge receipt of the notification, but the incident remains open until the conditions that triggered the incident are no longer true.\n\nFor information and viewing and managing incidents by using the Google Cloud Console, see Incidents and events.","mimeType":"text/markdown"},"enabled":true,"notificationChannels":["projects/example-project/notificationChannels/2866813153676774505","projects/example-project/notificationChannels/2866813153676774169"],"severity":"WARNING","userLabels":{"cnrm-test":"true","managed-by-cnrm":"true"}} + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies?alt=json + method: POST + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "mutateTime": "2024-04-25T01:50:09.581882038Z" + "name": "projects/example-project/alertPolicies/17971456350742769146", + "displayName": "Test Alert Policy", + "combiner": "AND_WITH_MATCHING_RESOURCE", + "creationRecord": { + "mutateTime": "2024-06-24T17:53:19.490308857Z", + "mutatedBy": "justinsb@google.com" + }, + "mutationRecord": { + "mutateTime": "2024-06-24T17:53:19.490308857Z", + "mutatedBy": "justinsb@google.com" + }, + "conditions": [ + { + "conditionThreshold": { + "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", + "comparison": "COMPARISON_GT", + "thresholdValue": 0.9, + "duration": "900s", + "trigger": { + "count": 1 + }, + "aggregations": [ + { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_MAX", + "crossSeriesReducer": "REDUCE_MEAN", + "groupByFields": [ + "project", + "resource.label.instance_id", + "resource.label.zone" + ] + } + ] + }, + "displayName": "Very high CPU usage", + "name": "projects/example-project/alertPolicies/17971456350742769146/conditions/17971456350742768623" + } + ], + "documentation": { + "content": "Introduction to alerting\nAlerting gives 
timely awareness to problems in your cloud applications so you can resolve the problems quickly.\n\nTo create an alerting policy, you must describe the circumstances under which you want to be alerted and how you want to be notified. This page provides an overview of alerting policies and the concepts behind them.\n\nFor a practical introduction to alerting, try one of these quickstarts:\n\nQuickstart for GCP\nQuickstart for AWS\nFor an alerting policy that monitors usage and alerts you when you approach the threshold for billing, see Alerting on monthly log ingestion and Alerting on monthly trace span ingestion.\n\nHow does alerting work?\nYou can create and manage alerting policies with the Google Cloud Console, the Cloud Monitoring API, and Cloud SDK.\n\nEach alerting policy specifies the following:\n\nConditions that identify when a resource or a group of resources is in a state that requires you to take action. The conditions for an alerting policy are continuously monitored. You cannot configure the conditions to be monitored only for certain time periods.\n\nNotifications that are sent through email, SMS, or other channels to let your support team know when the conditions have been met. Configuring notifications is optional. For information on the available notification channels, see Notification options.\n\nDocumentation that can be included in some types of notifications to help your support team resolve the issue. Configuring documentation is optional.\n\nWhen the conditions of an alerting policy are met, Cloud Monitoring creates and displays an incident in the Google Cloud Console. If you set up notifications, Cloud Monitoring also sends notifications to people or third-party notification services. Responders can acknowledge receipt of the notification, but the incident remains open until the conditions that triggered the incident are no longer true.\n\nFor information and viewing and managing incidents by using the Google Cloud Console, see Incidents and events.", + "mimeType": "text/markdown" + }, + "notificationChannels": [ + "projects/example-project/notificationChannels/2866813153676774505", + "projects/example-project/notificationChannels/2866813153676774169" + ], + "userLabels": { + "managed-by-cnrm": "true", + "cnrm-test": "true" + }, + "enabled": true, + "severity": "WARNING" } - ] - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 142.129299ms - - id: 10 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies/2196633403401485933?alt=json - method: GET - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "name": "projects/example-project/alertPolicies/2196633403401485933", - "displayName": "Test Alert Policy", - "combiner": "AND_WITH_MATCHING_RESOURCE", - "creationRecord": { - "mutateTime": "2024-04-25T01:50:09.751333324Z", - "mutatedBy": "integration-test@example-project.iam.gserviceaccount.com" - }, - "mutationRecord": { - "mutateTime": "2024-04-25T01:50:09.751333324Z", - "mutatedBy": "integration-test@example-project.iam.gserviceaccount.com" - }, - "conditions": [ + headers: + Content-Type: + - application/json; 
charset=UTF-8 + status: 200 OK + code: 200 + duration: 2.470854938s + - id: 10 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies/17971456350742769146?alt=json + method: GET + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "conditionThreshold": { - "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", - "comparison": "COMPARISON_GT", - "thresholdValue": 0.9, - "duration": "900s", - "trigger": { - "count": 1 - }, - "aggregations": [ - { - "alignmentPeriod": "60s", - "perSeriesAligner": "ALIGN_MAX", - "crossSeriesReducer": "REDUCE_MEAN", - "groupByFields": [ - "project", - "resource.label.instance_id", - "resource.label.zone" + "name": "projects/example-project/alertPolicies/17971456350742769146", + "displayName": "Test Alert Policy", + "combiner": "AND_WITH_MATCHING_RESOURCE", + "creationRecord": { + "mutateTime": "2024-06-24T17:53:19.490308857Z", + "mutatedBy": "justinsb@google.com" + }, + "mutationRecord": { + "mutateTime": "2024-06-24T17:53:19.490308857Z", + "mutatedBy": "justinsb@google.com" + }, + "conditions": [ + { + "conditionThreshold": { + "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", + "comparison": "COMPARISON_GT", + "thresholdValue": 0.9, + "duration": "900s", + "trigger": { + "count": 1 + }, + "aggregations": [ + { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_MAX", + "crossSeriesReducer": "REDUCE_MEAN", + "groupByFields": [ + "project", + "resource.label.instance_id", + "resource.label.zone" + ] + } ] - } - ] + }, + "displayName": "Very high CPU usage", + "name": "projects/example-project/alertPolicies/17971456350742769146/conditions/17971456350742768623" + } + ], + "documentation": { + "content": "Introduction to alerting\nAlerting gives timely awareness to problems in your cloud applications so you can resolve the problems quickly.\n\nTo create an alerting policy, you must describe the circumstances under which you want to be alerted and how you want to be notified. This page provides an overview of alerting policies and the concepts behind them.\n\nFor a practical introduction to alerting, try one of these quickstarts:\n\nQuickstart for GCP\nQuickstart for AWS\nFor an alerting policy that monitors usage and alerts you when you approach the threshold for billing, see Alerting on monthly log ingestion and Alerting on monthly trace span ingestion.\n\nHow does alerting work?\nYou can create and manage alerting policies with the Google Cloud Console, the Cloud Monitoring API, and Cloud SDK.\n\nEach alerting policy specifies the following:\n\nConditions that identify when a resource or a group of resources is in a state that requires you to take action. The conditions for an alerting policy are continuously monitored. You cannot configure the conditions to be monitored only for certain time periods.\n\nNotifications that are sent through email, SMS, or other channels to let your support team know when the conditions have been met. Configuring notifications is optional. 
For information on the available notification channels, see Notification options.\n\nDocumentation that can be included in some types of notifications to help your support team resolve the issue. Configuring documentation is optional.\n\nWhen the conditions of an alerting policy are met, Cloud Monitoring creates and displays an incident in the Google Cloud Console. If you set up notifications, Cloud Monitoring also sends notifications to people or third-party notification services. Responders can acknowledge receipt of the notification, but the incident remains open until the conditions that triggered the incident are no longer true.\n\nFor information and viewing and managing incidents by using the Google Cloud Console, see Incidents and events.", + "mimeType": "text/markdown" + }, + "notificationChannels": [ + "projects/example-project/notificationChannels/2866813153676774505", + "projects/example-project/notificationChannels/2866813153676774169" + ], + "userLabels": { + "cnrm-test": "true", + "managed-by-cnrm": "true" }, - "displayName": "Very high CPU usage", - "name": "projects/example-project/alertPolicies/2196633403401485933/conditions/2196633403401483150" + "enabled": true, + "severity": "WARNING" } - ], - "documentation": { - "content": "Introduction to alerting\nAlerting gives timely awareness to problems in your cloud applications so you can resolve the problems quickly.\n\nTo create an alerting policy, you must describe the circumstances under which you want to be alerted and how you want to be notified. This page provides an overview of alerting policies and the concepts behind them.\n\nFor a practical introduction to alerting, try one of these quickstarts:\n\nQuickstart for GCP\nQuickstart for AWS\nFor an alerting policy that monitors usage and alerts you when you approach the threshold for billing, see Alerting on monthly log ingestion and Alerting on monthly trace span ingestion.\n\nHow does alerting work?\nYou can create and manage alerting policies with the Google Cloud Console, the Cloud Monitoring API, and Cloud SDK.\n\nEach alerting policy specifies the following:\n\nConditions that identify when a resource or a group of resources is in a state that requires you to take action. The conditions for an alerting policy are continuously monitored. You cannot configure the conditions to be monitored only for certain time periods.\n\nNotifications that are sent through email, SMS, or other channels to let your support team know when the conditions have been met. Configuring notifications is optional. For information on the available notification channels, see Notification options.\n\nDocumentation that can be included in some types of notifications to help your support team resolve the issue. Configuring documentation is optional.\n\nWhen the conditions of an alerting policy are met, Cloud Monitoring creates and displays an incident in the Google Cloud Console. If you set up notifications, Cloud Monitoring also sends notifications to people or third-party notification services. 
Responders can acknowledge receipt of the notification, but the incident remains open until the conditions that triggered the incident are no longer true.\n\nFor information and viewing and managing incidents by using the Google Cloud Console, see Incidents and events.", - "mimeType": "text/markdown" - }, - "notificationChannels": [ - "projects/example-project/notificationChannels/3903573194057950299", - "projects/example-project/notificationChannels/3903573194057951866" - ], - "userLabels": { - "managed-by-cnrm": "true", - "cnrm-test": "true" - }, - "enabled": true - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 107.434096ms - - id: 11 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies/2196633403401485933?alt=json - method: GET - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "name": "projects/example-project/alertPolicies/2196633403401485933", - "displayName": "Test Alert Policy", - "combiner": "AND_WITH_MATCHING_RESOURCE", - "creationRecord": { - "mutateTime": "2024-04-25T01:50:09.751333324Z", - "mutatedBy": "integration-test@example-project.iam.gserviceaccount.com" - }, - "mutationRecord": { - "mutateTime": "2024-04-25T01:50:09.751333324Z", - "mutatedBy": "integration-test@example-project.iam.gserviceaccount.com" - }, - "conditions": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 163.358924ms + - id: 11 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies/17971456350742769146?alt=json + method: GET + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "conditionThreshold": { - "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", - "comparison": "COMPARISON_GT", - "thresholdValue": 0.9, - "duration": "900s", - "trigger": { - "count": 1 - }, - "aggregations": [ - { - "alignmentPeriod": "60s", - "perSeriesAligner": "ALIGN_MAX", - "crossSeriesReducer": "REDUCE_MEAN", - "groupByFields": [ - "project", - "resource.label.instance_id", - "resource.label.zone" + "name": "projects/example-project/alertPolicies/17971456350742769146", + "displayName": "Test Alert Policy", + "combiner": "AND_WITH_MATCHING_RESOURCE", + "creationRecord": { + "mutateTime": "2024-06-24T17:53:19.490308857Z", + "mutatedBy": "justinsb@google.com" + }, + "mutationRecord": { + "mutateTime": "2024-06-24T17:53:19.490308857Z", + "mutatedBy": "justinsb@google.com" + }, + "conditions": [ + { + "conditionThreshold": { + "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", + "comparison": "COMPARISON_GT", + "thresholdValue": 0.9, + "duration": "900s", + "trigger": { + "count": 1 + }, + "aggregations": [ + { + 
"alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_MAX", + "crossSeriesReducer": "REDUCE_MEAN", + "groupByFields": [ + "project", + "resource.label.instance_id", + "resource.label.zone" + ] + } ] - } - ] + }, + "displayName": "Very high CPU usage", + "name": "projects/example-project/alertPolicies/17971456350742769146/conditions/17971456350742768623" + } + ], + "documentation": { + "content": "Introduction to alerting\nAlerting gives timely awareness to problems in your cloud applications so you can resolve the problems quickly.\n\nTo create an alerting policy, you must describe the circumstances under which you want to be alerted and how you want to be notified. This page provides an overview of alerting policies and the concepts behind them.\n\nFor a practical introduction to alerting, try one of these quickstarts:\n\nQuickstart for GCP\nQuickstart for AWS\nFor an alerting policy that monitors usage and alerts you when you approach the threshold for billing, see Alerting on monthly log ingestion and Alerting on monthly trace span ingestion.\n\nHow does alerting work?\nYou can create and manage alerting policies with the Google Cloud Console, the Cloud Monitoring API, and Cloud SDK.\n\nEach alerting policy specifies the following:\n\nConditions that identify when a resource or a group of resources is in a state that requires you to take action. The conditions for an alerting policy are continuously monitored. You cannot configure the conditions to be monitored only for certain time periods.\n\nNotifications that are sent through email, SMS, or other channels to let your support team know when the conditions have been met. Configuring notifications is optional. For information on the available notification channels, see Notification options.\n\nDocumentation that can be included in some types of notifications to help your support team resolve the issue. Configuring documentation is optional.\n\nWhen the conditions of an alerting policy are met, Cloud Monitoring creates and displays an incident in the Google Cloud Console. If you set up notifications, Cloud Monitoring also sends notifications to people or third-party notification services. Responders can acknowledge receipt of the notification, but the incident remains open until the conditions that triggered the incident are no longer true.\n\nFor information and viewing and managing incidents by using the Google Cloud Console, see Incidents and events.", + "mimeType": "text/markdown" + }, + "notificationChannels": [ + "projects/example-project/notificationChannels/2866813153676774505", + "projects/example-project/notificationChannels/2866813153676774169" + ], + "userLabels": { + "cnrm-test": "true", + "managed-by-cnrm": "true" }, - "displayName": "Very high CPU usage", - "name": "projects/example-project/alertPolicies/2196633403401485933/conditions/2196633403401483150" + "enabled": true, + "severity": "WARNING" } - ], - "documentation": { - "content": "Introduction to alerting\nAlerting gives timely awareness to problems in your cloud applications so you can resolve the problems quickly.\n\nTo create an alerting policy, you must describe the circumstances under which you want to be alerted and how you want to be notified. 
This page provides an overview of alerting policies and the concepts behind them.\n\nFor a practical introduction to alerting, try one of these quickstarts:\n\nQuickstart for GCP\nQuickstart for AWS\nFor an alerting policy that monitors usage and alerts you when you approach the threshold for billing, see Alerting on monthly log ingestion and Alerting on monthly trace span ingestion.\n\nHow does alerting work?\nYou can create and manage alerting policies with the Google Cloud Console, the Cloud Monitoring API, and Cloud SDK.\n\nEach alerting policy specifies the following:\n\nConditions that identify when a resource or a group of resources is in a state that requires you to take action. The conditions for an alerting policy are continuously monitored. You cannot configure the conditions to be monitored only for certain time periods.\n\nNotifications that are sent through email, SMS, or other channels to let your support team know when the conditions have been met. Configuring notifications is optional. For information on the available notification channels, see Notification options.\n\nDocumentation that can be included in some types of notifications to help your support team resolve the issue. Configuring documentation is optional.\n\nWhen the conditions of an alerting policy are met, Cloud Monitoring creates and displays an incident in the Google Cloud Console. If you set up notifications, Cloud Monitoring also sends notifications to people or third-party notification services. Responders can acknowledge receipt of the notification, but the incident remains open until the conditions that triggered the incident are no longer true.\n\nFor information and viewing and managing incidents by using the Google Cloud Console, see Incidents and events.", - "mimeType": "text/markdown" - }, - "notificationChannels": [ - "projects/example-project/notificationChannels/3903573194057950299", - "projects/example-project/notificationChannels/3903573194057951866" - ], - "userLabels": { - "managed-by-cnrm": "true", - "cnrm-test": "true" - }, - "enabled": true - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 119.113709ms - - id: 12 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies/2196633403401485933?alt=json - method: GET - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "name": "projects/example-project/alertPolicies/2196633403401485933", - "displayName": "Test Alert Policy", - "combiner": "AND_WITH_MATCHING_RESOURCE", - "creationRecord": { - "mutateTime": "2024-04-25T01:50:09.751333324Z", - "mutatedBy": "integration-test@example-project.iam.gserviceaccount.com" - }, - "mutationRecord": { - "mutateTime": "2024-04-25T01:50:09.751333324Z", - "mutatedBy": "integration-test@example-project.iam.gserviceaccount.com" - }, - "conditions": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 161.647634ms + - id: 12 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + 
body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies/17971456350742769146?alt=json + method: GET + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "conditionThreshold": { - "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", - "comparison": "COMPARISON_GT", - "thresholdValue": 0.9, - "duration": "900s", - "trigger": { - "count": 1 - }, - "aggregations": [ - { - "alignmentPeriod": "60s", - "perSeriesAligner": "ALIGN_MAX", - "crossSeriesReducer": "REDUCE_MEAN", - "groupByFields": [ - "project", - "resource.label.instance_id", - "resource.label.zone" + "name": "projects/example-project/alertPolicies/17971456350742769146", + "displayName": "Test Alert Policy", + "combiner": "AND_WITH_MATCHING_RESOURCE", + "creationRecord": { + "mutateTime": "2024-06-24T17:53:19.490308857Z", + "mutatedBy": "justinsb@google.com" + }, + "mutationRecord": { + "mutateTime": "2024-06-24T17:53:19.490308857Z", + "mutatedBy": "justinsb@google.com" + }, + "conditions": [ + { + "conditionThreshold": { + "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", + "comparison": "COMPARISON_GT", + "thresholdValue": 0.9, + "duration": "900s", + "trigger": { + "count": 1 + }, + "aggregations": [ + { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_MAX", + "crossSeriesReducer": "REDUCE_MEAN", + "groupByFields": [ + "project", + "resource.label.instance_id", + "resource.label.zone" + ] + } ] - } - ] + }, + "displayName": "Very high CPU usage", + "name": "projects/example-project/alertPolicies/17971456350742769146/conditions/17971456350742768623" + } + ], + "documentation": { + "content": "Introduction to alerting\nAlerting gives timely awareness to problems in your cloud applications so you can resolve the problems quickly.\n\nTo create an alerting policy, you must describe the circumstances under which you want to be alerted and how you want to be notified. This page provides an overview of alerting policies and the concepts behind them.\n\nFor a practical introduction to alerting, try one of these quickstarts:\n\nQuickstart for GCP\nQuickstart for AWS\nFor an alerting policy that monitors usage and alerts you when you approach the threshold for billing, see Alerting on monthly log ingestion and Alerting on monthly trace span ingestion.\n\nHow does alerting work?\nYou can create and manage alerting policies with the Google Cloud Console, the Cloud Monitoring API, and Cloud SDK.\n\nEach alerting policy specifies the following:\n\nConditions that identify when a resource or a group of resources is in a state that requires you to take action. The conditions for an alerting policy are continuously monitored. You cannot configure the conditions to be monitored only for certain time periods.\n\nNotifications that are sent through email, SMS, or other channels to let your support team know when the conditions have been met. Configuring notifications is optional. For information on the available notification channels, see Notification options.\n\nDocumentation that can be included in some types of notifications to help your support team resolve the issue. 
Configuring documentation is optional.\n\nWhen the conditions of an alerting policy are met, Cloud Monitoring creates and displays an incident in the Google Cloud Console. If you set up notifications, Cloud Monitoring also sends notifications to people or third-party notification services. Responders can acknowledge receipt of the notification, but the incident remains open until the conditions that triggered the incident are no longer true.\n\nFor information and viewing and managing incidents by using the Google Cloud Console, see Incidents and events.", + "mimeType": "text/markdown" }, - "displayName": "Very high CPU usage", - "name": "projects/example-project/alertPolicies/2196633403401485933/conditions/2196633403401483150" + "notificationChannels": [ + "projects/example-project/notificationChannels/2866813153676774505", + "projects/example-project/notificationChannels/2866813153676774169" + ], + "userLabels": { + "managed-by-cnrm": "true", + "cnrm-test": "true" + }, + "enabled": true, + "severity": "WARNING" } - ], - "documentation": { - "content": "Introduction to alerting\nAlerting gives timely awareness to problems in your cloud applications so you can resolve the problems quickly.\n\nTo create an alerting policy, you must describe the circumstances under which you want to be alerted and how you want to be notified. This page provides an overview of alerting policies and the concepts behind them.\n\nFor a practical introduction to alerting, try one of these quickstarts:\n\nQuickstart for GCP\nQuickstart for AWS\nFor an alerting policy that monitors usage and alerts you when you approach the threshold for billing, see Alerting on monthly log ingestion and Alerting on monthly trace span ingestion.\n\nHow does alerting work?\nYou can create and manage alerting policies with the Google Cloud Console, the Cloud Monitoring API, and Cloud SDK.\n\nEach alerting policy specifies the following:\n\nConditions that identify when a resource or a group of resources is in a state that requires you to take action. The conditions for an alerting policy are continuously monitored. You cannot configure the conditions to be monitored only for certain time periods.\n\nNotifications that are sent through email, SMS, or other channels to let your support team know when the conditions have been met. Configuring notifications is optional. For information on the available notification channels, see Notification options.\n\nDocumentation that can be included in some types of notifications to help your support team resolve the issue. Configuring documentation is optional.\n\nWhen the conditions of an alerting policy are met, Cloud Monitoring creates and displays an incident in the Google Cloud Console. If you set up notifications, Cloud Monitoring also sends notifications to people or third-party notification services. 
Responders can acknowledge receipt of the notification, but the incident remains open until the conditions that triggered the incident are no longer true.\n\nFor information and viewing and managing incidents by using the Google Cloud Console, see Incidents and events.", - "mimeType": "text/markdown" - }, - "notificationChannels": [ - "projects/example-project/notificationChannels/3903573194057950299", - "projects/example-project/notificationChannels/3903573194057951866" - ], - "userLabels": { - "managed-by-cnrm": "true", - "cnrm-test": "true" - }, - "enabled": true - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 89.134857ms - - id: 13 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 1321 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: | - {"combiner":"OR","conditions":[{"conditionThreshold":{"aggregations":[{"alignmentPeriod":"60s","crossSeriesReducer":"REDUCE_MEAN","groupByFields":["project","resource.label.instance_id","resource.label.zone"],"perSeriesAligner":"ALIGN_MAX"}],"comparison":"COMPARISON_LT","duration":"900s","filter":"metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"","thresholdValue":0.1,"trigger":{"count":3}},"displayName":"Very low CPU usage","name":"projects/example-project/alertPolicies/2196633403401485933/conditions/2196633403401483150"}],"displayName":"Updated Test Alert Policy","documentation":{"content":"“Just the place for a Snark!” the Bellman cried,\nAs he monitored his resources with care;\nSupporting each metric on the top of the tide\nBy a finger entwined in his hair.\n\n“Just the place for a Snark! I have measured it twice:\nThat alone should discourage the crew.\nJust the place for a Snark! 
I have measured it thrice:\nWhat I measure three times is true.”","mimeType":"text/markdown"},"enabled":false,"notificationChannels":["projects/example-project/notificationChannels/3527810009930327309","projects/example-project/notificationChannels/3903573194057950299"],"userLabels":{"cnrm-test":"true","managed-by-cnrm":"true"}} - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies/2196633403401485933?alt=json&updateMask=displayName%2Ccombiner%2Cenabled%2Cconditions%2CnotificationChannels%2Cdocumentation - method: PATCH - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "name": "projects/example-project/alertPolicies/2196633403401485933", - "displayName": "Updated Test Alert Policy", - "combiner": "OR", - "creationRecord": { - "mutateTime": "2024-04-25T01:50:09.751333324Z", - "mutatedBy": "integration-test@example-project.iam.gserviceaccount.com" - }, - "mutationRecord": { - "mutateTime": "2024-04-25T01:50:15.537652576Z", - "mutatedBy": "integration-test@example-project.iam.gserviceaccount.com" - }, - "conditions": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 451.439217ms + - id: 13 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 1330 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: | + {"combiner":"OR","conditions":[{"conditionThreshold":{"aggregations":[{"alignmentPeriod":"60s","crossSeriesReducer":"REDUCE_MEAN","groupByFields":["project","resource.label.instance_id","resource.label.zone"],"perSeriesAligner":"ALIGN_MAX"}],"comparison":"COMPARISON_LT","duration":"900s","filter":"metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"","thresholdValue":0.1,"trigger":{"count":3}},"displayName":"Very low CPU usage","name":"projects/example-project/alertPolicies/17971456350742769146/conditions/17971456350742768623"}],"displayName":"Updated Test Alert Policy","documentation":{"content":"“Just the place for a Snark!” the Bellman cried,\nAs he monitored his resources with care;\nSupporting each metric on the top of the tide\nBy a finger entwined in his hair.\n\n“Just the place for a Snark! I have measured it twice:\nThat alone should discourage the crew.\nJust the place for a Snark! 
I have measured it thrice:\nWhat I measure three times is true.”","mimeType":"text/markdown"},"enabled":false,"notificationChannels":["projects/example-project/notificationChannels/7726733045005565233","projects/example-project/notificationChannels/2866813153676774505"],"severity":"ERROR","userLabels":{"cnrm-test":"true","managed-by-cnrm":"true"}} + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies/17971456350742769146?alt=json&updateMask=displayName%2Ccombiner%2Cenabled%2Cconditions%2CnotificationChannels%2Cdocumentation%2Cseverity + method: PATCH + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "conditionThreshold": { - "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", - "comparison": "COMPARISON_LT", - "thresholdValue": 0.1, - "duration": "900s", - "trigger": { - "count": 3 - }, - "aggregations": [ - { - "alignmentPeriod": "60s", - "perSeriesAligner": "ALIGN_MAX", - "crossSeriesReducer": "REDUCE_MEAN", - "groupByFields": [ - "project", - "resource.label.instance_id", - "resource.label.zone" + "name": "projects/example-project/alertPolicies/17971456350742769146", + "displayName": "Updated Test Alert Policy", + "combiner": "OR", + "creationRecord": { + "mutateTime": "2024-06-24T17:53:19.490308857Z", + "mutatedBy": "justinsb@google.com" + }, + "mutationRecord": { + "mutateTime": "2024-06-24T17:53:26.035023344Z", + "mutatedBy": "justinsb@google.com" + }, + "conditions": [ + { + "conditionThreshold": { + "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", + "comparison": "COMPARISON_LT", + "thresholdValue": 0.1, + "duration": "900s", + "trigger": { + "count": 3 + }, + "aggregations": [ + { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_MAX", + "crossSeriesReducer": "REDUCE_MEAN", + "groupByFields": [ + "project", + "resource.label.instance_id", + "resource.label.zone" + ] + } ] - } - ] + }, + "displayName": "Very low CPU usage", + "name": "projects/example-project/alertPolicies/17971456350742769146/conditions/17971456350742768623" + } + ], + "documentation": { + "content": "“Just the place for a Snark!” the Bellman cried,\nAs he monitored his resources with care;\nSupporting each metric on the top of the tide\nBy a finger entwined in his hair.\n\n“Just the place for a Snark! I have measured it twice:\nThat alone should discourage the crew.\nJust the place for a Snark! I have measured it thrice:\nWhat I measure three times is true.”", + "mimeType": "text/markdown" + }, + "notificationChannels": [ + "projects/example-project/notificationChannels/7726733045005565233", + "projects/example-project/notificationChannels/2866813153676774505" + ], + "userLabels": { + "cnrm-test": "true", + "managed-by-cnrm": "true" }, - "displayName": "Very low CPU usage", - "name": "projects/example-project/alertPolicies/2196633403401485933/conditions/2196633403401483150" + "enabled": false, + "severity": "ERROR" } - ], - "documentation": { - "content": "“Just the place for a Snark!” the Bellman cried,\nAs he monitored his resources with care;\nSupporting each metric on the top of the tide\nBy a finger entwined in his hair.\n\n“Just the place for a Snark! I have measured it twice:\nThat alone should discourage the crew.\nJust the place for a Snark! 
I have measured it thrice:\nWhat I measure three times is true.”", - "mimeType": "text/markdown" - }, - "notificationChannels": [ - "projects/example-project/notificationChannels/3527810009930327309", - "projects/example-project/notificationChannels/3903573194057950299" - ], - "userLabels": { - "managed-by-cnrm": "true", - "cnrm-test": "true" - }, - "enabled": false - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 3.60287779s - - id: 14 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies/2196633403401485933?alt=json - method: GET - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "name": "projects/example-project/alertPolicies/2196633403401485933", - "displayName": "Updated Test Alert Policy", - "combiner": "OR", - "creationRecord": { - "mutateTime": "2024-04-25T01:50:09.751333324Z", - "mutatedBy": "integration-test@example-project.iam.gserviceaccount.com" - }, - "mutationRecord": { - "mutateTime": "2024-04-25T01:50:15.537652576Z", - "mutatedBy": "integration-test@example-project.iam.gserviceaccount.com" - }, - "conditions": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 5.533515959s + - id: 14 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies/17971456350742769146?alt=json + method: GET + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "conditionThreshold": { - "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", - "comparison": "COMPARISON_LT", - "thresholdValue": 0.1, - "duration": "900s", - "trigger": { - "count": 3 - }, - "aggregations": [ - { - "alignmentPeriod": "60s", - "perSeriesAligner": "ALIGN_MAX", - "crossSeriesReducer": "REDUCE_MEAN", - "groupByFields": [ - "project", - "resource.label.instance_id", - "resource.label.zone" + "name": "projects/example-project/alertPolicies/17971456350742769146", + "displayName": "Updated Test Alert Policy", + "combiner": "OR", + "creationRecord": { + "mutateTime": "2024-06-24T17:53:19.490308857Z", + "mutatedBy": "justinsb@google.com" + }, + "mutationRecord": { + "mutateTime": "2024-06-24T17:53:26.035023344Z", + "mutatedBy": "justinsb@google.com" + }, + "conditions": [ + { + "conditionThreshold": { + "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", + "comparison": "COMPARISON_LT", + "thresholdValue": 0.1, + "duration": "900s", + "trigger": { + "count": 3 + }, + "aggregations": [ + { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_MAX", + "crossSeriesReducer": "REDUCE_MEAN", + "groupByFields": [ + "project", + "resource.label.instance_id", + "resource.label.zone" + ] + } ] - } - ] + }, + "displayName": "Very low 
CPU usage", + "name": "projects/example-project/alertPolicies/17971456350742769146/conditions/17971456350742768623" + } + ], + "documentation": { + "content": "“Just the place for a Snark!” the Bellman cried,\nAs he monitored his resources with care;\nSupporting each metric on the top of the tide\nBy a finger entwined in his hair.\n\n“Just the place for a Snark! I have measured it twice:\nThat alone should discourage the crew.\nJust the place for a Snark! I have measured it thrice:\nWhat I measure three times is true.”", + "mimeType": "text/markdown" + }, + "notificationChannels": [ + "projects/example-project/notificationChannels/7726733045005565233", + "projects/example-project/notificationChannels/2866813153676774505" + ], + "userLabels": { + "managed-by-cnrm": "true", + "cnrm-test": "true" }, - "displayName": "Very low CPU usage", - "name": "projects/example-project/alertPolicies/2196633403401485933/conditions/2196633403401483150" + "enabled": false, + "severity": "ERROR" } - ], - "documentation": { - "content": "“Just the place for a Snark!” the Bellman cried,\nAs he monitored his resources with care;\nSupporting each metric on the top of the tide\nBy a finger entwined in his hair.\n\n“Just the place for a Snark! I have measured it twice:\nThat alone should discourage the crew.\nJust the place for a Snark! I have measured it thrice:\nWhat I measure three times is true.”", - "mimeType": "text/markdown" - }, - "notificationChannels": [ - "projects/example-project/notificationChannels/3527810009930327309", - "projects/example-project/notificationChannels/3903573194057950299" - ], - "userLabels": { - "cnrm-test": "true", - "managed-by-cnrm": "true" - }, - "enabled": false - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 97.019207ms - - id: 15 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies/2196633403401485933?alt=json - method: GET - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "name": "projects/example-project/alertPolicies/2196633403401485933", - "displayName": "Updated Test Alert Policy", - "combiner": "OR", - "creationRecord": { - "mutateTime": "2024-04-25T01:50:09.751333324Z", - "mutatedBy": "integration-test@example-project.iam.gserviceaccount.com" - }, - "mutationRecord": { - "mutateTime": "2024-04-25T01:50:15.537652576Z", - "mutatedBy": "integration-test@example-project.iam.gserviceaccount.com" - }, - "conditions": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 458.882267ms + - id: 15 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies/17971456350742769146?alt=json + method: GET + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "conditionThreshold": { - "filter": 
"metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", - "comparison": "COMPARISON_LT", - "thresholdValue": 0.1, - "duration": "900s", - "trigger": { - "count": 3 - }, - "aggregations": [ - { - "alignmentPeriod": "60s", - "perSeriesAligner": "ALIGN_MAX", - "crossSeriesReducer": "REDUCE_MEAN", - "groupByFields": [ - "project", - "resource.label.instance_id", - "resource.label.zone" + "name": "projects/example-project/alertPolicies/17971456350742769146", + "displayName": "Updated Test Alert Policy", + "combiner": "OR", + "creationRecord": { + "mutateTime": "2024-06-24T17:53:19.490308857Z", + "mutatedBy": "justinsb@google.com" + }, + "mutationRecord": { + "mutateTime": "2024-06-24T17:53:26.035023344Z", + "mutatedBy": "justinsb@google.com" + }, + "conditions": [ + { + "conditionThreshold": { + "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", + "comparison": "COMPARISON_LT", + "thresholdValue": 0.1, + "duration": "900s", + "trigger": { + "count": 3 + }, + "aggregations": [ + { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_MAX", + "crossSeriesReducer": "REDUCE_MEAN", + "groupByFields": [ + "project", + "resource.label.instance_id", + "resource.label.zone" + ] + } ] - } - ] + }, + "displayName": "Very low CPU usage", + "name": "projects/example-project/alertPolicies/17971456350742769146/conditions/17971456350742768623" + } + ], + "documentation": { + "content": "“Just the place for a Snark!” the Bellman cried,\nAs he monitored his resources with care;\nSupporting each metric on the top of the tide\nBy a finger entwined in his hair.\n\n“Just the place for a Snark! I have measured it twice:\nThat alone should discourage the crew.\nJust the place for a Snark! I have measured it thrice:\nWhat I measure three times is true.”", + "mimeType": "text/markdown" }, - "displayName": "Very low CPU usage", - "name": "projects/example-project/alertPolicies/2196633403401485933/conditions/2196633403401483150" + "notificationChannels": [ + "projects/example-project/notificationChannels/7726733045005565233", + "projects/example-project/notificationChannels/2866813153676774505" + ], + "userLabels": { + "cnrm-test": "true", + "managed-by-cnrm": "true" + }, + "enabled": false, + "severity": "ERROR" } - ], - "documentation": { - "content": "“Just the place for a Snark!” the Bellman cried,\nAs he monitored his resources with care;\nSupporting each metric on the top of the tide\nBy a finger entwined in his hair.\n\n“Just the place for a Snark! I have measured it twice:\nThat alone should discourage the crew.\nJust the place for a Snark! 
I have measured it thrice:\nWhat I measure three times is true.”", - "mimeType": "text/markdown" - }, - "notificationChannels": [ - "projects/example-project/notificationChannels/3527810009930327309", - "projects/example-project/notificationChannels/3903573194057950299" - ], - "userLabels": { - "managed-by-cnrm": "true", - "cnrm-test": "true" - }, - "enabled": false - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 101.256259ms - - id: 16 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies/2196633403401485933?alt=json - method: GET - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "name": "projects/example-project/alertPolicies/2196633403401485933", - "displayName": "Updated Test Alert Policy", - "combiner": "OR", - "creationRecord": { - "mutateTime": "2024-04-25T01:50:09.751333324Z", - "mutatedBy": "integration-test@example-project.iam.gserviceaccount.com" - }, - "mutationRecord": { - "mutateTime": "2024-04-25T01:50:15.537652576Z", - "mutatedBy": "integration-test@example-project.iam.gserviceaccount.com" - }, - "conditions": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 439.56287ms + - id: 16 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/7726733045005565233?alt=json + method: GET + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "conditionThreshold": { - "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", - "comparison": "COMPARISON_LT", - "thresholdValue": 0.1, - "duration": "900s", - "trigger": { - "count": 3 - }, - "aggregations": [ - { - "alignmentPeriod": "60s", - "perSeriesAligner": "ALIGN_MAX", - "crossSeriesReducer": "REDUCE_MEAN", - "groupByFields": [ - "project", - "resource.label.instance_id", - "resource.label.zone" - ] - } - ] + "type": "email", + "displayName": "monitoringnotificationchannel3-41w3iydhydd0", + "labels": { + "email_address": "dev@example.com" + }, + "name": "projects/example-project/notificationChannels/7726733045005565233", + "userLabels": { + "managed-by-cnrm": "true", + "cnrm-test": "true" + }, + "enabled": true, + "creationRecord": { + "mutateTime": "2024-06-24T17:53:10.350354958Z" }, - "displayName": "Very low CPU usage", - "name": "projects/example-project/alertPolicies/2196633403401485933/conditions/2196633403401483150" + "mutationRecords": [ + { + "mutateTime": "2024-06-24T17:53:10.350354958Z" + } + ] } - ], - "documentation": { - "content": "“Just the place for a Snark!” the Bellman cried,\nAs he monitored his resources with care;\nSupporting each metric on the top of the tide\nBy a finger entwined in his hair.\n\n“Just the place for a Snark! 
I have measured it twice:\nThat alone should discourage the crew.\nJust the place for a Snark! I have measured it thrice:\nWhat I measure three times is true.”", - "mimeType": "text/markdown" - }, - "notificationChannels": [ - "projects/example-project/notificationChannels/3527810009930327309", - "projects/example-project/notificationChannels/3903573194057950299" - ], - "userLabels": { - "cnrm-test": "true", - "managed-by-cnrm": "true" - }, - "enabled": false - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 103.78022ms - - id: 17 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/3527810009930327309?alt=json - method: GET - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "type": "email", - "displayName": "monitoringnotificationchannel3-41w3iydhydd0", - "labels": { - "email_address": "dev@example.com" - }, - "name": "projects/example-project/notificationChannels/3527810009930327309", - "userLabels": { - "cnrm-test": "true", - "managed-by-cnrm": "true" - }, - "enabled": true, - "creationRecord": { - "mutateTime": "2024-04-25T01:50:09.581882038Z" - }, - "mutationRecords": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 157.198155ms + - id: 17 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/2866813153676774169?alt=json + method: GET + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "mutateTime": "2024-04-25T01:50:09.581882038Z" + "type": "email", + "displayName": "monitoringnotificationchannel2-41w3iydhydd0", + "labels": { + "email_address": "dev@example.com" + }, + "name": "projects/example-project/notificationChannels/2866813153676774169", + "userLabels": { + "managed-by-cnrm": "true", + "cnrm-test": "true" + }, + "enabled": true, + "creationRecord": { + "mutateTime": "2024-06-24T17:53:13.229632439Z" + }, + "mutationRecords": [ + { + "mutateTime": "2024-06-24T17:53:13.229632439Z" + } + ] } - ] - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 137.306637ms - - id: 18 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/3903573194057951866?alt=json - method: GET - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "type": "email", - "displayName": "monitoringnotificationchannel2-41w3iydhydd0", - "labels": { - 
"email_address": "dev@example.com" - }, - "name": "projects/example-project/notificationChannels/3903573194057951866", - "userLabels": { - "cnrm-test": "true", - "managed-by-cnrm": "true" - }, - "enabled": true, - "creationRecord": { - "mutateTime": "2024-04-25T01:50:04.477480023Z" - }, - "mutationRecords": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 163.554454ms + - id: 18 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/2866813153676774505?alt=json + method: GET + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "mutateTime": "2024-04-25T01:50:04.477480023Z" + "type": "email", + "displayName": "monitoringnotificationchannel1-41w3iydhydd0", + "labels": { + "email_address": "dev@example.com" + }, + "name": "projects/example-project/notificationChannels/2866813153676774505", + "userLabels": { + "managed-by-cnrm": "true", + "cnrm-test": "true" + }, + "enabled": true, + "creationRecord": { + "mutateTime": "2024-06-24T17:53:16.282199812Z" + }, + "mutationRecords": [ + { + "mutateTime": "2024-06-24T17:53:16.282199812Z" + } + ] } - ] - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 145.9423ms - - id: 19 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/3903573194057950299?alt=json - method: GET - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "type": "email", - "displayName": "monitoringnotificationchannel1-41w3iydhydd0", - "labels": { - "email_address": "dev@example.com" - }, - "name": "projects/example-project/notificationChannels/3903573194057950299", - "userLabels": { - "cnrm-test": "true", - "managed-by-cnrm": "true" - }, - "enabled": true, - "creationRecord": { - "mutateTime": "2024-04-25T01:50:07.024284365Z" - }, - "mutationRecords": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 162.912864ms + - id: 19 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies/17971456350742769146?alt=json + method: GET + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "mutateTime": "2024-04-25T01:50:07.024284365Z" + "name": "projects/example-project/alertPolicies/17971456350742769146", + "displayName": "Updated Test Alert Policy", + "combiner": "OR", + "creationRecord": { + "mutateTime": "2024-06-24T17:53:19.490308857Z", + "mutatedBy": 
"justinsb@google.com" + }, + "mutationRecord": { + "mutateTime": "2024-06-24T17:53:26.035023344Z", + "mutatedBy": "justinsb@google.com" + }, + "conditions": [ + { + "conditionThreshold": { + "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", + "comparison": "COMPARISON_LT", + "thresholdValue": 0.1, + "duration": "900s", + "trigger": { + "count": 3 + }, + "aggregations": [ + { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_MAX", + "crossSeriesReducer": "REDUCE_MEAN", + "groupByFields": [ + "project", + "resource.label.instance_id", + "resource.label.zone" + ] + } + ] + }, + "displayName": "Very low CPU usage", + "name": "projects/example-project/alertPolicies/17971456350742769146/conditions/17971456350742768623" + } + ], + "documentation": { + "content": "“Just the place for a Snark!” the Bellman cried,\nAs he monitored his resources with care;\nSupporting each metric on the top of the tide\nBy a finger entwined in his hair.\n\n“Just the place for a Snark! I have measured it twice:\nThat alone should discourage the crew.\nJust the place for a Snark! I have measured it thrice:\nWhat I measure three times is true.”", + "mimeType": "text/markdown" + }, + "notificationChannels": [ + "projects/example-project/notificationChannels/7726733045005565233", + "projects/example-project/notificationChannels/2866813153676774505" + ], + "userLabels": { + "managed-by-cnrm": "true", + "cnrm-test": "true" + }, + "enabled": false, + "severity": "ERROR" } - ] - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 134.014546ms - - id: 20 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/3527810009930327309?alt=json&force=false - method: DELETE - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: 0 - uncompressed: true - body: fake error message - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 400 Bad Request - code: 400 - duration: 150.495825ms - - id: 21 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/3903573194057950299?alt=json&force=false - method: DELETE - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: 0 - uncompressed: true - body: fake error message - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 400 Bad Request - code: 400 - duration: 172.868164ms - - id: 22 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/3527810009930327309?alt=json - method: GET - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 
0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "type": "email", - "displayName": "monitoringnotificationchannel3-41w3iydhydd0", - "labels": { - "email_address": "dev@example.com" - }, - "name": "projects/example-project/notificationChannels/3527810009930327309", - "userLabels": { - "managed-by-cnrm": "true", - "cnrm-test": "true" - }, - "enabled": true, - "creationRecord": { - "mutateTime": "2024-04-25T01:50:09.581882038Z" - }, - "mutationRecords": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 365.609732ms + - id: 20 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/7726733045005565233?alt=json&force=false + method: DELETE + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: 0 + uncompressed: true + body: fake error message + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 400 Bad Request + code: 400 + duration: 162.254884ms + - id: 21 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/2866813153676774505?alt=json&force=false + method: DELETE + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: 0 + uncompressed: true + body: fake error message + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 400 Bad Request + code: 400 + duration: 169.564414ms + - id: 22 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/2866813153676774505?alt=json + method: GET + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "mutateTime": "2024-04-25T01:50:09.581882038Z" + "type": "email", + "displayName": "monitoringnotificationchannel1-41w3iydhydd0", + "labels": { + "email_address": "dev@example.com" + }, + "name": "projects/example-project/notificationChannels/2866813153676774505", + "userLabels": { + "cnrm-test": "true", + "managed-by-cnrm": "true" + }, + "enabled": true, + "creationRecord": { + "mutateTime": "2024-06-24T17:53:16.282199812Z" + }, + "mutationRecords": [ + { + "mutateTime": "2024-06-24T17:53:16.282199812Z" + } + ] } - ] - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 138.611295ms - - id: 23 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: 
https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/3903573194057950299?alt=json - method: GET - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - { - "type": "email", - "displayName": "monitoringnotificationchannel1-41w3iydhydd0", - "labels": { - "email_address": "dev@example.com" - }, - "name": "projects/example-project/notificationChannels/3903573194057950299", - "userLabels": { - "cnrm-test": "true", - "managed-by-cnrm": "true" - }, - "enabled": true, - "creationRecord": { - "mutateTime": "2024-04-25T01:50:07.024284365Z" - }, - "mutationRecords": [ + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 138.324898ms + - id: 23 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/7726733045005565233?alt=json + method: GET + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | { - "mutateTime": "2024-04-25T01:50:07.024284365Z" + "type": "email", + "displayName": "monitoringnotificationchannel3-41w3iydhydd0", + "labels": { + "email_address": "dev@example.com" + }, + "name": "projects/example-project/notificationChannels/7726733045005565233", + "userLabels": { + "cnrm-test": "true", + "managed-by-cnrm": "true" + }, + "enabled": true, + "creationRecord": { + "mutateTime": "2024-06-24T17:53:10.350354958Z" + }, + "mutationRecords": [ + { + "mutateTime": "2024-06-24T17:53:10.350354958Z" + } + ] } - ] - } - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 145.114514ms - - id: 24 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies/2196633403401485933?alt=json - method: DELETE - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - {} - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 3.615741138s - - id: 25 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/3903573194057951866?alt=json&force=false - method: DELETE - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - {} - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 4.359027108s - - id: 26 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: 
monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/3527810009930327309?alt=json&force=false - method: DELETE - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - {} - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 1.042175534s - - id: 27 - request: - proto: HTTP/1.1 - proto_major: 1 - proto_minor: 1 - content_length: 0 - transfer_encoding: [] - trailer: {} - host: monitoring.googleapis.com - remote_addr: "" - request_uri: "" - body: "" - form: {} - headers: - Content-Type: - - application/json - url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/3903573194057950299?alt=json&force=false - method: DELETE - response: - proto: HTTP/2.0 - proto_major: 2 - proto_minor: 0 - transfer_encoding: [] - trailer: {} - content_length: -1 - uncompressed: true - body: | - {} - headers: - Content-Type: - - application/json; charset=UTF-8 - status: 200 OK - code: 200 - duration: 1.350267552s \ No newline at end of file + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 149.983056ms + - id: 24 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/alertPolicies/17971456350742769146?alt=json + method: DELETE + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | + {} + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 2.446479002s + - id: 25 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/2866813153676774169?alt=json&force=false + method: DELETE + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | + {} + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 3.253806355s + - id: 26 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/2866813153676774505?alt=json&force=false + method: DELETE + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | + {} + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 1.34112753s + - id: 27 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + 
content_length: 0 + transfer_encoding: [] + trailer: {} + host: monitoring.googleapis.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + Content-Type: + - application/json + url: https://monitoring.googleapis.com/v3/projects/example-project/notificationChannels/7726733045005565233?alt=json&force=false + method: DELETE + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: true + body: | + {} + headers: + Content-Type: + - application/json; charset=UTF-8 + status: 200 OK + code: 200 + duration: 1.384605963s From d416b9e0738494ffa98f750f95721b37f6ea7e29 Mon Sep 17 00:00:00 2001 From: Hank Freund Date: Mon, 24 Jun 2024 13:04:00 -0700 Subject: [PATCH 006/101] Invoke the expander when a composition changes to update resources. When the composition controller reconciles a change in a composition, the desired outcome is that affected resources be re-processed to reflect the changes. Set up a side-channel to the expander and send events on it when compositions change. The expander will then enqueue the necessary events for all relevant resources. --- .../controller/composition_controller.go | 38 ++- .../controller/expander_reconciler.go | 42 +++- .../TestSimpleCompositionUpdate/input.yaml | 231 ++++++++++++++++++ .../modified_composition.yaml | 41 ++++ .../TestSimpleCompositionUpdate/output.yaml | 85 +++++++ .../tests/testcases/simple_test.go | 21 ++ 6 files changed, 439 insertions(+), 19 deletions(-) create mode 100644 experiments/compositions/composition/tests/data/TestSimpleCompositionUpdate/input.yaml create mode 100644 experiments/compositions/composition/tests/data/TestSimpleCompositionUpdate/modified_composition.yaml create mode 100644 experiments/compositions/composition/tests/data/TestSimpleCompositionUpdate/output.yaml diff --git a/experiments/compositions/composition/internal/controller/composition_controller.go b/experiments/compositions/composition/internal/controller/composition_controller.go index a5530da647..fb0b734149 100644 --- a/experiments/compositions/composition/internal/controller/composition_controller.go +++ b/experiments/compositions/composition/internal/controller/composition_controller.go @@ -29,6 +29,7 @@ import ( pb "google.com/composition/proto" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" + corev1 "k8s.io/api/core/v1" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,6 +41,7 @@ import ( "k8s.io/client-go/util/retry" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -48,9 +50,10 @@ var FacadeControllers sync.Map // CompositionReconciler reconciles a Composition object type CompositionReconciler struct { client.Client - Scheme *runtime.Scheme - Recorder record.EventRecorder - mgr ctrl.Manager + Scheme *runtime.Scheme + Recorder record.EventRecorder + mgr ctrl.Manager + handoffChannels map[schema.GroupVersionKind]chan event.GenericEvent } //+kubebuilder:rbac:groups=composition.google.com,resources=compositions,verbs=get;list;watch;create;update;patch;delete @@ -332,21 +335,33 @@ func (r *CompositionReconciler) processComposition( logger.Info("Checking if Reconciler already exists for InputAPI CRD") _, loaded := FacadeControllers.LoadOrStore(gvk, true) if loaded { + logger.Info("Sending event to handoff 
channel") + r.handoffChannels[gvk] <- event.GenericEvent{ + Object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.Name, + Namespace: c.Namespace, + }, + }, + } + // Reconciler already exists nothing to be done logger.Info("Reconciler already exists for InputAPI CRD") return nil } logger.Info("Starting Reconciler for InputAPI CRD") + r.handoffChannels[gvk] = make(chan event.GenericEvent) expanderController := &ExpanderReconciler{ - Client: r.Client, - Recorder: r.mgr.GetEventRecorderFor(crd.Spec.Names.Plural + "-expander"), - Scheme: r.Scheme, - InputGVK: gvk, - Composition: types.NamespacedName{Name: c.Name, Namespace: c.Namespace}, - InputGVR: gvk.GroupVersion().WithResource(crd.Spec.Names.Plural), - RESTMapper: r.mgr.GetRESTMapper(), - Config: r.mgr.GetConfig(), + Client: r.Client, + Recorder: r.mgr.GetEventRecorderFor(crd.Spec.Names.Plural + "-expander"), + Scheme: r.Scheme, + InputGVK: gvk, + Composition: types.NamespacedName{Name: c.Name, Namespace: c.Namespace}, + InputGVR: gvk.GroupVersion().WithResource(crd.Spec.Names.Plural), + RESTMapper: r.mgr.GetRESTMapper(), + Config: r.mgr.GetConfig(), + CRDChangedWatcher: r.handoffChannels[gvk], } if err := expanderController.SetupWithManager(r.mgr, cr); err != nil { @@ -369,6 +384,7 @@ func (r *CompositionReconciler) processComposition( // SetupWithManager sets up the controller with the Manager. func (r *CompositionReconciler) SetupWithManager(mgr ctrl.Manager) error { r.mgr = mgr + r.handoffChannels = make(map[schema.GroupVersionKind]chan event.GenericEvent) return ctrl.NewControllerManagedBy(mgr). For(&compositionv1alpha1.Composition{}). Complete(r) diff --git a/experiments/compositions/composition/internal/controller/expander_reconciler.go b/experiments/compositions/composition/internal/controller/expander_reconciler.go index 45819f4d00..11f74bc51f 100644 --- a/experiments/compositions/composition/internal/controller/expander_reconciler.go +++ b/experiments/compositions/composition/internal/controller/expander_reconciler.go @@ -44,20 +44,25 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" ) // ExpanderReconciler reconciles a expander object type ExpanderReconciler struct { client.Client - Scheme *runtime.Scheme - Recorder record.EventRecorder - RESTMapper meta.RESTMapper - Config *rest.Config - Dynamic *dynamic.DynamicClient - InputGVK schema.GroupVersionKind - InputGVR schema.GroupVersionResource - Composition types.NamespacedName + Scheme *runtime.Scheme + Recorder record.EventRecorder + RESTMapper meta.RESTMapper + Config *rest.Config + Dynamic *dynamic.DynamicClient + InputGVK schema.GroupVersionKind + InputGVR schema.GroupVersionResource + Composition types.NamespacedName + CRDChangedWatcher chan event.GenericEvent } type EvaluateWaitError struct { @@ -584,6 +589,26 @@ func (r *ExpanderReconciler) evaluateAndSavePlan(ctx context.Context, logger log return values, updated, "", nil } +func (r *ExpanderReconciler) enqueueAllFromGVK(ctx context.Context, _ client.Object) []reconcile.Request { + logger := log.FromContext(ctx) + logger.Info("Got notification of changed CRD") + inputcrList := &unstructured.UnstructuredList{} + inputcrList.SetGroupVersionKind(r.InputGVK) + if err := r.List(ctx, inputcrList); err != 
nil { + logger.Error(err, "unable to fetch Input API Objects") + return nil + } + if len(inputcrList.Items) == 0 { + return nil + } + var reqs []reconcile.Request + for _, inputcr := range inputcrList.Items { + nn := types.NamespacedName{Name: inputcr.GetName(), Namespace: inputcr.GetNamespace()} + reqs = append(reqs, reconcile.Request{NamespacedName: nn}) + } + return reqs +} + // SetupWithManager sets up the controller with the Manager. func (r *ExpanderReconciler) SetupWithManager(mgr ctrl.Manager, cr *unstructured.Unstructured) error { var err error @@ -601,6 +626,7 @@ func (r *ExpanderReconciler) SetupWithManager(mgr ctrl.Manager, cr *unstructured return ctrl.NewControllerManagedBy(mgr). For(cr). + WatchesRawSource(&source.Channel{Source: r.CRDChangedWatcher}, handler.EnqueueRequestsFromMapFunc(r.enqueueAllFromGVK)). WithOptions(controller.Options{RateLimiter: ratelimiter}). Complete(r) } diff --git a/experiments/compositions/composition/tests/data/TestSimpleCompositionUpdate/input.yaml b/experiments/compositions/composition/tests/data/TestSimpleCompositionUpdate/input.yaml new file mode 100644 index 0000000000..908abb8fe8 --- /dev/null +++ b/experiments/compositions/composition/tests/data/TestSimpleCompositionUpdate/input.yaml @@ -0,0 +1,231 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: pconfigs.facade.updatetest.com +spec: + group: facade.updatetest.com + names: + kind: PConfig + listKind: PConfigList + plural: pconfigs + singular: pconfig + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Schema for the pconfig + properties: + apiVersion: + description: api-version of api + type: string + kind: + description: gvk Kind + type: string + metadata: + type: object + spec: + description: PConfig spec + properties: + projects: + items: + type: string + type: array + required: + - projects + type: object + status: + description: PConfig status + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sconfigs.facade.updatetest.com +spec: + group: facade.updatetest.com + names: + kind: SConfig + listKind: SConfigList + plural: sconfigs + singular: sconfig + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Schema for the sconfig + properties: + apiVersion: + description: api-version of api + type: string + kind: + description: gvk Kind + type: string + metadata: + type: object + spec: + description: SConfig spec + properties: + projects: + items: + type: string + type: array + required: + - projects + type: object + status: + description: SConfig status + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: composition-facade-updatetest +rules: +- apiGroups: + - facade.updatetest.com + resources: + - '*' + verbs: + - get + - list + - patch + - update + - watch + - create + - delete +- apiGroups: + - facade.updatetest.com + resources: + - "*/status" + verbs: + - get + - update +- apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: composition-facade-updatetest +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: composition-facade-updatetest +subjects: +- kind: ServiceAccount + name: composition-controller-manager + namespace: composition-system +--- +apiVersion: composition.google.com/v1alpha1 +kind: Composition +metadata: + name: pprojectconfigmap + namespace: default +spec: + inputAPIGroup: pconfigs.facade.updatetest.com + expanders: + - type: jinja2 + name: project + template: | + {% set hostProject = 'compositions-foobar' %} + {% for project in pconfigs.spec.projects %} + apiVersion: v1 + kind: ConfigMap + metadata: + name: p-{{ project }} + namespace: {{ pconfigs.metadata.namespace }} + labels: + createdby: "composition-namespaceconfigmap" + data: + name: {{ project }} + billingAccountRef: "010101-ABABCD-BCAB11" + folderRef: "000000111100" + --- + {% endfor %} +--- +apiVersion: composition.google.com/v1alpha1 +kind: Composition +metadata: + name: sprojectconfigmap + namespace: default +spec: + inputAPIGroup: sconfigs.facade.updatetest.com + expanders: + - type: jinja2 + name: project + template: | + {% set hostProject = 'compositions-foobar' %} + {% for project in sconfigs.spec.projects %} + apiVersion: v1 + kind: ConfigMap + metadata: + name: s-{{ project }} + namespace: {{ sconfigs.metadata.namespace }} + labels: + createdby: "composition-namespaceconfigmap" + data: + name: {{ project }} + billingAccountRef: "010101-ABABCD-BCAB11" + folderRef: 
"000000111100" + --- + {% endfor %} +--- +apiVersion: v1 +kind: Namespace +metadata: + name: team-a +--- +apiVersion: composition.google.com/v1alpha1 +kind: Context +metadata: + name: context + namespace: team-a +spec: + project: proj-a +--- +apiVersion: facade.updatetest.com/v1alpha1 +kind: PConfig +metadata: + name: team-a-config + namespace: team-a +spec: + projects: + - proj-a + - proj-b +--- +apiVersion: facade.updatetest.com/v1alpha1 +kind: SConfig +metadata: + name: team-a-config + namespace: team-a +spec: + projects: + - proj-a + - proj-b \ No newline at end of file diff --git a/experiments/compositions/composition/tests/data/TestSimpleCompositionUpdate/modified_composition.yaml b/experiments/compositions/composition/tests/data/TestSimpleCompositionUpdate/modified_composition.yaml new file mode 100644 index 0000000000..7f819ddda5 --- /dev/null +++ b/experiments/compositions/composition/tests/data/TestSimpleCompositionUpdate/modified_composition.yaml @@ -0,0 +1,41 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +apiVersion: composition.google.com/v1alpha1 +kind: Composition +metadata: + name: pprojectconfigmap + namespace: default +spec: + inputAPIGroup: pconfigs.facade.updatetest.com + expanders: + - type: jinja2 + name: project + template: | + {% set hostProject = 'compositions-foobar' %} + {% for project in pconfigs.spec.projects %} + apiVersion: v1 + kind: ConfigMap + metadata: + name: p-{{ project }} + namespace: {{ pconfigs.metadata.namespace }} + labels: + createdby: "composition-namespaceconfigmap" + data: + name: {{ project }}-updated + billingAccountRef: "010101-ABABCD-BCAB11" + folderRef: "000000111100" + --- + {% endfor %} \ No newline at end of file diff --git a/experiments/compositions/composition/tests/data/TestSimpleCompositionUpdate/output.yaml b/experiments/compositions/composition/tests/data/TestSimpleCompositionUpdate/output.yaml new file mode 100644 index 0000000000..87933f2971 --- /dev/null +++ b/experiments/compositions/composition/tests/data/TestSimpleCompositionUpdate/output.yaml @@ -0,0 +1,85 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +data: + billingAccountRef: 010101-ABABCD-BCAB11 + folderRef: "000000111100" + name: proj-a-updated +kind: ConfigMap +metadata: + labels: + createdby: composition-namespaceconfigmap + name: p-proj-a + namespace: team-a + ownerReferences: + - apiVersion: composition.google.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Plan + name: pconfigs-team-a-config +--- +apiVersion: v1 +data: + billingAccountRef: 010101-ABABCD-BCAB11 + folderRef: "000000111100" + name: proj-b-updated +kind: ConfigMap +metadata: + labels: + createdby: composition-namespaceconfigmap + name: p-proj-b + namespace: team-a + ownerReferences: + - apiVersion: composition.google.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Plan + name: pconfigs-team-a-config +--- +apiVersion: v1 +data: + billingAccountRef: 010101-ABABCD-BCAB11 + folderRef: "000000111100" + name: proj-a +kind: ConfigMap +metadata: + labels: + createdby: composition-namespaceconfigmap + name: s-proj-a + namespace: team-a + ownerReferences: + - apiVersion: composition.google.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Plan + name: sconfigs-team-a-config +--- +apiVersion: v1 +data: + billingAccountRef: 010101-ABABCD-BCAB11 + folderRef: "000000111100" + name: proj-b +kind: ConfigMap +metadata: + labels: + createdby: composition-namespaceconfigmap + name: s-proj-b + namespace: team-a + ownerReferences: + - apiVersion: composition.google.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Plan + name: sconfigs-team-a-config \ No newline at end of file diff --git a/experiments/compositions/composition/tests/testcases/simple_test.go b/experiments/compositions/composition/tests/testcases/simple_test.go index ecfb0fd456..4d79d0992c 100644 --- a/experiments/compositions/composition/tests/testcases/simple_test.go +++ b/experiments/compositions/composition/tests/testcases/simple_test.go @@ -50,6 +50,27 @@ func TestSimpleExpansionGrpc(t *testing.T) { s.VerifyOutputSpecMatches() } +func TestSimpleCompositionUpdate(t *testing.T) { + s := scenario.NewBasic(t) + defer s.Cleanup() + s.Setup() + + // Make sure the pre-update CRs have been created to ensure the coming update triggers a new reconcile. + cmNames := []string{"p-proj-a", "p-proj-b", "s-proj-a", "s-proj-b"} + cms := make([]*unstructured.Unstructured, 0) + for _, cmName := range cmNames { + cms = append(cms, utils.GetConfigMapObj("team-a", cmName)) + } + s.C.MustExist(cms, scenario.ExistTimeout) + + // Apply the modified Composition + s.ApplyManifests("modified composition", "modified_composition.yaml") + + // Changing the composition should trigger the expander to re-reconcile all objects. 
+ s.VerifyOutputExists() + s.VerifyOutputSpecMatches() +} + func TestSimpleDeleteFacade(t *testing.T) { //t.Parallel() s := scenario.NewBasic(t) From 77a54d7ee080180890c57af7e80b0abb451cd62b Mon Sep 17 00:00:00 2001 From: justinsb Date: Tue, 18 Jun 2024 13:37:29 -0400 Subject: [PATCH 007/101] feat: Add support for export to MonitoringDashboard direct actuation --- mockgcp/mockmonitoring/dashboard.go | 77 +++++ pkg/controller/direct/export.go | 36 ++ pkg/controller/direct/monitoring/maputils.go | 20 +- .../monitoringdashboard_controller.go | 2 + pkg/controller/direct/registry/registry.go | 2 + ...ted_export_monitoringdashboardbasic.golden | 45 +++ ...bject_monitoringdashboardbasic.golden.yaml | 3 +- .../monitoringdashboardbasic/_http.log | 27 +- .../monitoringdashboardbasic/create.yaml | 6 +- .../monitoringdashboardbasic/update.yaml | 3 +- ...ated_export_monitoringdashboardfull.golden | 48 +++ ...object_monitoringdashboardfull.golden.yaml | 66 ++++ .../monitoringdashboardfull/_http.log | 315 ++++++++++++++++++ .../monitoringdashboardfull/create.yaml | 59 ++++ ...ated_export_monitoringdashboardrefs.golden | 45 +++ ...object_monitoringdashboardrefs.golden.yaml | 1 - .../monitoringdashboardrefs/_http.log | 6 - .../monitoringdashboardrefs/create.yaml | 1 - .../monitoringdashboardrefs/update.yaml | 1 - tests/e2e/export.go | 69 +++- 20 files changed, 771 insertions(+), 61 deletions(-) create mode 100644 pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_generated_export_monitoringdashboardbasic.golden create mode 100644 pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden create mode 100644 pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml create mode 100644 pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log create mode 100644 pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml create mode 100644 pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_generated_export_monitoringdashboardrefs.golden diff --git a/mockgcp/mockmonitoring/dashboard.go b/mockgcp/mockmonitoring/dashboard.go index cfe97b9726..f41ade2345 100644 --- a/mockgcp/mockmonitoring/dashboard.go +++ b/mockgcp/mockmonitoring/dashboard.go @@ -22,6 +22,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" "github.com/GoogleCloudPlatform/k8s-config-connector/mockgcp/common/projects" pb "github.com/GoogleCloudPlatform/k8s-config-connector/mockgcp/generated/mockgcp/monitoring/dashboard/v1" @@ -67,6 +68,12 @@ func (s *DashboardsService) CreateDashboard(ctx context.Context, req *pb.CreateD defaulter := &dashboardDefaulter{} defaulter.visitDashboard(obj) + validator := &dashboardValidator{} + validator.visitDashboard(obj) + if len(validator.errors) > 0 { + return nil, status.Errorf(codes.InvalidArgument, "%v", validator.errors[0]) + } + obj.Name = fqn obj.Etag = computeEtag(obj) @@ -131,6 +138,70 @@ func (d *dashboardDefaulter) visitTextWidget(obj *pb.Widget_Text) { } } +type dashboardValidator struct { + errors []error +} + +func (d *dashboardValidator) errorf(format string, args ...interface{}) { + 
d.errors = append(d.errors, fmt.Errorf(format, args...)) +} + +func (d *dashboardValidator) visitDashboard(obj *pb.Dashboard) { + switch layout := obj.Layout.(type) { + case *pb.Dashboard_ColumnLayout: + d.visitColumnLayout(layout.ColumnLayout) + } +} + +func (d *dashboardValidator) visitColumnLayout(obj *pb.ColumnLayout) { + for _, column := range obj.Columns { + for _, widget := range column.Widgets { + d.visitWidget(widget) + } + } +} + +func (d *dashboardValidator) visitWidget(obj *pb.Widget) { + switch content := obj.Content.(type) { + case *pb.Widget_XyChart: + d.visitXYChartWidget(content.XyChart) + + case *pb.Widget_Scorecard: + d.visitScorecardWidget(content) + case *pb.Widget_Text: + d.visitTextWidget(content) + } +} + +func formatDuration(d *durationpb.Duration) string { + return fmt.Sprintf("%ds", d.Seconds) +} + +func (d *dashboardValidator) visitXYChartWidget(obj *pb.XyChart) { + timeshiftDuration := obj.TimeshiftDuration + if timeshiftDuration != nil && timeshiftDuration.AsDuration() != 0 { + if timeshiftDuration.Seconds < 60 { + // Should be columnLayout.columns[0].widgets[0].xyChart.timeshiftDuration ... + d.errorf("Field columnLayout.columns[].widgets[].xyChart.timeshiftDuration has an invalid value of %q: must be greater than or equal to one minute.", formatDuration(timeshiftDuration)) + return + } + + for _, dataSet := range obj.DataSets { + switch dataSet.GetPlotType() { + case pb.XyChart_DataSet_STACKED_BAR: + // TODO: Should be Field columnLayout.columns[0].widgets[2].xyChart.dataSets[0].plotType ... + d.errorf("Field columnLayout.columns[].widgets[].xyChart.dataSets[].plotType has an invalid value of %q: plot type is incompatible with XyChart's timeshiftDuration.", dataSet.GetPlotType()) + } + } + } +} + +func (d *dashboardValidator) visitScorecardWidget(obj *pb.Widget_Scorecard) { +} + +func (d *dashboardValidator) visitTextWidget(obj *pb.Widget_Text) { +} + func (s *DashboardsService) UpdateDashboard(ctx context.Context, req *pb.UpdateDashboardRequest) (*pb.Dashboard, error) { name, err := s.parseDashboardName(req.GetDashboard().GetName()) if err != nil { @@ -153,6 +224,12 @@ func (s *DashboardsService) UpdateDashboard(ctx context.Context, req *pb.UpdateD defaulter := &dashboardDefaulter{} defaulter.visitDashboard(updated) + validator := &dashboardValidator{} + validator.visitDashboard(updated) + if len(validator.errors) > 0 { + return nil, status.Errorf(codes.InvalidArgument, "%v", validator.errors[0]) + } + updated.Name = fqn updated.Etag = computeEtag(updated) diff --git a/pkg/controller/direct/export.go b/pkg/controller/direct/export.go index 3ec714a5d8..6139c8438c 100644 --- a/pkg/controller/direct/export.go +++ b/pkg/controller/direct/export.go @@ -64,5 +64,41 @@ func Export(ctx context.Context, url string, config *config.ControllerConfig) (* return u, nil } } + + //monitoring.googleapis.com/projects/PROJECT_NUMBER/dashboards/DASHBOARD_ID + if strings.HasPrefix(url, "//monitoring.googleapis.com/") { + tokens := strings.Split(strings.TrimPrefix(url, "//monitoring.googleapis.com/"), "/") + if len(tokens) == 4 && tokens[0] == "projects" && tokens[2] == "dashboards" { + model, err := registry.GetModel(schema.GroupKind{Group: "monitoring.cnrm.cloud.google.com", Kind: "MonitoringDashboard"}) + if err != nil { + return nil, err + } + in := &unstructured.Unstructured{} + in.SetName(tokens[3]) + if err := unstructured.SetNestedField(in.Object, tokens[1], "spec", "projectRef", "external"); err != nil { + return nil, err + } + + var reader client.Reader // TODO: Create 
erroring reader? + a, err := model.AdapterForObject(ctx, reader, in) + if err != nil { + return nil, err + } + found, err := a.Find(ctx) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("resource %q is not found", url) + } + + u, err := a.Export(ctx) + if err != nil { + return nil, err + } + + return u, nil + } + } return nil, nil } diff --git a/pkg/controller/direct/monitoring/maputils.go b/pkg/controller/direct/monitoring/maputils.go index cd59b38a51..6ece8b2312 100644 --- a/pkg/controller/direct/monitoring/maputils.go +++ b/pkg/controller/direct/monitoring/maputils.go @@ -123,25 +123,18 @@ func Duration_ToProto(mapCtx *MapContext, in *string) *durationpb.Duration { return nil } - if strings.HasPrefix(s, "seconds:") { - v := strings.TrimPrefix(s, "seconds:") - d, err := time.ParseDuration(v + "s") + if strings.HasSuffix(s, "s") { + d, err := time.ParseDuration(s) if err != nil { - mapCtx.Errorf("parsing duration %q: %w", v, err) + mapCtx.Errorf("parsing duration %q: %w", s, err) return nil } out := durationpb.New(d) return out } - // TODO: Is this 1:1 with durationpb? - d, err := time.ParseDuration(s) - if err != nil { - mapCtx.Errorf("parsing duration %q: %w", s, err) - return nil - } - out := durationpb.New(d) - return out + mapCtx.Errorf("parsing duration %q, must end in s", s) + return nil } func Duration_FromProto(mapCtx *MapContext, in *durationpb.Duration) *string { @@ -149,7 +142,8 @@ func Duration_FromProto(mapCtx *MapContext, in *durationpb.Duration) *string { return nil } - s := in.String() + d := in.AsDuration() + s := fmt.Sprintf("%vs", d.Seconds()) return &s } diff --git a/pkg/controller/direct/monitoring/monitoringdashboard_controller.go b/pkg/controller/direct/monitoring/monitoringdashboard_controller.go index 0330ddc2c7..c04c129a01 100644 --- a/pkg/controller/direct/monitoring/monitoringdashboard_controller.go +++ b/pkg/controller/direct/monitoring/monitoringdashboard_controller.go @@ -239,6 +239,8 @@ func (a *dashboardAdapter) Export(ctx context.Context) (*unstructured.Unstructur return nil, fmt.Errorf("error converting dashboard from API %w", err) } + spec.ProjectRef.External = a.projectID + specObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(spec) if err != nil { return nil, fmt.Errorf("error converting dashboard spec to unstructured: %w", err) diff --git a/pkg/controller/direct/registry/registry.go b/pkg/controller/direct/registry/registry.go index 946356cfc1..3a12265fc5 100644 --- a/pkg/controller/direct/registry/registry.go +++ b/pkg/controller/direct/registry/registry.go @@ -102,6 +102,8 @@ func SupportsIAM(groupKind schema.GroupKind) (bool, error) { switch groupKind { case schema.GroupKind{Group: "logging.cnrm.cloud.google.com", Kind: "LoggingLogMetric"}: return false, nil + case schema.GroupKind{Group: "monitoring.cnrm.cloud.google.com", Kind: "MonitoringDashboard"}: + return false, nil } return false, fmt.Errorf("groupKind %v is not recognized as a direct kind", groupKind) } diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_generated_export_monitoringdashboardbasic.golden b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_generated_export_monitoringdashboardbasic.golden new file mode 100644 index 0000000000..bbe56ec3d1 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_generated_export_monitoringdashboardbasic.golden @@ 
-0,0 +1,45 @@ +apiVersion: monitoring.cnrm.cloud.google.com/v1beta1 +kind: MonitoringDashboard +metadata: + name: monitoringdashboard-${uniqueId} +spec: + columnLayout: + columns: + - weight: 2 + widgets: + - title: Widget 1 + xyChart: + dataSets: + - plotType: LINE + timeSeriesQuery: + timeSeriesFilter: + aggregation: + perSeriesAligner: ALIGN_RATE + filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" + unitOverride: "1" + timeshiftDuration: 0s + yAxis: + label: y1Axis + scale: LINEAR + - text: + content: Widget 2 + format: MARKDOWN + - title: Widget 3 + xyChart: + dataSets: + - plotType: LINE + timeSeriesQuery: + timeSeriesFilter: + aggregation: + perSeriesAligner: ALIGN_RATE + filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" + unitOverride: "1" + yAxis: + label: y1Axis + scale: LINEAR + - logsPanel: + filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" + title: Widget 4 + displayName: monitoringdashboard-updated + projectRef: + external: ${projectId} \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_generated_object_monitoringdashboardbasic.golden.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_generated_object_monitoringdashboardbasic.golden.yaml index 81e1f8d473..2899418d31 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_generated_object_monitoringdashboardbasic.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_generated_object_monitoringdashboardbasic.golden.yaml @@ -36,14 +36,13 @@ spec: - title: Widget 3 xyChart: dataSets: - - plotType: STACKED_BAR + - plotType: LINE timeSeriesQuery: timeSeriesFilter: aggregation: perSeriesAligner: ALIGN_RATE filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" unitOverride: "1" - timeshiftDuration: 0s yAxis: label: y1Axis scale: LINEAR diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_http.log index a81a02ff3a..c669dd807f 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_http.log @@ -52,7 +52,7 @@ x-goog-request-params: parent=projects%2F${projectId} } } ], - "timeshiftDuration": "0s", + "timeshiftDuration": "100s", "yAxis": { "label": "y1Axis", "scale": 1 @@ -70,7 +70,7 @@ x-goog-request-params: parent=projects%2F${projectId} "xyChart": { "dataSets": [ { - "plotType": 3, + "plotType": 1, "timeSeriesQuery": { "timeSeriesFilter": { "aggregation": { @@ -82,7 +82,7 @@ x-goog-request-params: parent=projects%2F${projectId} } } ], - "timeshiftDuration": "0s", + "timeshiftDuration": "60s", "yAxis": { "label": "y1Axis", "scale": 1 @@ -141,7 +141,7 @@ X-Xss-Protection: 0 } } ], - "timeshiftDuration": "0s", + "timeshiftDuration": "100s", "yAxis": { "label": "y1Axis", "scale": "LINEAR" @@ -160,7 +160,7 @@ X-Xss-Protection: 0 "xyChart": { "dataSets": [ { - "plotType": "STACKED_BAR", + "plotType": "LINE", "targetAxis": "Y1", "timeSeriesQuery": { "timeSeriesFilter": { @@ -173,7 +173,7 @@ 
X-Xss-Protection: 0 } } ], - "timeshiftDuration": "0s", + "timeshiftDuration": "60s", "yAxis": { "label": "y1Axis", "scale": "LINEAR" @@ -240,7 +240,7 @@ X-Xss-Protection: 0 } } ], - "timeshiftDuration": "0s", + "timeshiftDuration": "100s", "yAxis": { "label": "y1Axis", "scale": "LINEAR" @@ -259,7 +259,7 @@ X-Xss-Protection: 0 "xyChart": { "dataSets": [ { - "plotType": "STACKED_BAR", + "plotType": "LINE", "targetAxis": "Y1", "timeSeriesQuery": { "timeSeriesFilter": { @@ -272,7 +272,7 @@ X-Xss-Protection: 0 } } ], - "timeshiftDuration": "0s", + "timeshiftDuration": "60s", "yAxis": { "label": "y1Axis", "scale": "LINEAR" @@ -345,7 +345,7 @@ x-goog-request-params: dashboard.name=projects%2F${projectId}%2Fdashboards%2Fmon "xyChart": { "dataSets": [ { - "plotType": 3, + "plotType": 1, "timeSeriesQuery": { "timeSeriesFilter": { "aggregation": { @@ -357,7 +357,6 @@ x-goog-request-params: dashboard.name=projects%2F${projectId}%2Fdashboards%2Fmon } } ], - "timeshiftDuration": "0s", "yAxis": { "label": "y1Axis", "scale": 1 @@ -432,7 +431,7 @@ X-Xss-Protection: 0 "xyChart": { "dataSets": [ { - "plotType": "STACKED_BAR", + "plotType": "LINE", "targetAxis": "Y1", "timeSeriesQuery": { "timeSeriesFilter": { @@ -445,7 +444,6 @@ X-Xss-Protection: 0 } } ], - "timeshiftDuration": "0s", "yAxis": { "label": "y1Axis", "scale": "LINEAR" @@ -528,7 +526,7 @@ X-Xss-Protection: 0 "xyChart": { "dataSets": [ { - "plotType": "STACKED_BAR", + "plotType": "LINE", "targetAxis": "Y1", "timeSeriesQuery": { "timeSeriesFilter": { @@ -541,7 +539,6 @@ X-Xss-Protection: 0 } } ], - "timeshiftDuration": "0s", "yAxis": { "label": "y1Axis", "scale": "LINEAR" diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/create.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/create.yaml index 812543a9eb..c3f07e130f 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/create.yaml @@ -32,7 +32,7 @@ spec: perSeriesAligner: "ALIGN_RATE" unitOverride: "1" plotType: LINE - timeshiftDuration: 0s + timeshiftDuration: 100s yAxis: label: y1Axis scale: LINEAR @@ -48,8 +48,8 @@ spec: aggregation: perSeriesAligner: ALIGN_RATE unitOverride: "1" - plotType: "STACKED_BAR" - timeshiftDuration: 0s + plotType: "LINE" + timeshiftDuration: 60s yAxis: label: y1Axis scale: LINEAR diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/update.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/update.yaml index 6b5d4d9b64..85e5ca13e9 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/update.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/update.yaml @@ -48,8 +48,7 @@ spec: aggregation: perSeriesAligner: ALIGN_RATE unitOverride: "1" - plotType: "STACKED_BAR" - timeshiftDuration: 0s + plotType: "LINE" yAxis: label: y1Axis scale: LINEAR diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden 
b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden new file mode 100644 index 0000000000..b78484894e --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden @@ -0,0 +1,48 @@ +apiVersion: monitoring.cnrm.cloud.google.com/v1beta1 +kind: MonitoringDashboard +metadata: + name: monitoringdashboard-${uniqueId} +spec: + columnLayout: + columns: + - weight: 2 + widgets: + - title: Widget 1 + xyChart: + dataSets: + - plotType: LINE + timeSeriesQuery: + timeSeriesFilter: + aggregation: + perSeriesAligner: ALIGN_RATE + filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" + unitOverride: "1" + timeshiftDuration: 600.5s + yAxis: + label: y1Axis + scale: LINEAR + - text: + content: Widget 2 + format: MARKDOWN + - title: Widget 3 + xyChart: + dataSets: + - plotType: STACKED_BAR + timeSeriesQuery: + timeSeriesFilter: + aggregation: + perSeriesAligner: ALIGN_RATE + filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" + unitOverride: "1" + yAxis: + label: y1Axis + scale: LINEAR + - logsPanel: + filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" + resourceNames: + - external: projects/${projectId} + kind: Project + title: Widget 4 + displayName: monitoringdashboard-full + projectRef: + external: ${projectId} \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml new file mode 100644 index 0000000000..bb77be910a --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml @@ -0,0 +1,66 @@ +apiVersion: monitoring.cnrm.cloud.google.com/v1beta1 +kind: MonitoringDashboard +metadata: + annotations: + cnrm.cloud.google.com/management-conflict-prevention-policy: none + finalizers: + - cnrm.cloud.google.com/finalizer + - cnrm.cloud.google.com/deletion-defender + generation: 2 + labels: + cnrm-test: "true" + name: monitoringdashboard-${uniqueId} + namespace: ${uniqueId} +spec: + columnLayout: + columns: + - weight: 2 + widgets: + - title: Widget 1 + xyChart: + dataSets: + - plotType: LINE + timeSeriesQuery: + timeSeriesFilter: + aggregation: + perSeriesAligner: ALIGN_RATE + filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" + unitOverride: "1" + timeshiftDuration: 600.500s + yAxis: + label: y1Axis + scale: LINEAR + - text: + content: Widget 2 + format: MARKDOWN + - title: Widget 3 + xyChart: + dataSets: + - plotType: STACKED_BAR + timeSeriesQuery: + timeSeriesFilter: + aggregation: + perSeriesAligner: ALIGN_RATE + filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" + unitOverride: "1" + yAxis: + label: y1Axis + scale: LINEAR + - logsPanel: + filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" + resourceNames: + - external: projects/${projectId} + title: Widget 4 + displayName: monitoringdashboard-full + projectRef: + external: ${projectId} + resourceID: monitoringdashboard-${uniqueId} +status: + conditions: + - lastTransitionTime: 
"1970-01-01T00:00:00Z" + message: The resource is up to date + reason: UpToDate + status: "True" + type: Ready + etag: abcdef123456 + observedGeneration: 2 diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log new file mode 100644 index 0000000000..04691b8a38 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log @@ -0,0 +1,315 @@ +GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +User-Agent: kcc/controller-manager +x-goog-request-params: name=projects%2F${projectId}%2Fdashboards%2Fmonitoringdashboard-${uniqueId} + +404 Not Found +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "error": { + "code": 404, + "message": "Requested entity was not found.", + "status": "NOT_FOUND" + } +} + +--- + +POST https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +User-Agent: kcc/controller-manager +x-goog-request-params: parent=projects%2F${projectId} + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": 1, + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": 2 + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "600.500s", + "yAxis": { + "label": "y1Axis", + "scale": 1 + } + } + }, + { + "text": { + "content": "Widget 2", + "format": 1 + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": 3, + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": 2 + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "yAxis": { + "label": "y1Axis", + "scale": 1 + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-full", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "600.500s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN", + "style": {} + } + }, + { + "title": 
"Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-full", + "etag": "abcdef0123A=", + "name": "projects/${projectNumber}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +User-Agent: kcc/controller-manager +x-goog-request-params: name=projects%2F${projectId}%2Fdashboards%2Fmonitoringdashboard-${uniqueId} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "600.500s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN", + "style": {} + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-full", + "etag": "abcdef0123A=", + "name": "projects/${projectNumber}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +DELETE https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +User-Agent: kcc/controller-manager +x-goog-request-params: name=projects%2F${projectId}%2Fdashboards%2Fmonitoringdashboard-${uniqueId} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{} \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml new file mode 100644 index 0000000000..57d88c03ba --- /dev/null 
+++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml @@ -0,0 +1,59 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: monitoring.cnrm.cloud.google.com/v1beta1 +kind: MonitoringDashboard +metadata: + name: monitoringdashboard-${uniqueId} +spec: + displayName: "monitoringdashboard-full" + columnLayout: + columns: + - weight: 2 + widgets: + - title: "Widget 1" + xyChart: + dataSets: + - timeSeriesQuery: + timeSeriesFilter: + filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" + aggregation: + perSeriesAligner: "ALIGN_RATE" + unitOverride: "1" + plotType: LINE + timeshiftDuration: 600.500s + yAxis: + label: y1Axis + scale: LINEAR + - text: + content: "Widget 2" + format: "MARKDOWN" + - title: "Widget 3" + xyChart: + dataSets: + - timeSeriesQuery: + timeSeriesFilter: + filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" + aggregation: + perSeriesAligner: ALIGN_RATE + unitOverride: "1" + plotType: "STACKED_BAR" + yAxis: + label: y1Axis + scale: LINEAR + - title: "Widget 4" + logsPanel: + filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" + resourceNames: + - external: "projects/${projectId}" diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_generated_export_monitoringdashboardrefs.golden b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_generated_export_monitoringdashboardrefs.golden new file mode 100644 index 0000000000..f7ff2add8f --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_generated_export_monitoringdashboardrefs.golden @@ -0,0 +1,45 @@ +apiVersion: monitoring.cnrm.cloud.google.com/v1beta1 +kind: MonitoringDashboard +metadata: + name: monitoringdashboard-${uniqueId} +spec: + columnLayout: + columns: + - weight: 2 + widgets: + - title: Widget 1 + xyChart: + dataSets: + - plotType: LINE + timeSeriesQuery: + timeSeriesFilter: + aggregation: + perSeriesAligner: ALIGN_RATE + filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" + unitOverride: "1" + timeshiftDuration: 0s + yAxis: + label: y1Axis + scale: LINEAR + - text: + content: Widget 2 + format: MARKDOWN + - title: Widget 3 + xyChart: + dataSets: + - plotType: STACKED_BAR + timeSeriesQuery: + timeSeriesFilter: + aggregation: + perSeriesAligner: ALIGN_RATE + filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" + unitOverride: "1" + yAxis: + label: y1Axis + scale: LINEAR + - logsPanel: + filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" + title: Widget 4 + displayName: monitoringdashboard updated + projectRef: + external: other${uniqueId} \ No newline at end of file diff --git 
a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_generated_object_monitoringdashboardrefs.golden.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_generated_object_monitoringdashboardrefs.golden.yaml index f3ebe5fcc7..2b0f27ce42 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_generated_object_monitoringdashboardrefs.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_generated_object_monitoringdashboardrefs.golden.yaml @@ -43,7 +43,6 @@ spec: perSeriesAligner: ALIGN_RATE filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" unitOverride: "1" - timeshiftDuration: 0s yAxis: label: y1Axis scale: LINEAR diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_http.log index 7d05004891..095f6b14f9 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_http.log @@ -234,7 +234,6 @@ x-goog-request-params: parent=projects%2Fother${uniqueId} } } ], - "timeshiftDuration": "0s", "yAxis": { "label": "y1Axis", "scale": 1 @@ -325,7 +324,6 @@ X-Xss-Protection: 0 } } ], - "timeshiftDuration": "0s", "yAxis": { "label": "y1Axis", "scale": "LINEAR" @@ -424,7 +422,6 @@ X-Xss-Protection: 0 } } ], - "timeshiftDuration": "0s", "yAxis": { "label": "y1Axis", "scale": "LINEAR" @@ -509,7 +506,6 @@ x-goog-request-params: dashboard.name=projects%2Fother${uniqueId}%2Fdashboards%2 } } ], - "timeshiftDuration": "0s", "yAxis": { "label": "y1Axis", "scale": 1 @@ -597,7 +593,6 @@ X-Xss-Protection: 0 } } ], - "timeshiftDuration": "0s", "yAxis": { "label": "y1Axis", "scale": "LINEAR" @@ -693,7 +688,6 @@ X-Xss-Protection: 0 } } ], - "timeshiftDuration": "0s", "yAxis": { "label": "y1Axis", "scale": "LINEAR" diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/create.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/create.yaml index 1f03f8670b..d680d31481 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/create.yaml @@ -51,7 +51,6 @@ spec: perSeriesAligner: ALIGN_RATE unitOverride: "1" plotType: "STACKED_BAR" - timeshiftDuration: 0s yAxis: label: y1Axis scale: LINEAR diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/update.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/update.yaml index cde1f5c1e9..0ac5db79d3 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/update.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/update.yaml @@ -51,7 +51,6 @@ spec: perSeriesAligner: ALIGN_RATE unitOverride: "1" plotType: "STACKED_BAR" - timeshiftDuration: 0s 
yAxis: label: y1Axis scale: LINEAR diff --git a/tests/e2e/export.go b/tests/e2e/export.go index 8fd7f00c45..f9490bdc47 100644 --- a/tests/e2e/export.go +++ b/tests/e2e/export.go @@ -22,44 +22,35 @@ import ( "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/cli/cmd/export" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/yaml" ) func exportResource(h *create.Harness, obj *unstructured.Unstructured) string { exportURI := "" - projectID := obj.GetAnnotations()["cnrm.cloud.google.com/project-id"] - if projectID == "" { - projectID = h.Project.ProjectID - } + projectID := resolveProjectID(h, obj) + resourceID, _, _ := unstructured.NestedString(obj.Object, "spec", "resourceID") if resourceID == "" { resourceID = obj.GetName() } // location, _, _ := unstructured.NestedString(obj.Object, "spec", "location") + // This list should match https://cloud.google.com/asset-inventory/docs/resource-name-format gvk := obj.GroupVersionKind() switch gvk.GroupKind() { case schema.GroupKind{Group: "serviceusage.cnrm.cloud.google.com", Kind: "Service"}: exportURI = "//serviceusage.googleapis.com/projects/" + projectID + "/services/" + resourceID - // case schema.GroupKind{Group: "certificatemanager.cnrm.cloud.google.com", Kind: "CertificateManagerCertificate"}: - // exportURI = "//certificatemanager.googleapis.com/projects/" + projectID + "/locations/" + location + "/certificates/" + resourceID - // case schema.GroupKind{Group: "certificatemanager.cnrm.cloud.google.com", Kind: "CertificateManagerCertificateMap"}: - // if location == "" { - // location = "global" - // } - // exportURI = "//certificatemanager.googleapis.com/projects/" + projectID + "/locations/" + location + "/certificateMaps/" + resourceID - // case schema.GroupKind{Group: "certificatemanager.cnrm.cloud.google.com", Kind: "CertificateManagerCertificateMapEntry"}: - // exportURI = "//certificatemanager.googleapis.com/projects/" + projectID + "/locations/" + location + "/certificateMaps/" + certificateMapID + "/certificateMapEntries/" + resourceID - // TODO: This does not work - // case schema.GroupKind{Group: "iam.cnrm.cloud.google.com", Kind: "IAMServiceAccount"}: - // name := obj.GetName() - // exportURI = "//iam.googleapis.com/projects/" + projectID + "/serviceAccounts/" + name + case schema.GroupKind{Group: "bigquery.cnrm.cloud.google.com", Kind: "BigQueryDataset"}: exportURI = "//bigquery.googleapis.com/projects/" + projectID + "/datasets/" + resourceID case schema.GroupKind{Group: "logging.cnrm.cloud.google.com", Kind: "LoggingLogMetric"}: exportURI = "//logging.googleapis.com/projects/" + projectID + "/metrics/" + resourceID + + case schema.GroupKind{Group: "monitoring.cnrm.cloud.google.com", Kind: "MonitoringDashboard"}: + exportURI = "//monitoring.googleapis.com/projects/" + projectID + "/dashboards/" + resourceID } if exportURI == "" { @@ -96,3 +87,47 @@ func exportResourceAsUnstructured(h *create.Harness, obj *unstructured.Unstructu } return u } + +func resolveProjectID(h *create.Harness, obj *unstructured.Unstructured) string { + projectRefExternal, _, _ := unstructured.NestedString(obj.Object, "spec", "projectRef", "external") + if projectRefExternal != "" { + tokens := strings.Split(projectRefExternal, "/") + if len(tokens) == 2 && tokens[0] == "projects" { + return tokens[1] + } + if len(tokens) == 1 { + return tokens[0] + } + h.Fatalf("invalid projectRef.external %q", projectRefExternal) + } + + projectRefName, _, _ := 
unstructured.NestedString(obj.Object, "spec", "projectRef", "name") + if projectRefName != "" { + projectRefNamespace, _, _ := unstructured.NestedString(obj.Object, "spec", "projectRef", "namespace") + + project := &unstructured.Unstructured{} + project.SetGroupVersionKind(schema.GroupVersionKind{Group: "resourcemanager.cnrm.cloud.google.com", Version: "v1beta1", Kind: "Project"}) + projectKey := types.NamespacedName{ + Name: projectRefName, + Namespace: projectRefNamespace, + } + if projectKey.Namespace == "" { + projectKey.Namespace = obj.GetNamespace() + } + if err := h.GetClient().Get(h.Ctx, projectKey, project); err != nil { + h.Fatalf("resolving projectRef: %v", err) + } + projectID, _, _ := unstructured.NestedString(project.Object, "spec", "resourceID") + if projectID == "" { + projectID = obj.GetName() + } + return projectID + } + + if projectID := obj.GetAnnotations()["cnrm.cloud.google.com/project-id"]; projectID != "" { + return projectID + } + + // Assume it's the namespace + return h.Project.ProjectID +} From 38fdbb1de93de23ddec675b87ed4a5fdb3071728 Mon Sep 17 00:00:00 2001 From: Hank Freund Date: Tue, 25 Jun 2024 06:26:43 -0700 Subject: [PATCH 008/101] Address review comments. --- .../controller/composition_controller.go | 18 +++++++-------- .../controller/expander_reconciler.go | 22 ++++++++++--------- 2 files changed, 21 insertions(+), 19 deletions(-) diff --git a/experiments/compositions/composition/internal/controller/composition_controller.go b/experiments/compositions/composition/internal/controller/composition_controller.go index fb0b734149..6915be3114 100644 --- a/experiments/compositions/composition/internal/controller/composition_controller.go +++ b/experiments/compositions/composition/internal/controller/composition_controller.go @@ -353,15 +353,15 @@ func (r *CompositionReconciler) processComposition( logger.Info("Starting Reconciler for InputAPI CRD") r.handoffChannels[gvk] = make(chan event.GenericEvent) expanderController := &ExpanderReconciler{ - Client: r.Client, - Recorder: r.mgr.GetEventRecorderFor(crd.Spec.Names.Plural + "-expander"), - Scheme: r.Scheme, - InputGVK: gvk, - Composition: types.NamespacedName{Name: c.Name, Namespace: c.Namespace}, - InputGVR: gvk.GroupVersion().WithResource(crd.Spec.Names.Plural), - RESTMapper: r.mgr.GetRESTMapper(), - Config: r.mgr.GetConfig(), - CRDChangedWatcher: r.handoffChannels[gvk], + Client: r.Client, + Recorder: r.mgr.GetEventRecorderFor(crd.Spec.Names.Plural + "-expander"), + Scheme: r.Scheme, + InputGVK: gvk, + Composition: types.NamespacedName{Name: c.Name, Namespace: c.Namespace}, + InputGVR: gvk.GroupVersion().WithResource(crd.Spec.Names.Plural), + RESTMapper: r.mgr.GetRESTMapper(), + Config: r.mgr.GetConfig(), + ComopsitionChangedWatcher: r.handoffChannels[gvk], } if err := expanderController.SetupWithManager(r.mgr, cr); err != nil { diff --git a/experiments/compositions/composition/internal/controller/expander_reconciler.go b/experiments/compositions/composition/internal/controller/expander_reconciler.go index 11f74bc51f..e97b94d2a3 100644 --- a/experiments/compositions/composition/internal/controller/expander_reconciler.go +++ b/experiments/compositions/composition/internal/controller/expander_reconciler.go @@ -54,15 +54,15 @@ import ( // ExpanderReconciler reconciles a expander object type ExpanderReconciler struct { client.Client - Scheme *runtime.Scheme - Recorder record.EventRecorder - RESTMapper meta.RESTMapper - Config *rest.Config - Dynamic *dynamic.DynamicClient - InputGVK schema.GroupVersionKind - 
InputGVR schema.GroupVersionResource - Composition types.NamespacedName - CRDChangedWatcher chan event.GenericEvent + Scheme *runtime.Scheme + Recorder record.EventRecorder + RESTMapper meta.RESTMapper + Config *rest.Config + Dynamic *dynamic.DynamicClient + InputGVK schema.GroupVersionKind + InputGVR schema.GroupVersionResource + Composition types.NamespacedName + ComopsitionChangedWatcher chan event.GenericEvent } type EvaluateWaitError struct { @@ -602,6 +602,8 @@ func (r *ExpanderReconciler) enqueueAllFromGVK(ctx context.Context, _ client.Obj return nil } var reqs []reconcile.Request + // TODO: If there are lots of objects, this will result in very many reconciles. Have not tested to see how the + // queue copes with this. If it becomes a problem, this will need a rethink. for _, inputcr := range inputcrList.Items { nn := types.NamespacedName{Name: inputcr.GetName(), Namespace: inputcr.GetNamespace()} reqs = append(reqs, reconcile.Request{NamespacedName: nn}) @@ -626,7 +628,7 @@ func (r *ExpanderReconciler) SetupWithManager(mgr ctrl.Manager, cr *unstructured return ctrl.NewControllerManagedBy(mgr). For(cr). - WatchesRawSource(&source.Channel{Source: r.CRDChangedWatcher}, handler.EnqueueRequestsFromMapFunc(r.enqueueAllFromGVK)). + WatchesRawSource(&source.Channel{Source: r.ComopsitionChangedWatcher}, handler.EnqueueRequestsFromMapFunc(r.enqueueAllFromGVK)). WithOptions(controller.Options{RateLimiter: ratelimiter}). Complete(r) } From e2cb9262c1d0685ec419c4f95347d64cd112c60c Mon Sep 17 00:00:00 2001 From: justinsb Date: Tue, 25 Jun 2024 09:48:02 -0400 Subject: [PATCH 009/101] docs: update 1.120 release notes to document new fields --- docs/releasenotes/release-1.120.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/releasenotes/release-1.120.md b/docs/releasenotes/release-1.120.md index 99c2534ad9..39cdd2e2da 100644 --- a/docs/releasenotes/release-1.120.md +++ b/docs/releasenotes/release-1.120.md @@ -21,6 +21,13 @@ output fields from GCP APIs are in `status.observedState.*` ## New Fields: +* `MonitoringAlertPolicy` + * Added `spec.severity` field. + +* `StorageBucket` + * Added `spec.softDeletePolicy` field. + * Added `status.observedState.softDeletePolicy` field. + * PlaceholderKind * Added `spec.placeholder` field. From ea18ba3fbaf79803909eacfdbf3da545690c9a2a Mon Sep 17 00:00:00 2001 From: justinsb Date: Tue, 25 Jun 2024 16:51:25 +0000 Subject: [PATCH 010/101] refactor: Refactor lifecyclehandler test to avoid circular dependency This test was using the full controller test machinery, which caused circular dependencies when we used the lifecycle handler in controllers. 
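For illustration, a minimal sketch (not literal code from this patch) of the
test pattern the refactor moves to. It assumes the test.KubeHarness helpers
added below in pkg/test/kubeharness.go (NewKubeHarness, GetClient,
CreateDummyCRD, EnsureNamespaceExists); the point is that each test builds its
own kube-apiserver-only harness instead of sharing a manager from
testmain.ForUnitTests:

    // Hypothetical standalone sketch; package and test names are illustrative.
    package lifecyclehandler_sketch

    import (
        "context"
        "testing"

        "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/test"
        testvariable "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/test/resourcefixture/variable"
    )

    func TestWithKubeHarness(t *testing.T) {
        ctx := context.TODO()
        h := test.NewKubeHarness(ctx, t) // envtest (or mock) kube-apiserver, torn down via t.Cleanup
        c := h.GetClient()               // plain controller-runtime client, no controller manager

        // Register a throwaway CRD so unstructured fixtures can be created.
        h.CreateDummyCRD("test1.cnrm.cloud.google.com", "v1alpha1", "Test1Bar")

        // Give the test case its own namespace, as the updated handler_test.go does.
        ns := testvariable.NewUniqueID()
        h.EnsureNamespaceExists(ns)

        _ = c // create objects with c and assert on lifecyclehandler behavior here
    }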
--- .../lifecyclehandler/handler_test.go | 24 +- pkg/test/kubeharness.go | 272 ++++++++++++++++++ 2 files changed, 281 insertions(+), 15 deletions(-) create mode 100644 pkg/test/kubeharness.go diff --git a/pkg/controller/lifecyclehandler/handler_test.go b/pkg/controller/lifecyclehandler/handler_test.go index a794c21d9f..cb328986eb 100644 --- a/pkg/controller/lifecyclehandler/handler_test.go +++ b/pkg/controller/lifecyclehandler/handler_test.go @@ -15,25 +15,19 @@ package lifecyclehandler import ( + "context" "errors" "testing" corekccv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/apis/core/v1alpha1" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/k8s" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/test" - testcontroller "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/test/controller" - testmain "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/test/main" testvariable "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/test/resourcefixture/variable" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -var ( - mgr manager.Manager ) func TestIsOrphaned(t *testing.T) { @@ -152,16 +146,20 @@ func TestIsOrphaned(t *testing.T) { isOrphaned: false, }, } - c := mgr.GetClient() + ctx := context.TODO() + h := test.NewKubeHarness(ctx, t) + c := h.GetClient() + + h.CreateDummyCRD("test1.cnrm.cloud.google.com", "v1alpha1", "Test1Foo") + h.CreateDummyCRD("test1.cnrm.cloud.google.com", "v1alpha1", "Test1Bar") + for _, tc := range tests { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() testID := testvariable.NewUniqueID() tc.resource.SetNamespace(testID) - if err := testcontroller.EnsureNamespaceExists(c, testID); err != nil { - t.Fatal(err) - } + h.EnsureNamespaceExists(testID) if tc.parentObjectName != "" { references := []*unstructured.Unstructured{ test.NewBarUnstructured(tc.parentObjectName, testID, corev1.ConditionTrue), @@ -260,7 +258,3 @@ func Test_reasonForUnresolvableDeps(t *testing.T) { }) } } - -func TestMain(m *testing.M) { - testmain.ForUnitTests(m, &mgr) -} diff --git a/pkg/test/kubeharness.go b/pkg/test/kubeharness.go new file mode 100644 index 0000000000..16e5e8cb37 --- /dev/null +++ b/pkg/test/kubeharness.go @@ -0,0 +1,272 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package test + +import ( + "context" + "errors" + "fmt" + "net/http" + "os" + "strings" + "sync" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/kubebuilder-declarative-pattern/mockkubeapiserver" + + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/dynamic" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/crd/crdloader" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/logging" +) + +// KubeHarness is a test harness that brings up a kube-apiserver (only). +type KubeHarness struct { + *testing.T + Ctx context.Context + + client client.Client + restConfig *rest.Config + + KubeEvents *MemoryEventSink +} + +// NewKubeHarness creates a new KubeHarness. +func NewKubeHarness(ctx context.Context, t *testing.T) *KubeHarness { + ctx, ctxCancel := context.WithCancel(ctx) + t.Cleanup(func() { + ctxCancel() + }) + log := log.FromContext(ctx) + + h := &KubeHarness{ + T: t, + Ctx: ctx, + } + + loadCRDs := true + if targetKube := os.Getenv("E2E_KUBE_TARGET"); targetKube == "envtest" || targetKube == "" { + env := &envtest.Environment{ + ControlPlaneStartTimeout: time.Minute, + ControlPlaneStopTimeout: time.Minute, + } + + h.Logf("starting envtest apiserver") + restConfig, err := env.Start() + if err != nil { + h.Fatalf("error starting test environment: %v", err) + } + + t.Cleanup(func() { + if err := env.Stop(); err != nil { + h.Errorf("error stopping envtest environment: %v", err) + } + }) + + h.restConfig = restConfig + } else if targetKube := os.Getenv("E2E_KUBE_TARGET"); targetKube == "mock" { + k8s, err := mockkubeapiserver.NewMockKubeAPIServer(":0") + if err != nil { + h.Fatalf("error building mock kube-apiserver: %v", err) + } + + addr, err := k8s.StartServing() + if err != nil { + h.Fatalf("error starting mock kube-apiserver: %v", err) + } + + t.Cleanup(func() { + if err := k8s.Stop(); err != nil { + h.Errorf("error stopping mock kube-apiserver: %v", err) + } + }) + + h.restConfig = &rest.Config{ + Host: addr.String(), + ContentConfig: rest.ContentConfig{ + ContentType: "application/json", + }, + // gotta go fast during tests -- we don't really care about overwhelming our test API server + QPS: 1000.0, + Burst: 2000.0, + } + } else { + t.Fatalf("E2E_KUBE_TARGET=%q not supported", targetKube) + } + + // Set up logging of k8s requests + logKubeRequests := true + if logKubeRequests { + eventSinks := EventSinksFromContext(ctx) + kubeEvents := NewMemoryEventSink() + h.KubeEvents = kubeEvents + + eventSinks = append(eventSinks, kubeEvents) + + wrapTransport := func(rt http.RoundTripper) http.RoundTripper { + t := NewHTTPRecorder(rt, eventSinks...) 
+ return t + } + h.restConfig.Wrap(wrapTransport) + } + + if h.client == nil { + client, err := client.New(h.restConfig, client.Options{}) + if err != nil { + h.Fatalf("error building client: %v", err) + } + h.client = client + } + + logging.SetupLogger() + + if loadCRDs { + crds, err := crdloader.LoadAllCRDs() + if err != nil { + h.Fatalf("error loading crds: %v", err) + } + { + var wg sync.WaitGroup + var errsMutex sync.Mutex + var errs []error + + for i := range crds { + crd := &crds[i] + wg.Add(1) + log.V(2).Info("loading crd", "name", crd.GetName()) + + go func() { + defer wg.Done() + if err := h.client.Create(ctx, crd.DeepCopy()); err != nil { + errsMutex.Lock() + defer errsMutex.Unlock() + errs = append(errs, fmt.Errorf("error creating crd %v: %w", crd.GroupVersionKind(), err)) + } + h.waitForCRDReady(crd) + }() + } + wg.Wait() + if len(errs) != 0 { + h.Fatalf("error creating crds: %v", errors.Join(errs...)) + } + } + } + + return h +} + +func (h *KubeHarness) GetClient() client.Client { + return h.client +} + +func (h *KubeHarness) GetRESTConfig() *rest.Config { + return h.restConfig +} + +func (h *KubeHarness) waitForCRDReady(obj client.Object) { + logger := log.FromContext(h.Ctx) + + apiVersion, kind := obj.GetObjectKind().GroupVersionKind().ToAPIVersionAndKind() + name := obj.GetName() + namespace := obj.GetNamespace() + + id := types.NamespacedName{Name: name, Namespace: namespace} + if err := wait.PollImmediate(2*time.Second, 1*time.Minute, func() (bool, error) { + u := &unstructured.Unstructured{} + u.SetAPIVersion(apiVersion) + u.SetKind(kind) + logger.V(2).Info("Testing to see if resource is ready", "kind", kind, "id", id) + if err := h.GetClient().Get(h.Ctx, id, u); err != nil { + logger.Info("Error getting resource", "kind", kind, "id", id, "error", err) + return false, err + } + objectStatus := dynamic.GetObjectStatus(h.T, u) + // CRDs do not have observedGeneration + for _, condition := range objectStatus.Conditions { + if condition.Type == "Established" && condition.Status == "True" { + logger.V(2).Info("crd is ready", "kind", kind, "id", id) + return true, nil + } + } + // This resource is not completely ready. Let's keep polling. + logger.V(2).Info("CRD is not ready", "kind", kind, "id", id, "conditions", objectStatus.Conditions) + return false, nil + }); err != nil { + h.Errorf("error while polling for ready on %v %v: %v", kind, id, err) + return + } +} + +func (h *KubeHarness) MustReadFile(p string) []byte { + return MustReadFile(h.T, p) +} + +func (h *KubeHarness) EnsureNamespaceExists(name string) { + ctx := h.Ctx + ns := &corev1.Namespace{} + ns.SetName(name) + if err := h.GetClient().Create(ctx, ns); err != nil { + if !apierrors.IsAlreadyExists(err) { + h.Fatalf("error creating namespace %v: %v", name, err) + } + } +} + +// CreateDummyCRD registers a CRD so we can create objects in tests +func (h *KubeHarness) CreateDummyCRD(group, version, kind string) { + ctx := h.Ctx + + resource := strings.ToLower(kind) + "s" + crd := &apiextensions.CustomResourceDefinition{} + crd.SetGroupVersionKind(apiextensions.SchemeGroupVersion.WithKind("CustomResourceDefinition")) + + crd.SetName(resource + "." 
+ group) + crd.Spec.Group = group + crd.Spec.Names.Kind = kind + crd.Spec.Names.Plural = resource + crd.Spec.Scope = apiextensions.NamespaceScoped + + crd.Spec.Versions = append(crd.Spec.Versions, apiextensions.CustomResourceDefinitionVersion{ + Name: version, + Served: true, + Storage: true, + Schema: &apiextensions.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensions.JSONSchemaProps{ + Type: "object", + Properties: map[string]apiextensions.JSONSchemaProps{ + "spec": { + Type: "object", + }, + }, + }, + }, + }) + + if err := h.client.Create(ctx, crd); err != nil { + h.Fatalf("error creating crd %v: %v", crd.GroupVersionKind(), err) + } + crd.SetGroupVersionKind(apiextensions.SchemeGroupVersion.WithKind("CustomResourceDefinition")) + h.waitForCRDReady(crd) +} From 8306dfd5d7fdf63566b56b02f35d617df55b7e8d Mon Sep 17 00:00:00 2001 From: justinsb Date: Fri, 21 Jun 2024 18:16:57 -0400 Subject: [PATCH 011/101] tests: add more StorageBucket tests Covering softDeletePolicy.retentionDurationSeconds in particular. --- ...ated_object_storagebucketbasic.golden.yaml | 39 ++ .../storagebucketbasic/_http.log | 412 +++++++++++++++++ .../{ => storagebucketbasic}/create.yaml | 0 .../{ => storagebucketbasic}/update.yaml | 0 ...object_storagebucketsoftdelete.golden.yaml | 47 ++ .../storagebucketsoftdelete/_http.log | 432 ++++++++++++++++++ .../storagebucketsoftdelete/create.yaml | 30 ++ .../storagebucketsoftdelete/update.yaml | 25 + ...rated_object_storagebucketzero.golden.yaml | 41 ++ .../{ => storagebucketzero}/_http.log | 201 +------- .../storagebucketzero/create.yaml | 30 ++ .../storagebucketzero/update.yaml | 26 ++ ...generated_object_storagebucket.golden.yaml | 7 +- 13 files changed, 1091 insertions(+), 199 deletions(-) create mode 100644 pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketbasic/_generated_object_storagebucketbasic.golden.yaml create mode 100644 pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketbasic/_http.log rename pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/{ => storagebucketbasic}/create.yaml (100%) rename pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/{ => storagebucketbasic}/update.yaml (100%) create mode 100644 pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/_generated_object_storagebucketsoftdelete.golden.yaml create mode 100644 pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/_http.log create mode 100644 pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/create.yaml create mode 100644 pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/update.yaml create mode 100644 pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/_generated_object_storagebucketzero.golden.yaml rename pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/{ => storagebucketzero}/_http.log (66%) create mode 100644 pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/create.yaml create mode 100644 pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/update.yaml rename pkg/test/resourcefixture/testdata/{basic/storage/v1beta1 => reconcileintervalannotations}/storagebucket/_generated_object_storagebucket.golden.yaml (88%) diff --git 
a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketbasic/_generated_object_storagebucketbasic.golden.yaml b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketbasic/_generated_object_storagebucketbasic.golden.yaml new file mode 100644 index 0000000000..bf2b29e6ab --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketbasic/_generated_object_storagebucketbasic.golden.yaml @@ -0,0 +1,39 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucket +metadata: + annotations: + cnrm.cloud.google.com/management-conflict-prevention-policy: none + cnrm.cloud.google.com/project-id: ${projectId} + cnrm.cloud.google.com/state-into-spec: merge + finalizers: + - cnrm.cloud.google.com/finalizer + - cnrm.cloud.google.com/deletion-defender + generation: 5 + labels: + cnrm-test: "true" + label-one: value-one + newkey: newval + name: storagebucket-sample-${uniqueId} + namespace: ${uniqueId} +spec: + location: US + publicAccessPrevention: inherited + resourceID: storagebucket-sample-${uniqueId} + softDeletePolicy: + retentionDurationSeconds: 0 + storageClass: STANDARD + versioning: + enabled: true +status: + conditions: + - lastTransitionTime: "1970-01-01T00:00:00Z" + message: The resource is up to date + reason: UpToDate + status: "True" + type: Ready + observedGeneration: 5 + observedState: + softDeletePolicy: + retentionDurationSeconds: 0 + selfLink: https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId} + url: gs://storagebucket-sample-${uniqueId} diff --git a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketbasic/_http.log b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketbasic/_http.log new file mode 100644 index 0000000000..c0e05c2d43 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketbasic/_http.log @@ -0,0 +1,412 @@ +GET https://storage.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +404 Not Found +Cache-Control: no-cache, no-store, max-age=0, must-revalidate +Content-Type: application/json; charset=UTF-8 +Expires: Mon, 01 Jan 1990 00:00:00 GMT +Pragma: no-cache +Server: UploadServer +Vary: Origin +Vary: X-Origin + +{ + "error": { + "code": 404, + "errors": [ + { + "domain": "global", + "message": "The specified bucket does not exist.", + "reason": "notFound" + } + ], + "message": "The specified bucket does not exist." 
+ } +} + +--- + +POST https://storage.googleapis.com/storage/v1/b?alt=json&prettyPrint=false&project=${projectId} +Content-Type: application/json +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +{ + "iamConfiguration": { + "uniformBucketLevelAccess": { + "enabled": false + } + }, + "labels": { + "cnrm-test": "true", + "label-one": "value-one", + "managed-by-cnrm": "true" + }, + "lifecycle": { + "rule": [ + { + "action": { + "type": "Delete" + }, + "condition": { + "age": 7 + } + } + ] + }, + "location": "US", + "name": "storagebucket-sample-${uniqueId}", + "softDeletePolicy": { + "retentionDurationSeconds": "604800" + }, + "storageClass": "STANDARD", + "versioning": { + "enabled": false + } +} + +200 OK +Cache-Control: no-cache, no-store, max-age=0, must-revalidate +Content-Type: application/json; charset=UTF-8 +Expires: Mon, 01 Jan 1990 00:00:00 GMT +Pragma: no-cache +Server: UploadServer +Vary: Origin +Vary: X-Origin + +{ + "etag": "abcdef0123A=", + "iamConfiguration": { + "bucketPolicyOnly": { + "enabled": false + }, + "publicAccessPrevention": "inherited", + "uniformBucketLevelAccess": { + "enabled": false + } + }, + "id": "000000000000000000000", + "kind": "storage#bucket", + "labels": { + "cnrm-test": "true", + "label-one": "value-one", + "managed-by-cnrm": "true" + }, + "lifecycle": { + "rule": [ + { + "action": { + "type": "Delete" + }, + "condition": { + "age": 7 + } + } + ] + }, + "location": "US", + "locationType": "multi-region", + "metageneration": "1", + "name": "storagebucket-sample-${uniqueId}", + "projectNumber": "${projectNumber}", + "rpo": "DEFAULT", + "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}", + "softDeletePolicy": { + "effectiveTime": "2024-04-01T12:34:56.123456Z", + "retentionDurationSeconds": "604800" + }, + "storageClass": "STANDARD", + "timeCreated": "2024-04-01T12:34:56.123456Z", + "updated": "2024-04-01T12:34:56.123456Z", + "versioning": { + "enabled": false + } +} + +--- + +GET https://storage.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private, max-age=0, must-revalidate, no-transform +Content-Type: application/json; charset=UTF-8 +Expires: {now+0m} +Server: UploadServer +Vary: Origin +Vary: X-Origin + +{ + "etag": "abcdef0123A=", + "iamConfiguration": { + "bucketPolicyOnly": { + "enabled": false + }, + "publicAccessPrevention": "inherited", + "uniformBucketLevelAccess": { + "enabled": false + } + }, + "id": "000000000000000000000", + "kind": "storage#bucket", + "labels": { + "cnrm-test": "true", + "label-one": "value-one", + "managed-by-cnrm": "true" + }, + "lifecycle": { + "rule": [ + { + "action": { + "type": "Delete" + }, + "condition": { + "age": 7 + } + } + ] + }, + "location": "US", + "locationType": "multi-region", + "metageneration": "1", + "name": "storagebucket-sample-${uniqueId}", + "projectNumber": "${projectNumber}", + "rpo": "DEFAULT", + "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}", + "softDeletePolicy": { + "effectiveTime": "2024-04-01T12:34:56.123456Z", + "retentionDurationSeconds": "604800" + }, + "storageClass": "STANDARD", + "timeCreated": "2024-04-01T12:34:56.123456Z", + "updated": "2024-04-01T12:34:56.123456Z", + 
"versioning": { + "enabled": false + } +} + +--- + +PATCH https://storage.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}?alt=json&prettyPrint=false +Content-Type: application/json +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +{ + "labels": { + "cnrm-test": "true", + "label-one": "value-one", + "managed-by-cnrm": "true", + "newkey": "newval" + }, + "lifecycle": { + "rule": [] + }, + "softDeletePolicy": { + "retentionDurationSeconds": "0" + }, + "versioning": { + "enabled": true + } +} + +200 OK +Cache-Control: no-cache, no-store, max-age=0, must-revalidate +Content-Type: application/json; charset=UTF-8 +Expires: Mon, 01 Jan 1990 00:00:00 GMT +Pragma: no-cache +Server: UploadServer +Vary: Origin +Vary: X-Origin + +{ + "acl": [ + { + "bucket": "storagebucket-sample-${uniqueId}", + "entity": "project-owners-${projectNumber}", + "etag": "abcdef0123A", + "id": "storagebucket-sample-${uniqueId}/project-owners-${projectNumber}", + "kind": "storage#bucketAccessControl", + "projectTeam": { + "projectNumber": "${projectNumber}", + "team": "owners" + }, + "role": "OWNER", + "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}/acl/project-owners-${projectNumber}" + }, + { + "bucket": "storagebucket-sample-${uniqueId}", + "entity": "project-editors-${projectNumber}", + "etag": "abcdef0123A", + "id": "storagebucket-sample-${uniqueId}/project-editors-${projectNumber}", + "kind": "storage#bucketAccessControl", + "projectTeam": { + "projectNumber": "${projectNumber}", + "team": "editors" + }, + "role": "OWNER", + "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}/acl/project-editors-${projectNumber}" + }, + { + "bucket": "storagebucket-sample-${uniqueId}", + "entity": "project-viewers-${projectNumber}", + "etag": "abcdef0123A", + "id": "storagebucket-sample-${uniqueId}/project-viewers-${projectNumber}", + "kind": "storage#bucketAccessControl", + "projectTeam": { + "projectNumber": "${projectNumber}", + "team": "viewers" + }, + "role": "READER", + "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}/acl/project-viewers-${projectNumber}" + } + ], + "defaultObjectAcl": [ + { + "entity": "project-owners-${projectNumber}", + "etag": "abcdef0123A=", + "kind": "storage#objectAccessControl", + "projectTeam": { + "projectNumber": "${projectNumber}", + "team": "owners" + }, + "role": "OWNER" + }, + { + "entity": "project-editors-${projectNumber}", + "etag": "abcdef0123A=", + "kind": "storage#objectAccessControl", + "projectTeam": { + "projectNumber": "${projectNumber}", + "team": "editors" + }, + "role": "OWNER" + }, + { + "entity": "project-viewers-${projectNumber}", + "etag": "abcdef0123A=", + "kind": "storage#objectAccessControl", + "projectTeam": { + "projectNumber": "${projectNumber}", + "team": "viewers" + }, + "role": "READER" + } + ], + "etag": "abcdef0123A=", + "iamConfiguration": { + "bucketPolicyOnly": { + "enabled": false + }, + "publicAccessPrevention": "inherited", + "uniformBucketLevelAccess": { + "enabled": false + } + }, + "id": "000000000000000000000", + "kind": "storage#bucket", + "labels": { + "cnrm-test": "true", + "label-one": "value-one", + "managed-by-cnrm": "true", + "newkey": "newval" + }, + "location": "US", + "locationType": "multi-region", + "metageneration": "2", + "name": "storagebucket-sample-${uniqueId}", + "owner": { + "entity": 
"project-owners-${projectNumber}" + }, + "projectNumber": "${projectNumber}", + "rpo": "DEFAULT", + "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}", + "softDeletePolicy": { + "retentionDurationSeconds": "0" + }, + "storageClass": "STANDARD", + "timeCreated": "2024-04-01T12:34:56.123456Z", + "updated": "2024-04-01T12:34:56.123456Z", + "versioning": { + "enabled": true + } +} + +--- + +GET https://storage.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private, max-age=0, must-revalidate, no-transform +Content-Type: application/json; charset=UTF-8 +Expires: {now+0m} +Server: UploadServer +Vary: Origin +Vary: X-Origin + +{ + "etag": "abcdef0123A=", + "iamConfiguration": { + "bucketPolicyOnly": { + "enabled": false + }, + "publicAccessPrevention": "inherited", + "uniformBucketLevelAccess": { + "enabled": false + } + }, + "id": "000000000000000000000", + "kind": "storage#bucket", + "labels": { + "cnrm-test": "true", + "label-one": "value-one", + "managed-by-cnrm": "true", + "newkey": "newval" + }, + "location": "US", + "locationType": "multi-region", + "metageneration": "2", + "name": "storagebucket-sample-${uniqueId}", + "projectNumber": "${projectNumber}", + "rpo": "DEFAULT", + "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}", + "softDeletePolicy": { + "retentionDurationSeconds": "0" + }, + "storageClass": "STANDARD", + "timeCreated": "2024-04-01T12:34:56.123456Z", + "updated": "2024-04-01T12:34:56.123456Z", + "versioning": { + "enabled": true + } +} + +--- + +GET https://storage.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}/o?alt=json&prettyPrint=false&versions=true +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private, max-age=0, must-revalidate, no-transform +Content-Type: application/json; charset=UTF-8 +Expires: {now+0m} +Server: UploadServer +Vary: Origin +Vary: X-Origin + +{ + "kind": "storage#objects" +} + +--- + +DELETE https://storage.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +204 No Content +Cache-Control: no-cache, no-store, max-age=0, must-revalidate +Content-Type: application/json +Expires: Mon, 01 Jan 1990 00:00:00 GMT +Pragma: no-cache +Server: UploadServer +Vary: Origin +Vary: X-Origin \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/create.yaml b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketbasic/create.yaml similarity index 100% rename from pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/create.yaml rename to pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketbasic/create.yaml diff --git a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/update.yaml b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketbasic/update.yaml similarity index 100% rename from 
pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/update.yaml rename to pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketbasic/update.yaml diff --git a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/_generated_object_storagebucketsoftdelete.golden.yaml b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/_generated_object_storagebucketsoftdelete.golden.yaml new file mode 100644 index 0000000000..c52ed4bbb3 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/_generated_object_storagebucketsoftdelete.golden.yaml @@ -0,0 +1,47 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucket +metadata: + annotations: + cnrm.cloud.google.com/management-conflict-prevention-policy: none + cnrm.cloud.google.com/project-id: ${projectId} + cnrm.cloud.google.com/state-into-spec: merge + finalizers: + - cnrm.cloud.google.com/finalizer + - cnrm.cloud.google.com/deletion-defender + generation: 3 + labels: + cnrm-test: "true" + label-one: value-one + newkey: newval + name: storagebucket-sample-${uniqueId} + namespace: ${uniqueId} +spec: + lifecycleRule: + - action: + type: Delete + condition: + age: 7 + withState: ANY + location: US + publicAccessPrevention: inherited + resourceID: storagebucket-sample-${uniqueId} + softDeletePolicy: + effectiveTime: "1970-01-01T00:00:00Z" + retentionDurationSeconds: 604800 + storageClass: STANDARD + versioning: + enabled: false +status: + conditions: + - lastTransitionTime: "1970-01-01T00:00:00Z" + message: The resource is up to date + reason: UpToDate + status: "True" + type: Ready + observedGeneration: 3 + observedState: + softDeletePolicy: + effectiveTime: "1970-01-01T00:00:00Z" + retentionDurationSeconds: 604800 + selfLink: https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId} + url: gs://storagebucket-sample-${uniqueId} diff --git a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/_http.log b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/_http.log new file mode 100644 index 0000000000..b9546e5fc9 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/_http.log @@ -0,0 +1,432 @@ +GET https://storage.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +404 Not Found +Cache-Control: no-cache, no-store, max-age=0, must-revalidate +Content-Type: application/json; charset=UTF-8 +Expires: Mon, 01 Jan 1990 00:00:00 GMT +Pragma: no-cache +Server: UploadServer +Vary: Origin +Vary: X-Origin + +{ + "error": { + "code": 404, + "errors": [ + { + "domain": "global", + "message": "The specified bucket does not exist.", + "reason": "notFound" + } + ], + "message": "The specified bucket does not exist." 
+ } +} + +--- + +POST https://storage.googleapis.com/storage/v1/b?alt=json&prettyPrint=false&project=${projectId} +Content-Type: application/json +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +{ + "iamConfiguration": { + "uniformBucketLevelAccess": { + "enabled": false + } + }, + "labels": { + "cnrm-test": "true", + "label-one": "value-one", + "managed-by-cnrm": "true" + }, + "lifecycle": { + "rule": [ + { + "action": { + "type": "Delete" + }, + "condition": { + "age": 7 + } + } + ] + }, + "location": "US", + "name": "storagebucket-sample-${uniqueId}", + "softDeletePolicy": { + "retentionDurationSeconds": "604800" + }, + "storageClass": "STANDARD", + "versioning": { + "enabled": true + } +} + +200 OK +Cache-Control: no-cache, no-store, max-age=0, must-revalidate +Content-Type: application/json; charset=UTF-8 +Expires: Mon, 01 Jan 1990 00:00:00 GMT +Pragma: no-cache +Server: UploadServer +Vary: Origin +Vary: X-Origin + +{ + "etag": "abcdef0123A=", + "iamConfiguration": { + "bucketPolicyOnly": { + "enabled": false + }, + "publicAccessPrevention": "inherited", + "uniformBucketLevelAccess": { + "enabled": false + } + }, + "id": "000000000000000000000", + "kind": "storage#bucket", + "labels": { + "cnrm-test": "true", + "label-one": "value-one", + "managed-by-cnrm": "true" + }, + "lifecycle": { + "rule": [ + { + "action": { + "type": "Delete" + }, + "condition": { + "age": 7 + } + } + ] + }, + "location": "US", + "locationType": "multi-region", + "metageneration": "1", + "name": "storagebucket-sample-${uniqueId}", + "projectNumber": "${projectNumber}", + "rpo": "DEFAULT", + "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}", + "softDeletePolicy": { + "effectiveTime": "2024-04-01T12:34:56.123456Z", + "retentionDurationSeconds": "604800" + }, + "storageClass": "STANDARD", + "timeCreated": "2024-04-01T12:34:56.123456Z", + "updated": "2024-04-01T12:34:56.123456Z", + "versioning": { + "enabled": true + } +} + +--- + +GET https://storage.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private, max-age=0, must-revalidate, no-transform +Content-Type: application/json; charset=UTF-8 +Expires: {now+0m} +Server: UploadServer +Vary: Origin +Vary: X-Origin + +{ + "etag": "abcdef0123A=", + "iamConfiguration": { + "bucketPolicyOnly": { + "enabled": false + }, + "publicAccessPrevention": "inherited", + "uniformBucketLevelAccess": { + "enabled": false + } + }, + "id": "000000000000000000000", + "kind": "storage#bucket", + "labels": { + "cnrm-test": "true", + "label-one": "value-one", + "managed-by-cnrm": "true" + }, + "lifecycle": { + "rule": [ + { + "action": { + "type": "Delete" + }, + "condition": { + "age": 7 + } + } + ] + }, + "location": "US", + "locationType": "multi-region", + "metageneration": "1", + "name": "storagebucket-sample-${uniqueId}", + "projectNumber": "${projectNumber}", + "rpo": "DEFAULT", + "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}", + "softDeletePolicy": { + "effectiveTime": "2024-04-01T12:34:56.123456Z", + "retentionDurationSeconds": "604800" + }, + "storageClass": "STANDARD", + "timeCreated": "2024-04-01T12:34:56.123456Z", + "updated": "2024-04-01T12:34:56.123456Z", + 
"versioning": { + "enabled": true + } +} + +--- + +PATCH https://storage.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}?alt=json&prettyPrint=false +Content-Type: application/json +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +{ + "labels": { + "cnrm-test": "true", + "label-one": "value-one", + "managed-by-cnrm": "true", + "newkey": "newval" + }, + "versioning": { + "enabled": false + } +} + +200 OK +Cache-Control: no-cache, no-store, max-age=0, must-revalidate +Content-Type: application/json; charset=UTF-8 +Expires: Mon, 01 Jan 1990 00:00:00 GMT +Pragma: no-cache +Server: UploadServer +Vary: Origin +Vary: X-Origin + +{ + "acl": [ + { + "bucket": "storagebucket-sample-${uniqueId}", + "entity": "project-owners-${projectNumber}", + "etag": "abcdef0123A", + "id": "storagebucket-sample-${uniqueId}/project-owners-${projectNumber}", + "kind": "storage#bucketAccessControl", + "projectTeam": { + "projectNumber": "${projectNumber}", + "team": "owners" + }, + "role": "OWNER", + "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}/acl/project-owners-${projectNumber}" + }, + { + "bucket": "storagebucket-sample-${uniqueId}", + "entity": "project-editors-${projectNumber}", + "etag": "abcdef0123A", + "id": "storagebucket-sample-${uniqueId}/project-editors-${projectNumber}", + "kind": "storage#bucketAccessControl", + "projectTeam": { + "projectNumber": "${projectNumber}", + "team": "editors" + }, + "role": "OWNER", + "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}/acl/project-editors-${projectNumber}" + }, + { + "bucket": "storagebucket-sample-${uniqueId}", + "entity": "project-viewers-${projectNumber}", + "etag": "abcdef0123A", + "id": "storagebucket-sample-${uniqueId}/project-viewers-${projectNumber}", + "kind": "storage#bucketAccessControl", + "projectTeam": { + "projectNumber": "${projectNumber}", + "team": "viewers" + }, + "role": "READER", + "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}/acl/project-viewers-${projectNumber}" + } + ], + "defaultObjectAcl": [ + { + "entity": "project-owners-${projectNumber}", + "etag": "abcdef0123A=", + "kind": "storage#objectAccessControl", + "projectTeam": { + "projectNumber": "${projectNumber}", + "team": "owners" + }, + "role": "OWNER" + }, + { + "entity": "project-editors-${projectNumber}", + "etag": "abcdef0123A=", + "kind": "storage#objectAccessControl", + "projectTeam": { + "projectNumber": "${projectNumber}", + "team": "editors" + }, + "role": "OWNER" + }, + { + "entity": "project-viewers-${projectNumber}", + "etag": "abcdef0123A=", + "kind": "storage#objectAccessControl", + "projectTeam": { + "projectNumber": "${projectNumber}", + "team": "viewers" + }, + "role": "READER" + } + ], + "etag": "abcdef0123A=", + "iamConfiguration": { + "bucketPolicyOnly": { + "enabled": false + }, + "publicAccessPrevention": "inherited", + "uniformBucketLevelAccess": { + "enabled": false + } + }, + "id": "000000000000000000000", + "kind": "storage#bucket", + "labels": { + "cnrm-test": "true", + "label-one": "value-one", + "managed-by-cnrm": "true", + "newkey": "newval" + }, + "lifecycle": { + "rule": [ + { + "action": { + "type": "Delete" + }, + "condition": { + "age": 7 + } + } + ] + }, + "location": "US", + "locationType": "multi-region", + "metageneration": "2", + "name": "storagebucket-sample-${uniqueId}", + "owner": { + "entity": 
"project-owners-${projectNumber}" + }, + "projectNumber": "${projectNumber}", + "rpo": "DEFAULT", + "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}", + "softDeletePolicy": { + "effectiveTime": "2024-04-01T12:34:56.123456Z", + "retentionDurationSeconds": "604800" + }, + "storageClass": "STANDARD", + "timeCreated": "2024-04-01T12:34:56.123456Z", + "updated": "2024-04-01T12:34:56.123456Z", + "versioning": { + "enabled": false + } +} + +--- + +GET https://storage.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private, max-age=0, must-revalidate, no-transform +Content-Type: application/json; charset=UTF-8 +Expires: {now+0m} +Server: UploadServer +Vary: Origin +Vary: X-Origin + +{ + "etag": "abcdef0123A=", + "iamConfiguration": { + "bucketPolicyOnly": { + "enabled": false + }, + "publicAccessPrevention": "inherited", + "uniformBucketLevelAccess": { + "enabled": false + } + }, + "id": "000000000000000000000", + "kind": "storage#bucket", + "labels": { + "cnrm-test": "true", + "label-one": "value-one", + "managed-by-cnrm": "true", + "newkey": "newval" + }, + "lifecycle": { + "rule": [ + { + "action": { + "type": "Delete" + }, + "condition": { + "age": 7 + } + } + ] + }, + "location": "US", + "locationType": "multi-region", + "metageneration": "2", + "name": "storagebucket-sample-${uniqueId}", + "projectNumber": "${projectNumber}", + "rpo": "DEFAULT", + "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}", + "softDeletePolicy": { + "effectiveTime": "2024-04-01T12:34:56.123456Z", + "retentionDurationSeconds": "604800" + }, + "storageClass": "STANDARD", + "timeCreated": "2024-04-01T12:34:56.123456Z", + "updated": "2024-04-01T12:34:56.123456Z", + "versioning": { + "enabled": false + } +} + +--- + +GET https://storage.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}/o?alt=json&prettyPrint=false&versions=true +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private, max-age=0, must-revalidate, no-transform +Content-Type: application/json; charset=UTF-8 +Expires: {now+0m} +Server: UploadServer +Vary: Origin +Vary: X-Origin + +{ + "kind": "storage#objects" +} + +--- + +DELETE https://storage.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +204 No Content +Cache-Control: no-cache, no-store, max-age=0, must-revalidate +Content-Type: application/json +Expires: Mon, 01 Jan 1990 00:00:00 GMT +Pragma: no-cache +Server: UploadServer +Vary: Origin +Vary: X-Origin \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/create.yaml b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/create.yaml new file mode 100644 index 0000000000..dcd0ef40ec --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/create.yaml @@ -0,0 +1,30 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 
2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucket +metadata: + labels: + label-one: "value-one" + name: storagebucket-sample-${uniqueId} +spec: + versioning: + enabled: true + lifecycleRule: + - action: + type: Delete + condition: + age: 7 + softDeletePolicy: + retentionDurationSeconds: 604800 diff --git a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/update.yaml b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/update.yaml new file mode 100644 index 0000000000..c2dce9835d --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/update.yaml @@ -0,0 +1,25 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucket +metadata: + labels: + label-one: "value-one" + newkey: "newval" + name: storagebucket-sample-${uniqueId} +spec: + versioning: + enabled: false + \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/_generated_object_storagebucketzero.golden.yaml b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/_generated_object_storagebucketzero.golden.yaml new file mode 100644 index 0000000000..4109c7a187 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/_generated_object_storagebucketzero.golden.yaml @@ -0,0 +1,41 @@ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucket +metadata: + annotations: + cnrm.cloud.google.com/management-conflict-prevention-policy: none + cnrm.cloud.google.com/project-id: ${projectId} + cnrm.cloud.google.com/state-into-spec: merge + finalizers: + - cnrm.cloud.google.com/finalizer + - cnrm.cloud.google.com/deletion-defender + generation: 4 + labels: + cnrm-test: "true" + label-one: value-one + newkey: newval + name: storagebucket-sample-${uniqueId} + namespace: ${uniqueId} +spec: + location: US + publicAccessPrevention: inherited + resourceID: storagebucket-sample-${uniqueId} + softDeletePolicy: + effectiveTime: "1970-01-01T00:00:00Z" + retentionDurationSeconds: 604800 + storageClass: STANDARD + versioning: + enabled: false +status: + conditions: + - lastTransitionTime: "1970-01-01T00:00:00Z" + message: The resource is up to date + reason: UpToDate + status: "True" + type: Ready + observedGeneration: 4 + observedState: + softDeletePolicy: + effectiveTime: "1970-01-01T00:00:00Z" + retentionDurationSeconds: 604800 + selfLink: https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId} + url: gs://storagebucket-sample-${uniqueId} diff --git a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/_http.log b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/_http.log similarity index 66% rename from pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/_http.log rename to pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/_http.log index 52906c37ea..b80f325a61 100644 --- a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/_http.log @@ -60,7 +60,7 @@ User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terr }, "storageClass": "STANDARD", "versioning": { - "enabled": false + "enabled": true } } @@ -118,7 +118,7 @@ Vary: X-Origin "timeCreated": "2024-04-01T12:34:56.123456Z", "updated": "2024-04-01T12:34:56.123456Z", "versioning": { - "enabled": false + "enabled": true } } @@ -180,7 +180,7 @@ Vary: X-Origin "timeCreated": "2024-04-01T12:34:56.123456Z", "updated": "2024-04-01T12:34:56.123456Z", "versioning": { - "enabled": false + "enabled": true } } @@ -200,11 +200,8 @@ User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terr "lifecycle": { "rule": [] }, - "softDeletePolicy": { - "retentionDurationSeconds": "0" - }, "versioning": { - "enabled": true + "enabled": false } } @@ -327,7 +324,7 @@ Vary: X-Origin "timeCreated": "2024-04-01T12:34:56.123456Z", "updated": "2024-04-01T12:34:56.123456Z", "versioning": { - "enabled": true + "enabled": 
false } } @@ -378,193 +375,7 @@ Vary: X-Origin "timeCreated": "2024-04-01T12:34:56.123456Z", "updated": "2024-04-01T12:34:56.123456Z", "versioning": { - "enabled": true - } -} - ---- - -PATCH https://storage.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}?alt=json&prettyPrint=false -Content-Type: application/json -User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager - -{ - "softDeletePolicy": { - "retentionDurationSeconds": "0" - } -} - -200 OK -Cache-Control: no-cache, no-store, max-age=0, must-revalidate -Content-Type: application/json; charset=UTF-8 -Expires: Mon, 01 Jan 1990 00:00:00 GMT -Pragma: no-cache -Server: UploadServer -Vary: Origin -Vary: X-Origin - -{ - "acl": [ - { - "bucket": "storagebucket-sample-${uniqueId}", - "entity": "project-owners-${projectNumber}", - "etag": "abcdef0123A", - "id": "storagebucket-sample-${uniqueId}/project-owners-${projectNumber}", - "kind": "storage#bucketAccessControl", - "projectTeam": { - "projectNumber": "${projectNumber}", - "team": "owners" - }, - "role": "OWNER", - "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}/acl/project-owners-${projectNumber}" - }, - { - "bucket": "storagebucket-sample-${uniqueId}", - "entity": "project-editors-${projectNumber}", - "etag": "abcdef0123A", - "id": "storagebucket-sample-${uniqueId}/project-editors-${projectNumber}", - "kind": "storage#bucketAccessControl", - "projectTeam": { - "projectNumber": "${projectNumber}", - "team": "editors" - }, - "role": "OWNER", - "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}/acl/project-editors-${projectNumber}" - }, - { - "bucket": "storagebucket-sample-${uniqueId}", - "entity": "project-viewers-${projectNumber}", - "etag": "abcdef0123A", - "id": "storagebucket-sample-${uniqueId}/project-viewers-${projectNumber}", - "kind": "storage#bucketAccessControl", - "projectTeam": { - "projectNumber": "${projectNumber}", - "team": "viewers" - }, - "role": "READER", - "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}/acl/project-viewers-${projectNumber}" - } - ], - "defaultObjectAcl": [ - { - "entity": "project-owners-${projectNumber}", - "etag": "abcdef0123A=", - "kind": "storage#objectAccessControl", - "projectTeam": { - "projectNumber": "${projectNumber}", - "team": "owners" - }, - "role": "OWNER" - }, - { - "entity": "project-editors-${projectNumber}", - "etag": "abcdef0123A=", - "kind": "storage#objectAccessControl", - "projectTeam": { - "projectNumber": "${projectNumber}", - "team": "editors" - }, - "role": "OWNER" - }, - { - "entity": "project-viewers-${projectNumber}", - "etag": "abcdef0123A=", - "kind": "storage#objectAccessControl", - "projectTeam": { - "projectNumber": "${projectNumber}", - "team": "viewers" - }, - "role": "READER" - } - ], - "etag": "abcdef0123A=", - "iamConfiguration": { - "bucketPolicyOnly": { - "enabled": false - }, - "publicAccessPrevention": "inherited", - "uniformBucketLevelAccess": { - "enabled": false - } - }, - "id": "000000000000000000000", - "kind": "storage#bucket", - "labels": { - "cnrm-test": "true", - "label-one": "value-one", - "managed-by-cnrm": "true", - "newkey": "newval" - }, - "location": "US", - "locationType": "multi-region", - "metageneration": "3", - "name": "storagebucket-sample-${uniqueId}", - "owner": { - "entity": "project-owners-${projectNumber}" - }, - "projectNumber": "${projectNumber}", - "rpo": 
"DEFAULT", - "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}", - "softDeletePolicy": { - "effectiveTime": "2024-04-01T12:34:56.123456Z", - "retentionDurationSeconds": "604800" - }, - "storageClass": "STANDARD", - "timeCreated": "2024-04-01T12:34:56.123456Z", - "updated": "2024-04-01T12:34:56.123456Z", - "versioning": { - "enabled": true - } -} - ---- - -GET https://storage.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}?alt=json&prettyPrint=false -User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager - -200 OK -Cache-Control: private, max-age=0, must-revalidate, no-transform -Content-Type: application/json; charset=UTF-8 -Expires: {now+0m} -Server: UploadServer -Vary: Origin -Vary: X-Origin - -{ - "etag": "abcdef0123A=", - "iamConfiguration": { - "bucketPolicyOnly": { - "enabled": false - }, - "publicAccessPrevention": "inherited", - "uniformBucketLevelAccess": { - "enabled": false - } - }, - "id": "000000000000000000000", - "kind": "storage#bucket", - "labels": { - "cnrm-test": "true", - "label-one": "value-one", - "managed-by-cnrm": "true", - "newkey": "newval" - }, - "location": "US", - "locationType": "multi-region", - "metageneration": "3", - "name": "storagebucket-sample-${uniqueId}", - "projectNumber": "${projectNumber}", - "rpo": "DEFAULT", - "selfLink": "https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId}", - "softDeletePolicy": { - "effectiveTime": "2024-04-01T12:34:56.123456Z", - "retentionDurationSeconds": "604800" - }, - "storageClass": "STANDARD", - "timeCreated": "2024-04-01T12:34:56.123456Z", - "updated": "2024-04-01T12:34:56.123456Z", - "versioning": { - "enabled": true + "enabled": false } } diff --git a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/create.yaml b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/create.yaml new file mode 100644 index 0000000000..dcd0ef40ec --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/create.yaml @@ -0,0 +1,30 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucket +metadata: + labels: + label-one: "value-one" + name: storagebucket-sample-${uniqueId} +spec: + versioning: + enabled: true + lifecycleRule: + - action: + type: Delete + condition: + age: 7 + softDeletePolicy: + retentionDurationSeconds: 604800 diff --git a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/update.yaml b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/update.yaml new file mode 100644 index 0000000000..7e45a0c57c --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/update.yaml @@ -0,0 +1,26 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucket +metadata: + labels: + label-one: "value-one" + newkey: "newval" + name: storagebucket-sample-${uniqueId} +spec: + versioning: + enabled: false + lifecycleRule: [] + softDeletePolicy: {} diff --git a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/_generated_object_storagebucket.golden.yaml b/pkg/test/resourcefixture/testdata/reconcileintervalannotations/storagebucket/_generated_object_storagebucket.golden.yaml similarity index 88% rename from pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/_generated_object_storagebucket.golden.yaml rename to pkg/test/resourcefixture/testdata/reconcileintervalannotations/storagebucket/_generated_object_storagebucket.golden.yaml index f7b3c3002a..80f40114e0 100644 --- a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/_generated_object_storagebucket.golden.yaml +++ b/pkg/test/resourcefixture/testdata/reconcileintervalannotations/storagebucket/_generated_object_storagebucket.golden.yaml @@ -4,6 +4,7 @@ metadata: annotations: cnrm.cloud.google.com/management-conflict-prevention-policy: none cnrm.cloud.google.com/project-id: ${projectId} + cnrm.cloud.google.com/reconcile-interval-in-seconds: "10" cnrm.cloud.google.com/state-into-spec: merge finalizers: - cnrm.cloud.google.com/finalizer @@ -11,8 +12,6 @@ metadata: generation: 4 labels: cnrm-test: "true" - label-one: value-one - newkey: newval name: storagebucket-sample-${uniqueId} namespace: ${uniqueId} spec: @@ -21,7 +20,7 @@ spec: resourceID: storagebucket-sample-${uniqueId} softDeletePolicy: effectiveTime: "1970-01-01T00:00:00Z" - retentionDurationSeconds: 0 + retentionDurationSeconds: 604800 storageClass: STANDARD versioning: enabled: true @@ -36,6 +35,6 @@ status: observedState: softDeletePolicy: effectiveTime: "1970-01-01T00:00:00Z" - retentionDurationSeconds: 0 + retentionDurationSeconds: 604800 selfLink: https://www.googleapis.com/storage/v1/b/storagebucket-sample-${uniqueId} url: gs://storagebucket-sample-${uniqueId} From 936da0bc772a2b7d482f3691f3afcde451a8363d Mon Sep 17 00:00:00 2001 From: justinsb Date: Fri, 21 Jun 2024 20:31:07 -0400 Subject: [PATCH 012/101] mockgcp: more fidelity 
on StorageBucket --- mockgcp/mockstorage/bucket.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/mockgcp/mockstorage/bucket.go b/mockgcp/mockstorage/bucket.go index cca69871af..ea2b43f727 100644 --- a/mockgcp/mockstorage/bucket.go +++ b/mockgcp/mockstorage/bucket.go @@ -213,6 +213,20 @@ func (s *buckets) PatchBucket(ctx context.Context, req *pb.PatchBucketRequest) ( if patch.Versioning != nil { obj.Versioning = patch.Versioning } + + if patch.SoftDeletePolicy != nil { + if patch.SoftDeletePolicy.RetentionDurationSeconds != nil { + if obj.SoftDeletePolicy == nil { + obj.SoftDeletePolicy = &pb.BucketSoftDeletePolicy{} + } + obj.SoftDeletePolicy.RetentionDurationSeconds = patch.SoftDeletePolicy.RetentionDurationSeconds + + // If the value is zero, we clear the effectiveTime (apparently) + if obj.GetSoftDeletePolicy().GetRetentionDurationSeconds() == 0 { + obj.SoftDeletePolicy.EffectiveTime = nil + } + } + } } // Remove empty lifecycle (no rules) From a3506d191a2b50a856f7c06bea2711f533274521 Mon Sep 17 00:00:00 2001 From: justinsb Date: Thu, 20 Jun 2024 18:40:33 -0400 Subject: [PATCH 013/101] monitoringdashboard: add support for text.style Only implemented in the new reconciler, but (helpfully) ignored by the old reconciler. --- .../v1beta1/monitoringdashboard_types.go | 2 - .../v1beta1/zz_generated.deepcopy.go | 5 + ...ards.monitoring.cnrm.cloud.google.com.yaml | 131 +++++++ docs/releasenotes/release-1.120.md | 3 + .../v1beta1/monitoringdashboard_types.go | 34 ++ .../v1beta1/zz_generated.deepcopy.go | 56 +++ .../dashboard_generated.mappings.go | 4 +- .../direct/monitoring/roundtrip_test.go | 2 - ...ted_export_monitoringdashboardbasic.golden | 1 + .../monitoringdashboardbasic/_http.log | 275 ++++++++++++++ ...ated_export_monitoringdashboardfull.golden | 8 + ...object_monitoringdashboardfull.golden.yaml | 8 + .../monitoringdashboardfull/_http.log | 31 +- .../monitoringdashboardfull/create.yaml | 8 + ...ated_export_monitoringdashboardrefs.golden | 1 + .../monitoringdashboardrefs/_http.log | 272 ++++++++++++++ .../monitoring/monitoringdashboard.md | 352 ++++++++++++++++++ 17 files changed, 1184 insertions(+), 9 deletions(-) diff --git a/apis/monitoring/v1beta1/monitoringdashboard_types.go b/apis/monitoring/v1beta1/monitoringdashboard_types.go index 2418033947..b73adfe300 100644 --- a/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -263,10 +263,8 @@ type Text struct { // How the text content is formatted. 
Format *string `json:"format,omitempty"` - /*NOTYET // How the text is styled Style *Text_TextStyle `json:"style,omitempty"` - */ } // +kcc:proto=google.monitoring.dashboard.v1.Text.TextStyle diff --git a/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/apis/monitoring/v1beta1/zz_generated.deepcopy.go index ef72e09ff1..cf6c976c05 100644 --- a/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -882,6 +882,11 @@ func (in *Text) DeepCopyInto(out *Text) { *out = new(string) **out = **in } + if in.Style != nil { + in, out := &in.Style, &out.Style + *out = new(Text_TextStyle) + (*in).DeepCopyInto(*out) + } return } diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml index b407fc257f..c1cf5c63ae 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml @@ -784,6 +784,39 @@ spec: format: description: How the text content is formatted. type: string + style: + description: How the text is styled + properties: + backgroundColor: + description: The background color as a hex + string. "#RRGGBB" or "#RGB" + type: string + fontSize: + description: Font sizes for both the title + and content. The title will still be larger + relative to the content. + type: string + horizontalAlignment: + description: The horizontal alignment of both + the title and content + type: string + padding: + description: The amount of padding around + the widget + type: string + pointerLocation: + description: The pointer location for this + widget (also sometimes called a "tail") + type: string + textColor: + description: The text color as a hex string. + "#RRGGBB" or "#RGB" + type: string + verticalAlignment: + description: The vertical alignment of both + the title and content + type: string + type: object type: object title: description: Optional. The title of the widget. @@ -2148,6 +2181,38 @@ spec: format: description: How the text content is formatted. type: string + style: + description: How the text is styled + properties: + backgroundColor: + description: The background color as a hex string. + "#RRGGBB" or "#RGB" + type: string + fontSize: + description: Font sizes for both the title and content. + The title will still be larger relative to the + content. + type: string + horizontalAlignment: + description: The horizontal alignment of both the + title and content + type: string + padding: + description: The amount of padding around the widget + type: string + pointerLocation: + description: The pointer location for this widget + (also sometimes called a "tail") + type: string + textColor: + description: The text color as a hex string. "#RRGGBB" + or "#RGB" + type: string + verticalAlignment: + description: The vertical alignment of both the + title and content + type: string + type: object type: object title: description: Optional. The title of the widget. @@ -3496,6 +3561,39 @@ spec: format: description: How the text content is formatted. type: string + style: + description: How the text is styled + properties: + backgroundColor: + description: The background color as a hex string. 
+ "#RRGGBB" or "#RGB" + type: string + fontSize: + description: Font sizes for both the title and + content. The title will still be larger relative + to the content. + type: string + horizontalAlignment: + description: The horizontal alignment of both + the title and content + type: string + padding: + description: The amount of padding around the + widget + type: string + pointerLocation: + description: The pointer location for this widget + (also sometimes called a "tail") + type: string + textColor: + description: The text color as a hex string. + "#RRGGBB" or "#RGB" + type: string + verticalAlignment: + description: The vertical alignment of both + the title and content + type: string + type: object type: object title: description: Optional. The title of the widget. @@ -4936,6 +5034,39 @@ spec: format: description: How the text content is formatted. type: string + style: + description: How the text is styled + properties: + backgroundColor: + description: The background color as a hex + string. "#RRGGBB" or "#RGB" + type: string + fontSize: + description: Font sizes for both the title + and content. The title will still be larger + relative to the content. + type: string + horizontalAlignment: + description: The horizontal alignment of both + the title and content + type: string + padding: + description: The amount of padding around + the widget + type: string + pointerLocation: + description: The pointer location for this + widget (also sometimes called a "tail") + type: string + textColor: + description: The text color as a hex string. + "#RRGGBB" or "#RGB" + type: string + verticalAlignment: + description: The vertical alignment of both + the title and content + type: string + type: object type: object title: description: Optional. The title of the widget. diff --git a/docs/releasenotes/release-1.120.md b/docs/releasenotes/release-1.120.md index 39cdd2e2da..a8f1a5e07f 100644 --- a/docs/releasenotes/release-1.120.md +++ b/docs/releasenotes/release-1.120.md @@ -24,6 +24,9 @@ output fields from GCP APIs are in `status.observedState.*` * `MonitoringAlertPolicy` * Added `spec.severity` field. +* `MonitoringDashboard` + * Added `style` fields to text widgets. + * `StorageBucket` * Added `spec.softDeletePolicy` field. * Added `status.observedState.softDeletePolicy` field. diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go index d9af3a5a4f..d66ba93636 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -360,6 +360,36 @@ type DashboardSparkChartView struct { SparkChartType string `json:"sparkChartType"` } +type DashboardStyle struct { + /* The background color as a hex string. "#RRGGBB" or "#RGB" */ + // +optional + BackgroundColor *string `json:"backgroundColor,omitempty"` + + /* Font sizes for both the title and content. The title will still be larger relative to the content. 
*/ + // +optional + FontSize *string `json:"fontSize,omitempty"` + + /* The horizontal alignment of both the title and content */ + // +optional + HorizontalAlignment *string `json:"horizontalAlignment,omitempty"` + + /* The amount of padding around the widget */ + // +optional + Padding *string `json:"padding,omitempty"` + + /* The pointer location for this widget (also sometimes called a "tail") */ + // +optional + PointerLocation *string `json:"pointerLocation,omitempty"` + + /* The text color as a hex string. "#RRGGBB" or "#RGB" */ + // +optional + TextColor *string `json:"textColor,omitempty"` + + /* The vertical alignment of both the title and content */ + // +optional + VerticalAlignment *string `json:"verticalAlignment,omitempty"` +} + type DashboardText struct { /* The text content to be displayed. */ // +optional @@ -368,6 +398,10 @@ type DashboardText struct { /* How the text content is formatted. */ // +optional Format *string `json:"format,omitempty"` + + /* How the text is styled */ + // +optional + Style *DashboardStyle `json:"style,omitempty"` } type DashboardThresholds struct { diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go index 84c6e5234c..f170f0ee73 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -1023,6 +1023,57 @@ func (in *DashboardSparkChartView) DeepCopy() *DashboardSparkChartView { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardStyle) DeepCopyInto(out *DashboardStyle) { + *out = *in + if in.BackgroundColor != nil { + in, out := &in.BackgroundColor, &out.BackgroundColor + *out = new(string) + **out = **in + } + if in.FontSize != nil { + in, out := &in.FontSize, &out.FontSize + *out = new(string) + **out = **in + } + if in.HorizontalAlignment != nil { + in, out := &in.HorizontalAlignment, &out.HorizontalAlignment + *out = new(string) + **out = **in + } + if in.Padding != nil { + in, out := &in.Padding, &out.Padding + *out = new(string) + **out = **in + } + if in.PointerLocation != nil { + in, out := &in.PointerLocation, &out.PointerLocation + *out = new(string) + **out = **in + } + if in.TextColor != nil { + in, out := &in.TextColor, &out.TextColor + *out = new(string) + **out = **in + } + if in.VerticalAlignment != nil { + in, out := &in.VerticalAlignment, &out.VerticalAlignment + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardStyle. +func (in *DashboardStyle) DeepCopy() *DashboardStyle { + if in == nil { + return nil + } + out := new(DashboardStyle) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DashboardText) DeepCopyInto(out *DashboardText) { *out = *in @@ -1036,6 +1087,11 @@ func (in *DashboardText) DeepCopyInto(out *DashboardText) { *out = new(string) **out = **in } + if in.Style != nil { + in, out := &in.Style, &out.Style + *out = new(DashboardStyle) + (*in).DeepCopyInto(*out) + } return } diff --git a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go index d12fa0d4ad..48b6172f21 100644 --- a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go @@ -596,7 +596,7 @@ func Text_FromProto(mapCtx *MapContext, in *pb.Text) *krm.Text { out := &krm.Text{} out.Content = LazyPtr(in.GetContent()) out.Format = Enum_FromProto(mapCtx, in.Format) - // MISSING: Style + out.Style = Text_TextStyle_FromProto(mapCtx, in.GetStyle()) return out } func Text_ToProto(mapCtx *MapContext, in *krm.Text) *pb.Text { @@ -606,7 +606,7 @@ func Text_ToProto(mapCtx *MapContext, in *krm.Text) *pb.Text { out := &pb.Text{} out.Content = ValueOf(in.Content) out.Format = Enum_ToProto[pb.Text_Format](mapCtx, in.Format) - // MISSING: Style + out.Style = Text_TextStyle_ToProto(mapCtx, in.Style) return out } func Text_TextStyle_FromProto(mapCtx *MapContext, in *pb.Text_TextStyle) *krm.Text_TextStyle { diff --git a/pkg/controller/direct/monitoring/roundtrip_test.go b/pkg/controller/direct/monitoring/roundtrip_test.go index 456c593945..2d86cf5e14 100644 --- a/pkg/controller/direct/monitoring/roundtrip_test.go +++ b/pkg/controller/direct/monitoring/roundtrip_test.go @@ -90,8 +90,6 @@ func FuzzMonitoringDashboardSpec(f *testing.F) { unimplementedFields.Insert(widgetPath + ".incident_list") - unimplementedFields.Insert(widgetPath + ".text.style") - unimplementedFields.Insert(widgetPath + ".id") } diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_generated_export_monitoringdashboardbasic.golden b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_generated_export_monitoringdashboardbasic.golden index bbe56ec3d1..6167bc9cb8 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_generated_export_monitoringdashboardbasic.golden +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_generated_export_monitoringdashboardbasic.golden @@ -24,6 +24,7 @@ spec: - text: content: Widget 2 format: MARKDOWN + style: {} - title: Widget 3 xyChart: dataSets: diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_http.log index c669dd807f..85950b510a 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardbasic/_http.log @@ -304,6 +304,281 @@ Content-Type: application/json User-Agent: kcc/controller-manager x-goog-request-params: dashboard.name=projects%2F${projectId}%2Fdashboards%2Fmonitoringdashboard-${uniqueId} +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": 1, + "timeSeriesQuery": { + 
"timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": 2 + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "100s", + "yAxis": { + "label": "y1Axis", + "scale": 1 + } + } + }, + { + "text": { + "content": "Widget 2", + "format": 1 + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": 1, + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": 2 + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "60s", + "yAxis": { + "label": "y1Axis", + "scale": 1 + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "100s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN", + "style": {} + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "60s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "etag": "abcdef0123A=", + "name": "projects/${projectNumber}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +User-Agent: kcc/controller-manager +x-goog-request-params: name=projects%2F${projectId}%2Fdashboards%2Fmonitoringdashboard-${uniqueId} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": 
"metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "100s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN", + "style": {} + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "60s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "etag": "abcdef0123A=", + "name": "projects/${projectNumber}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +PATCH https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +User-Agent: kcc/controller-manager +x-goog-request-params: dashboard.name=projects%2F${projectId}%2Fdashboards%2Fmonitoringdashboard-${uniqueId} + { "columnLayout": { "columns": [ diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden index b78484894e..682cd09570 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden @@ -24,6 +24,14 @@ spec: - text: content: Widget 2 format: MARKDOWN + style: + backgroundColor: '#000' + fontSize: FS_LARGE + horizontalAlignment: H_CENTER + padding: P_MEDIUM + pointerLocation: PL_TOP_LEFT + textColor: '#fff' + verticalAlignment: V_CENTER - title: Widget 3 xyChart: dataSets: diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml index bb77be910a..3a80d17207 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml @@ -33,6 +33,14 @@ spec: - text: content: Widget 2 format: MARKDOWN + style: + backgroundColor: '#000' + fontSize: FS_LARGE + horizontalAlignment: H_CENTER + padding: P_MEDIUM + pointerLocation: PL_TOP_LEFT + textColor: '#fff' + verticalAlignment: V_CENTER - title: Widget 3 xyChart: dataSets: diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log 
b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log index 04691b8a38..e64f2d7e68 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log @@ -62,7 +62,16 @@ x-goog-request-params: parent=projects%2F${projectId} { "text": { "content": "Widget 2", - "format": 1 + "format": 1, + "style": { + "backgroundColor": "#000", + "fontSize": 4, + "horizontalAlignment": 2, + "padding": 3, + "pointerLocation": 5, + "textColor": "#fff", + "verticalAlignment": 2 + } } }, { @@ -151,7 +160,15 @@ X-Xss-Protection: 0 "text": { "content": "Widget 2", "format": "MARKDOWN", - "style": {} + "style": { + "backgroundColor": "#000", + "fontSize": "FS_LARGE", + "horizontalAlignment": "H_CENTER", + "padding": "P_MEDIUM", + "pointerLocation": "PL_TOP_LEFT", + "textColor": "#fff", + "verticalAlignment": "V_CENTER" + } } }, { @@ -249,7 +266,15 @@ X-Xss-Protection: 0 "text": { "content": "Widget 2", "format": "MARKDOWN", - "style": {} + "style": { + "backgroundColor": "#000", + "fontSize": "FS_LARGE", + "horizontalAlignment": "H_CENTER", + "padding": "P_MEDIUM", + "pointerLocation": "PL_TOP_LEFT", + "textColor": "#fff", + "verticalAlignment": "V_CENTER" + } } }, { diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml index 57d88c03ba..3e3940202c 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml @@ -39,6 +39,14 @@ spec: - text: content: "Widget 2" format: "MARKDOWN" + style: + backgroundColor: "#000" + textColor: "#fff" + horizontalAlignment: H_CENTER + verticalAlignment: V_CENTER + fontSize: FS_LARGE + padding: P_MEDIUM + pointerLocation: PL_TOP_LEFT - title: "Widget 3" xyChart: dataSets: diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_generated_export_monitoringdashboardrefs.golden b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_generated_export_monitoringdashboardrefs.golden index f7ff2add8f..52659b8118 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_generated_export_monitoringdashboardrefs.golden +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_generated_export_monitoringdashboardrefs.golden @@ -24,6 +24,7 @@ spec: - text: content: Widget 2 format: MARKDOWN + style: {} - title: Widget 3 xyChart: dataSets: diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_http.log index 095f6b14f9..2ac39bcd75 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardrefs/_http.log @@ -453,6 +453,278 @@ 
Content-Type: application/json User-Agent: kcc/controller-manager x-goog-request-params: dashboard.name=projects%2Fother${uniqueId}%2Fdashboards%2Fmonitoringdashboard-${uniqueId} +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": 1, + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": 2 + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": 1 + } + } + }, + { + "text": { + "content": "Widget 2", + "format": 1 + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": 3, + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": 2 + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "yAxis": { + "label": "y1Axis", + "scale": 1 + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/other${uniqueId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "name": "projects/other${uniqueId}/dashboards/monitoringdashboard-${uniqueId}" +} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN", + "style": {} + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/other${uniqueId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "etag": "abcdef0123A=", + "name": "projects/${projectNumber}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +GET https://monitoring.googleapis.com/v1/projects/other${uniqueId}/dashboards/monitoringdashboard-${uniqueId}?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +User-Agent: kcc/controller-manager +x-goog-request-params: name=projects%2Fother${uniqueId}%2Fdashboards%2Fmonitoringdashboard-${uniqueId} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + 
"columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "0s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN", + "style": {} + } + }, + { + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/other${uniqueId}" + ] + }, + "title": "Widget 4" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-sample", + "etag": "abcdef0123A=", + "name": "projects/${projectNumber}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +PATCH https://monitoring.googleapis.com/v1/projects/other${uniqueId}/dashboards/monitoringdashboard-${uniqueId}?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +User-Agent: kcc/controller-manager +x-goog-request-params: dashboard.name=projects%2Fother${uniqueId}%2Fdashboards%2Fmonitoringdashboard-${uniqueId} + { "columnLayout": { "columns": [ diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md index 54d8828007..8ab2101e54 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md @@ -152,6 +152,14 @@ columnLayout: text: content: string format: string + style: + backgroundColor: string + fontSize: string + horizontalAlignment: string + padding: string + pointerLocation: string + textColor: string + verticalAlignment: string title: string xyChart: chartOptions: @@ -295,6 +303,14 @@ gridLayout: text: content: string format: string + style: + backgroundColor: string + fontSize: string + horizontalAlignment: string + padding: string + pointerLocation: string + textColor: string + verticalAlignment: string title: string xyChart: chartOptions: @@ -439,6 +455,14 @@ mosaicLayout: text: content: string format: string + style: + backgroundColor: string + fontSize: string + horizontalAlignment: string + padding: string + pointerLocation: string + textColor: string + verticalAlignment: string title: string xyChart: chartOptions: @@ -591,6 +615,14 @@ rowLayout: text: content: string format: string + style: + backgroundColor: string + fontSize: string + horizontalAlignment: string + padding: string + pointerLocation: string + textColor: string + verticalAlignment: string title: string xyChart: chartOptions: @@ -1690,6 +1722,86 @@ rowLayout:

[Note: the generated HTML table markup in this file was stripped during extraction; only the text of the affected rows survives. Each of the four hunks (for columnLayout, gridLayout, mosaicLayout, and rowLayout) inserts the same documentation rows between the existing `text.format` row ("How the text content is formatted.") and the `title` row of its widget table.]

Rows added under `columnLayout.columns[].widgets[].text`:

  text.style                       Optional   object   How the text is styled
  text.style.backgroundColor       Optional   string   The background color as a hex string. "#RRGGBB" or "#RGB"
  text.style.fontSize              Optional   string   Font sizes for both the title and content. The title will still be larger relative to the content.
  text.style.horizontalAlignment   Optional   string   The horizontal alignment of both the title and content
  text.style.padding               Optional   string   The amount of padding around the widget
  text.style.pointerLocation       Optional   string   The pointer location for this widget (also sometimes called a "tail")
  text.style.textColor             Optional   string   The text color as a hex string. "#RRGGBB" or "#RGB"
  text.style.verticalAlignment     Optional   string   The vertical alignment of both the title and content

@@ -3597,6 +3709,86 @@ rowLayout:

  Same rows added under `gridLayout.widgets[].text`.

@@ -5514,6 +5706,86 @@ rowLayout:

  Same rows added under `mosaicLayout.tiles[].widget.text`.

@@ -7521,6 +7793,86 @@ rowLayout:

  Same rows added under `rowLayout.rows[].widgets[].text`, immediately before the existing `rowLayout.rows[].widgets[].title` row.
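For reference, a minimal sketch of a MonitoringDashboard text widget exercising the new style fields (style values mirror the monitoringdashboardfull create.yaml in this patch; the resource name, display name, and single-column layout are illustrative only):

  apiVersion: monitoring.cnrm.cloud.google.com/v1beta1
  kind: MonitoringDashboard
  metadata:
    name: monitoringdashboard-styled-text   # illustrative name
  spec:
    displayName: "styled-text-example"      # illustrative display name
    columnLayout:
      columns:
      - widgets:
        - text:
            content: "Hello"
            format: "MARKDOWN"
            style:                           # values taken from the monitoringdashboardfull fixture
              backgroundColor: "#000"
              textColor: "#fff"
              horizontalAlignment: H_CENTER
              verticalAlignment: V_CENTER
              fontSize: FS_LARGE
              padding: P_MEDIUM
              pointerLocation: PL_TOP_LEFT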

From e7b866d5795d0c7b0a12afddf337c337d17dc041 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Mon, 17 Jun 2024 23:59:02 +0000 Subject: [PATCH 014/101] chore: promote CloudIDSEndpoint to beta Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- ...points.cloudids.cnrm.cloud.google.com.yaml | 210 +++++++++++++++++- .../servicemappings/cloudids.yaml | 5 +- .../resource-autogen/allowlist/allowlist.go | 1 - .../embed/generated/assets_vfsdata.go | 8 - 4 files changed, 201 insertions(+), 23 deletions(-) rename {scripts/resource-autogen/generated => config}/servicemappings/cloudids.yaml (95%) diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudidsendpoints.cloudids.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudidsendpoints.cloudids.cnrm.cloud.google.com.yaml index ee8474aa80..8c18da9c9b 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudidsendpoints.cloudids.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudidsendpoints.cloudids.cnrm.cloud.google.com.yaml @@ -6,7 +6,7 @@ metadata: creationTimestamp: null labels: cnrm.cloud.google.com/managed-by-kcc: "true" - cnrm.cloud.google.com/stability-level: alpha + cnrm.cloud.google.com/stability-level: stable cnrm.cloud.google.com/system: "true" cnrm.cloud.google.com/tf2crd: "true" name: cloudidsendpoints.cloudids.cnrm.cloud.google.com @@ -40,7 +40,7 @@ spec: jsonPath: .status.conditions[?(@.type=='Ready')].lastTransitionTime name: Status Age type: date - name: v1alpha1 + name: v1beta1 schema: openAPIV3Schema: properties: @@ -172,16 +172,187 @@ spec: type: string type: object type: array - createTime: - description: Creation timestamp in RFC 3339 text format. + observedGeneration: + description: ObservedGeneration is the generation of the resource + that was most recently observed by the Config Connector controller. + If this is equal to metadata.generation, then that means that the + current reported status reflects the most recent desired state of + the resource. + type: integer + observedState: + description: The observed state of the underlying GCP resource. + properties: + createTime: + description: Creation timestamp in RFC 3339 text format. + type: string + endpointForwardingRule: + description: URL of the endpoint's network address to which traffic + is to be sent by Packet Mirroring. + type: string + endpointIp: + description: Internal IP address of the endpoint's network entry + point. + type: string + updateTime: + description: Last update timestamp in RFC 3339 text format. 
+ type: string + type: object + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: When 'True', the most recent reconcile of the resource succeeded + jsonPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - description: The reason for the value in 'Ready' + jsonPath: .status.conditions[?(@.type=='Ready')].reason + name: Status + type: string + - description: The last transition time for the value in 'Status' + jsonPath: .status.conditions[?(@.type=='Ready')].lastTransitionTime + name: Status Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'apiVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + description: + description: Immutable. An optional description of the endpoint. type: string - endpointForwardingRule: - description: URL of the endpoint's network address to which traffic - is to be sent by Packet Mirroring. + location: + description: Immutable. The location for the endpoint. type: string - endpointIp: - description: Internal IP address of the endpoint's network entry point. + networkRef: + description: |- + Immutable. Name of the VPC network that is connected + to the IDS endpoint. This can either contain the VPC network name + itself (like "src-net") or the full URL to the network (like "projects/{project_id}/global/networks/src-net"). + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: 'Allowed value: The `name` field of a `ComputeNetwork` + resource.' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + type: object + projectRef: + description: The project that this resource belongs to. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: 'Allowed value: The `name` field of a `Project` resource.' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + type: object + resourceID: + description: Immutable. Optional. The name of the resource. Used for + creation and acquisition. 
When unset, the value of `metadata.name` + is used as the default. + type: string + severity: + description: 'Immutable. The minimum alert severity level that is + reported by the endpoint. Possible values: ["INFORMATIONAL", "LOW", + "MEDIUM", "HIGH", "CRITICAL"].' type: string + threatExceptions: + description: 'Configuration for threat IDs excluded from generating + alerts. Limit: 99 IDs.' + items: + type: string + type: array + required: + - location + - networkRef + - projectRef + - severity + type: object + status: + properties: + conditions: + description: Conditions represent the latest available observation + of the resource's current state. + items: + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + type: string + message: + description: Human-readable message indicating details about + last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's + last transition. + type: string + status: + description: Status is the status of the condition. Can be True, + False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + type: array observedGeneration: description: ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. @@ -189,9 +360,24 @@ spec: current reported status reflects the most recent desired state of the resource. type: integer - updateTime: - description: Last update timestamp in RFC 3339 text format. - type: string + observedState: + description: The observed state of the underlying GCP resource. + properties: + createTime: + description: Creation timestamp in RFC 3339 text format. + type: string + endpointForwardingRule: + description: URL of the endpoint's network address to which traffic + is to be sent by Packet Mirroring. + type: string + endpointIp: + description: Internal IP address of the endpoint's network entry + point. + type: string + updateTime: + description: Last update timestamp in RFC 3339 text format. + type: string + type: object type: object required: - spec diff --git a/scripts/resource-autogen/generated/servicemappings/cloudids.yaml b/config/servicemappings/cloudids.yaml similarity index 95% rename from scripts/resource-autogen/generated/servicemappings/cloudids.yaml rename to config/servicemappings/cloudids.yaml index 7f469a61d0..acd147faac 100644 --- a/scripts/resource-autogen/generated/servicemappings/cloudids.yaml +++ b/config/servicemappings/cloudids.yaml @@ -1,4 +1,4 @@ -# Copyright 2022 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -24,10 +24,11 @@ spec: resources: - name: google_cloud_ids_endpoint kind: CloudIDSEndpoint - autoGenerated: true idTemplate: "projects/{{project}}/locations/{{location}}/endpoints/{{name}}" idTemplateCanBeUsedToMatchResourceName: false resourceAvailableInAssetInventory: false + v1alpha1ToV1beta1: true + storageVersion: v1alpha1 metadataMapping: name: name resourceID: diff --git a/scripts/resource-autogen/allowlist/allowlist.go b/scripts/resource-autogen/allowlist/allowlist.go index 41794d6bc1..b50e512990 100644 --- a/scripts/resource-autogen/allowlist/allowlist.go +++ b/scripts/resource-autogen/allowlist/allowlist.go @@ -69,7 +69,6 @@ var ( "cloud_asset/google_cloud_asset_folder_feed", "cloud_asset/google_cloud_asset_organization_feed", "cloud_asset/google_cloud_asset_project_feed", - "cloud_ids/google_cloud_ids_endpoint", "cloud_tasks/google_cloud_tasks_queue", "cloudfunctions2/google_cloudfunctions2_function", "cloudiot/google_cloudiot_device", diff --git a/scripts/resource-autogen/servicemapping/embed/generated/assets_vfsdata.go b/scripts/resource-autogen/servicemapping/embed/generated/assets_vfsdata.go index 9b27216f3e..5198091c6f 100644 --- a/scripts/resource-autogen/servicemapping/embed/generated/assets_vfsdata.go +++ b/scripts/resource-autogen/servicemapping/embed/generated/assets_vfsdata.go @@ -199,13 +199,6 @@ var Assets = func() http.FileSystem { compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x54\x4d\x6f\xe2\x48\x10\xbd\xfb\x57\x3c\xc1\x65\x57\x4a\x20\xc9\xd1\xab\xd5\x8a\x21\x99\x8c\xb5\x09\x91\x80\xec\x68\x4e\x51\x61\x97\xed\x52\xda\xdd\xbd\xdd\x6d\x18\x84\xf8\xef\x2b\x7f\x90\x40\x32\x52\xe6\xb2\xd2\xf8\x82\xa8\x7a\xf5\xea\xd5\xab\xee\x1e\x62\x6a\xec\xd6\x49\x51\x06\x5c\x5d\x5c\x5d\xe1\xd6\x98\x42\x31\xee\xee\xa6\xd1\x30\x1a\xe2\x4e\x52\xd6\x9e\x33\xd4\x3a\x63\x87\x50\x32\x26\x96\xd2\x92\x0f\x99\x33\xfc\xc3\xce\x8b\xd1\xb8\x1a\x5d\xe0\xb7\x06\x30\xe8\x53\x83\xdf\xff\x88\x86\xd8\x9a\x1a\x15\x6d\xa1\x4d\x40\xed\x19\xa1\x14\x8f\x5c\x14\x83\xbf\xa7\x6c\x03\x44\x23\x35\x95\x55\x42\x3a\x65\x6c\x24\x94\x6d\x9b\x9e\x64\x14\x0d\xf1\xad\xa7\x30\xab\x40\xa2\x41\x48\x8d\xdd\xc2\xe4\xc7\x38\x50\x68\x05\xb7\x5f\x19\x82\x8d\xc7\xe3\xcd\x66\x33\xa2\x56\xed\xc8\xb8\x62\xac\x3a\xa4\x1f\xdf\x25\xd3\x9b\xd9\xe2\xe6\xfc\x6a\x74\xd1\xd6\x3c\x6a\xc5\xde\xc3\xf1\xbf\xb5\x38\xce\xb0\xda\x82\xac\x55\x92\xd2\x4a\x31\x14\x6d\x60\x1c\xa8\x70\xcc\x19\x82\x69\x04\x6f\x9c\x04\xd1\xc5\x19\xbc\xc9\xc3\x86\x1c\x47\x43\x64\xe2\x83\x93\x55\x1d\x4e\xdc\x3a\xc8\x13\x7f\x02\x30\x1a\xa4\x31\x98\x2c\x90\x2c\x06\xf8\x34\x59\x24\x8b\xb3\x68\x88\xaf\xc9\xf2\xcb\xc3\xe3\x12\x5f\x27\xf3\xf9\x64\xb6\x4c\x6e\x16\x78\x98\x63\xfa\x30\xbb\x4e\x96\xc9\xc3\x6c\x81\x87\xcf\x98\xcc\xbe\xe1\xef\x64\x76\x7d\x06\x96\x50\xb2\x03\x7f\xb7\xae\xd1\x6f\x1c\xa4\xf1\x91\xb3\xc6\xb4\x05\xf3\x89\x80\xdc\x74\x82\xbc\xe5\x54\x72\x49\xa1\x48\x17\x35\x15\x8c\xc2\xac\xd9\x69\xd1\x05\x2c\xbb\x4a\x7c\xb3\x4d\x0f\xd2\x59\x34\x84\x92\x4a\x02\x85\x36\xf2\x6e\xa8\x51\x14\x91\x95\x7e\xff\x31\x52\xe3\x78\x94\x6a\x57\x8d\x52\x65\xea\x6c\x54\xb4\x47\x69\x94\x9a\x6a\xbc\xbe\x24\x65\x4b\xba\x8c\x9e\x45\x67\x31\x16\xec\xd6\x92\xf2\x3d\x59\x2b\xba\x88\x2a\x0e\x94\x51\xa0\x38\x02\x34\x55\x1c\xa3\x25\x90\x8c\x75\x90\xb0\xfd\x31\x67\x8f\xf5\x96\xd2\xa6\x40\xbb\xea\xdc\x6f\x7d\xe0\x2a\x6a\x26\x7c\xa5\x9a\x36\x75\x49\x4f\x15\x01\xeb\x83\xdc\xf5\xe5\x8a\x03\x5d\x46\x80\xef\xe4\x7c\x31\x3e\xcc\x7e\xd0\xbe\xeb\x49\x56\x7c\xdf\xd7\xb1\x37\xb5\x4b\xd9\x37\x5d\x80\xf3\xbe\x53\x87\x7b\x6a\x8b\x9f\x0e\xd5\x4f\x85\x33\xb5\x8d\xba\x73\xd9\x4d\x7f\xa2\x
e8\xf6\x28\x4d\x75\x30\xb7\xac\xd9\x51\xe0\x2c\x46\x70\x35\xf7\x19\xc9\x96\x5c\x59\x45\x81\x63\x0c\x76\xbb\xa6\xdf\x7e\x3f\x78\x97\x9c\x92\xfe\xc4\x8f\x9e\xb3\xa5\xb9\xa7\x90\x96\xf3\x5e\x68\x37\x55\x4e\xca\x1f\x08\x0f\x23\x4c\xd6\x24\xaa\x39\xe6\x89\x9e\x78\xcf\x21\xd1\x6b\xd6\xc1\xb8\xed\x29\xfc\xb0\xa1\x7e\x63\x71\x1f\x06\x14\xad\x58\xf9\xb8\xff\xed\xc3\x8d\x9f\xec\x5e\x26\x49\xae\x3f\x0b\xab\x2c\x6e\x6d\x7a\xd3\x3f\xb9\x7e\xe5\x0a\xe4\x0a\x0e\xef\xb1\xc0\x9a\x54\xcd\x47\x0e\xb4\x9e\xfa\xbf\x44\x4b\x10\x52\xad\x85\x53\xa3\x73\x29\xfe\xdc\xed\xfa\x60\xe7\xfb\x53\xda\x86\xf7\xfb\xf1\x6e\xd7\x92\x1c\x5c\xfb\x89\x9d\x3d\x55\x5c\xad\xd8\xf9\x52\x3e\x5a\xdf\xfd\x5b\xe0\xaf\xbb\xc8\xff\x7d\x35\xbb\x5d\x6b\xde\x7e\x3f\x7e\xb5\xcf\xbf\x75\xff\xb5\xc9\x9c\x73\x76\xac\x5f\xee\x52\xb7\x9b\x67\xde\xc6\x68\x79\xe6\x9c\xbf\xc4\x81\x90\xf7\x12\x8e\x2f\x55\xf3\x15\xeb\xe7\xf8\xe8\xef\x87\x37\xad\xd7\xfe\xfe\x29\x38\xa2\x6c\xe0\x3f\xfb\x12\x7d\xec\x13\x60\xc9\xb1\x0e\xfd\x69\xf8\x2f\x00\x00\xff\xff\x84\x11\x85\xbe\x78\x07\x00\x00"), }, - "/cloudids.yaml": &vfsgen۰CompressedFileInfo{ - name: "cloudids.yaml", - modTime: time.Time{}, - uncompressedSize: 2016, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x55\x4d\x6f\xdb\x38\x10\xbd\xeb\x57\x0c\xec\x4b\x0b\x24\x52\x92\xa3\xf7\xe4\x3a\x69\x2b\x6c\xea\x14\x96\xd3\xa2\xa7\x60\x4c\x8d\xa4\xd9\x50\x24\x97\x1c\xd9\x35\xb2\xfe\xef\x0b\x7d\x25\x71\xd2\x00\xd1\xc1\x20\x86\x6f\xde\x3c\xce\x1b\xd2\x53\x58\x58\xb7\xf7\x5c\x56\x02\x17\x67\x17\x17\xf0\xc5\xda\x52\x13\x5c\x5f\x2f\xa2\x69\x34\x85\x6b\x56\x64\x02\xe5\xd0\x98\x9c\x3c\x48\x45\x30\x77\xa8\x2a\x1a\x77\x4e\xe0\x07\xf9\xc0\xd6\xc0\x45\x7c\x06\x1f\x5a\xc0\x64\xd8\x9a\x7c\xfc\x2b\x9a\xc2\xde\x36\x50\xe3\x1e\x8c\x15\x68\x02\x81\x54\x1c\xa0\x60\x4d\x40\xbf\x15\x39\x01\x36\xa0\x6c\xed\x34\xa3\x51\x04\x3b\x96\xaa\x2b\x33\x90\xc4\xd1\x14\x7e\x0d\x14\x76\x23\xc8\x06\x10\x94\x75\x7b\xb0\xc5\x73\x1c\xa0\x74\x82\xbb\xaf\x12\x71\xb3\x24\xd9\xed\x76\x31\x76\x6a\x63\xeb\xcb\x44\xf7\xc8\x90\x5c\xa7\x8b\xab\x65\x76\x75\x7a\x11\x9f\x75\x39\xb7\x46\x53\x08\xe0\xe9\xdf\x86\x3d\xe5\xb0\xd9\x03\x3a\xa7\x59\xe1\x46\x13\x68\xdc\x81\xf5\x80\xa5\x27\xca\x41\x6c\x2b\x78\xe7\x59\xd8\x94\x27\x10\x6c\x21\x3b\xf4\x14\x4d\x21\xe7\x20\x9e\x37\x8d\x1c\x75\x6b\x94\xc7\xe1\x08\x60\x0d\xa0\x81\xc9\x3c\x83\x34\x9b\xc0\xa7\x79\x96\x66\x27\xd1\x14\x7e\xa6\xeb\xaf\x37\xb7\x6b\xf8\x39\x5f\xad\xe6\xcb\x75\x7a\x95\xc1\xcd\x0a\x16\x37\xcb\xcb\x74\x9d\xde\x2c\x33\xb8\xf9\x0c\xf3\xe5\x2f\xf8\x3b\x5d\x5e\x9e\x00\xb1\x54\xe4\x81\x7e\x3b\xdf\xea\xb7\x1e\xb8\xed\x23\xe5\x6d\xd3\x32\xa2\x23\x01\x85\xed\x05\x05\x47\x8a\x0b\x56\xa0\xd1\x94\x0d\x96\x04\xa5\xdd\x92\x37\x6c\x4a\x70\xe4\x6b\x0e\xad\x9b\x01\xd0\xe4\xd1\x14\x34\xd7\x2c\x28\x5d\xe4\xd5\xa1\xe2\x28\x42\xc7\x83\xff\x33\x50\xd6\x53\xac\x8c\xaf\x63\xa5\x6d\x93\xc7\x65\x37\x4a\xb1\xb2\x75\xb2\x3d\x47\xed\x2a\x3c\x8f\xee\xd9\xe4\x33\xc8\xc8\x6f\x59\xd1\x37\x74\x8e\x4d\x19\xd5\x24\x98\xa3\xe0\x2c\x02\x30\x58\xd3\x0c\x3a\x02\xce\xc3\x9f\xe9\x06\x58\x70\xa8\x5a\xac\xf1\xf5\x69\xd8\x07\xa1\x3a\x6a\x0f\xf7\xc4\xb2\x68\xf3\xd2\xcb\x2c\x02\xd8\x8e\x22\xb7\xe7\x1b\x12\x3c\x8f\x00\x42\x2f\xe2\xab\x0d\xb2\x3c\x2e\xda\x57\x42\xc7\x61\xa8\xe6\x29\xd8\xc6\x2b\x0a\x2d\x37\xc0\xe9\xc0\xdf\xe3\xee\xba\xbc\x3b\xce\xc3\x1d\x99\xdc\x59\x36\x12\xf5\x63\xd8\x1f\x76\x54\x71\x75\xbc\x89\x8d\xd8\x2f\x64\xc8\xa3\x50\x3e\x03\xf1\x0d\x0d\x3b\x9c\xaf\xa9\x76\x1a\x85\x66\x30\x71\xde\xfe\x43\x4a\x42\xf2\xf0\x30\x2c\x0f\x87\x44\x5b\xd5\x7b\x92\x3c\x3c\x8c\xeb\xc3\x21\x19\xcb\xb7\xe1\x56\xe1\xe1\x30\x79\x45\xb9\x40
\xf3\x89\x6e\x03\xe5\x6b\xfb\x0d\x45\x55\xab\xe1\x68\x7d\x0b\x0a\xd4\x61\x94\x31\x1e\x7a\xbe\x45\xd6\xed\x4d\x48\xcd\x3c\x04\x92\xd4\x6c\xc9\x88\xf5\xfb\x63\xf8\x68\xe2\x60\xea\x6c\x08\x8f\x5e\xb4\xbf\x2f\x88\xd3\xcb\x27\x90\xa0\x2f\x49\x3e\x33\xe9\xfc\x08\x5b\x31\x79\xf4\xaa\x62\x85\x7a\x45\x05\x79\x32\x8f\x36\xf4\x56\xc8\xde\xd1\x0c\x86\xde\x3c\xc6\x01\xee\x69\xff\x18\x5e\x51\xf1\xa2\xf6\x1b\x5c\xc5\xa0\xe0\xbd\x74\xed\x97\x53\x50\x9e\x9d\x74\xe3\xf5\xdf\xe9\xb3\x1d\x80\x75\x45\x63\x12\x48\x85\xd2\xbf\x7e\xa3\x08\xd8\x90\xb6\xa6\x0c\x20\x36\x7e\x96\x56\x6e\xef\x67\x47\x2c\xfd\x1c\x7d\x7f\xa5\xe9\x8f\x63\xfd\x8c\xc7\xdb\xc6\xcd\x1e\xab\xd5\x68\xb0\x24\xff\xe6\x95\x7a\xd5\x04\x43\xb2\xb3\xfe\xfe\x65\x13\x86\xf0\xfb\x9b\x90\xd6\x75\x23\xed\x00\xc5\xd0\x4e\xd9\xf8\x70\xff\xf8\xbe\x18\xb9\xfa\xde\x70\x00\x65\x8d\x21\x25\x94\x1f\x11\x88\xed\x12\xd2\xcb\x0c\xc6\x11\x8f\x61\xdd\x76\x52\xa1\x19\x5f\x41\x65\x4d\xf7\xef\xf0\x92\xfa\xd9\x30\xf5\x1f\x4b\x20\x5d\xc0\x07\xcd\xf7\x04\x93\xe0\xd5\xa9\x21\x99\x7c\x84\xe1\x7d\x2c\x1a\xad\xe1\x76\x75\x3d\x56\x1d\x79\x06\xfc\xd3\x7d\x1c\x56\x77\x9c\x1f\x92\x52\xdb\x0d\xea\x64\xc0\x86\xe4\x91\xf5\x1d\xbe\x2e\x6c\xed\x1a\xa1\xe5\xab\x6e\xbf\xcf\x5e\xd5\xa7\xbf\x61\xeb\xff\x01\x00\x00\xff\xff\xf4\x41\xd8\x74\xe0\x07\x00\x00"), - }, "/cloudiot.yaml": &vfsgen۰CompressedFileInfo{ name: "cloudiot.yaml", modTime: time.Time{}, @@ -800,7 +793,6 @@ var Assets = func() http.FileSystem { fs["/cloudfunctions.yaml"].(os.FileInfo), fs["/cloudfunctions2.yaml"].(os.FileInfo), fs["/cloudidentity.yaml"].(os.FileInfo), - fs["/cloudids.yaml"].(os.FileInfo), fs["/cloudiot.yaml"].(os.FileInfo), fs["/cloudrun.yaml"].(os.FileInfo), fs["/cloudrunv2.yaml"].(os.FileInfo), From 02ed05b7489f58dae4916c4cb49ba0102415ab2d Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Thu, 20 Jun 2024 20:43:10 +0000 Subject: [PATCH 015/101] docs:chore: add sample Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- .../cloudids_v1beta1_cloudidsendpoint.yaml | 25 ++ .../cloudids_v1beta1_cloudnetwork-dep.yaml | 20 + .../cloudids_v1beta1_computeaddress-dep.yaml | 25 ++ ...beta1_servicenetworkingconnection-dep.yaml | 24 + .../cloudids/cloudidsendpoint.md | 422 ++++++++++++++++++ 5 files changed, 516 insertions(+) create mode 100644 config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudidsendpoint.yaml create mode 100644 config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudnetwork-dep.yaml create mode 100644 config/samples/resources/cloudidsendpoint/cloudids_v1beta1_computeaddress-dep.yaml create mode 100644 config/samples/resources/cloudidsendpoint/cloudids_v1beta1_servicenetworkingconnection-dep.yaml create mode 100644 scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md diff --git a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudidsendpoint.yaml b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudidsendpoint.yaml new file mode 100644 index 0000000000..17acff903d --- /dev/null +++ b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudidsendpoint.yaml @@ -0,0 +1,25 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: cloudids.cnrm.cloud.google.com/v1beta1 +kind: CloudIDSEndpoint +metadata: + name: cloudidsendpoint-sample +spec: + networkRef: + name: computenetwork-dep + severity: INFORMATIONAL + location: us-west2-a + projectRef: + external: ${PROJECT_ID?} diff --git a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudnetwork-dep.yaml b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudnetwork-dep.yaml new file mode 100644 index 0000000000..d10a84d5c0 --- /dev/null +++ b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudnetwork-dep.yaml @@ -0,0 +1,20 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeNetwork +metadata: + name: computenetwork-dep +spec: + autoCreateSubnetworks: false \ No newline at end of file diff --git a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_computeaddress-dep.yaml b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_computeaddress-dep.yaml new file mode 100644 index 0000000000..21a04ddd7b --- /dev/null +++ b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_computeaddress-dep.yaml @@ -0,0 +1,25 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeAddress +metadata: + name: computeaddress-dep +spec: + location: global + addressType: INTERNAL + networkRef: + name: computenetwork-dep + prefixLength: 16 + purpose: VPC_PEERING diff --git a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_servicenetworkingconnection-dep.yaml b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_servicenetworkingconnection-dep.yaml new file mode 100644 index 0000000000..77a0d26257 --- /dev/null +++ b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_servicenetworkingconnection-dep.yaml @@ -0,0 +1,24 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: servicenetworking.cnrm.cloud.google.com/v1beta1 +kind: ServiceNetworkingConnection +metadata: + name: servicenetworkingconnection-dep +spec: + networkRef: + name: computenetwork-dep + reservedPeeringRanges: + - name: computeaddress-dep + service: servicenetworking.googleapis.com \ No newline at end of file diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md new file mode 100644 index 0000000000..210c7ed632 --- /dev/null +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md @@ -0,0 +1,422 @@ +{# AUTOGENERATED. DO NOT EDIT. #} + +{% extends "config-connector/_base.html" %} + +{% block page_title %}CloudIDSEndpoint{% endblock %} +{% block body %} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Property | Value |
| -------- | ----- |
| {{gcp_name_short}} Service Name | Cloud Intrusion Detection System Endpoint |
| {{gcp_name_short}} Service Documentation | /intrusion-detection-system/docs/ |
| {{gcp_name_short}} REST Resource Name | v1.projects.locations.endpoints |
| {{product_name_short}} Resource Short Names | gcpcloudidsendpoint, gcpcloudidsendpoints, cloudidsendpoint |
| {{product_name_short}} Service Name | ids.googleapis.com |
| {{product_name_short}} Resource Fully Qualified Name | cloudidsendpoints.cloudids.cnrm.cloud.google.com |
| {{product_name_short}} Default Average Reconcile Interval In Seconds | 600 |
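The table above names `ids.googleapis.com` as the backing API, which has to be enabled in the target project before endpoints can be created. A minimal sketch of doing that with Config Connector's service-enablement resource follows; it assumes the standard `serviceusage.cnrm.cloud.google.com/v1beta1` `Service` kind and that the project is selected through the usual namespace annotation, and it is not part of the generated reference.

```yaml
# A sketch, not generated output: enables the Cloud IDS API that backs CloudIDSEndpoint.
# Assumes the project is resolved from the namespace's project annotation.
apiVersion: serviceusage.cnrm.cloud.google.com/v1beta1
kind: Service
metadata:
  name: ids.googleapis.com
```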
## Custom Resource Definition Properties

### Annotations

| Fields |
| ------ |
| cnrm.cloud.google.com/state-into-spec |
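The `cnrm.cloud.google.com/state-into-spec` annotation controls whether Config Connector writes externally populated field values back into `spec`. A minimal sketch of setting it on this resource, using the commonly accepted `absent` value (`merge` is the other accepted value):

```yaml
# A sketch: opting this resource out of state-into-spec merging.
# "absent" and "merge" are the values this annotation accepts.
apiVersion: cloudids.cnrm.cloud.google.com/v1beta1
kind: CloudIDSEndpoint
metadata:
  name: cloudidsendpoint-sample
  annotations:
    cnrm.cloud.google.com/state-into-spec: absent
# spec omitted here; see the full sample YAML at the end of this page.
```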
+ + +### Spec +#### Schema +```yaml +description: string +location: string +networkRef: + external: string + name: string + namespace: string +projectRef: + external: string + name: string + namespace: string +resourceID: string +severity: string +threatExceptions: +- string +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Field | Required | Type | Description |
| ----- | -------- | ---- | ----------- |
| description | Optional | string | Immutable. An optional description of the endpoint. |
| location | Required | string | Immutable. The location for the endpoint. |
| networkRef | Required | object | Immutable. Name of the VPC network that is connected to the IDS endpoint. This can either contain the VPC network name itself (like "src-net") or the full URL to the network (like "projects/{project_id}/global/networks/src-net"). |
| networkRef.external | Optional | string | Allowed value: The `name` field of a `ComputeNetwork` resource. |
| networkRef.name | Optional | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names |
| networkRef.namespace | Optional | string | Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ |
| projectRef | Required | object | The project that this resource belongs to. |
| projectRef.external | Optional | string | Allowed value: The `name` field of a `Project` resource. |
| projectRef.name | Optional | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names |
| projectRef.namespace | Optional | string | Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ |
| resourceID | Optional | string | Immutable. Optional. The name of the resource. Used for creation and acquisition. When unset, the value of `metadata.name` is used as the default. |
| severity | Required | string | Immutable. The minimum alert severity level that is reported by the endpoint. Possible values: ["INFORMATIONAL", "LOW", "MEDIUM", "HIGH", "CRITICAL"]. |
| threatExceptions | Optional | list (string) | Configuration for threat IDs excluded from generating alerts. Limit: 99 IDs. |
| threatExceptions[] | Optional | string |  |
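As an illustrative sketch of how the optional fields above combine with the required ones (the values here are placeholders chosen for this page, not taken from the generated sample at the end):

```yaml
# A sketch with placeholder values; the threat IDs below are not real Cloud IDS threat identifiers.
apiVersion: cloudids.cnrm.cloud.google.com/v1beta1
kind: CloudIDSEndpoint
metadata:
  name: cloudidsendpoint-with-exceptions  # hypothetical name
spec:
  location: us-west2-a
  severity: MEDIUM
  description: "Endpoint for the staging VPC"  # optional, immutable
  threatExceptions:  # optional, up to 99 IDs
  - "1234567"
  - "7654321"
  networkRef:
    name: computenetwork-dep
  projectRef:
    external: projects/${PROJECT_ID?}
```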
+ + + +### Status +#### Schema +```yaml +conditions: +- lastTransitionTime: string + message: string + reason: string + status: string + type: string +observedGeneration: integer +observedState: + createTime: string + endpointForwardingRule: string + endpointIp: string + updateTime: string +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Field | Type | Description |
| ----- | ---- | ----------- |
| conditions | list (object) | Conditions represent the latest available observation of the resource's current state. |
| conditions[] | object |  |
| conditions[].lastTransitionTime | string | Last time the condition transitioned from one status to another. |
| conditions[].message | string | Human-readable message indicating details about last transition. |
| conditions[].reason | string | Unique, one-word, CamelCase reason for the condition's last transition. |
| conditions[].status | string | Status is the status of the condition. Can be True, False, Unknown. |
| conditions[].type | string | Type is the type of the condition. |
| observedGeneration | integer | ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. If this is equal to metadata.generation, then that means that the current reported status reflects the most recent desired state of the resource. |
| observedState | object | The observed state of the underlying GCP resource. |
| observedState.createTime | string | Creation timestamp in RFC 3339 text format. |
| observedState.endpointForwardingRule | string | URL of the endpoint's network address to which traffic is to be sent by Packet Mirroring. |
| observedState.endpointIp | string | Internal IP address of the endpoint's network entry point. |
| observedState.updateTime | string | Last update timestamp in RFC 3339 text format. |
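To make the status shape concrete, here is a sketch of what a reconciled endpoint's status block might look like; every value is an invented placeholder, and the real values are populated by the controller from the Cloud IDS API.

```yaml
# Placeholder values only; this block is written by the controller, never set by hand.
status:
  conditions:
  - type: Ready
    status: "True"
    reason: UpToDate
    message: The resource is up to date
    lastTransitionTime: "2024-06-20T20:43:10Z"
  observedGeneration: 1
  observedState:
    createTime: "2024-06-20T20:40:00Z"
    updateTime: "2024-06-20T20:42:00Z"
    endpointForwardingRule: https://www.googleapis.com/compute/v1/projects/example-project/regions/us-west2/forwardingRules/example-endpoint-rule  # placeholder
    endpointIp: 10.128.0.2  # placeholder
```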
+ +## Sample YAML(s) + +### Typical Use Case +```yaml +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: cloudids.cnrm.cloud.google.com/v1beta1 +kind: CloudIDSEndpoint +metadata: + name: cloudidsendpoint-sample +spec: + networkRef: + name: computenetwork-dep + severity: INFORMATIONAL + location: us-west2-a + projectRef: + external: projects/${PROJECT_ID?} +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeNetwork +metadata: + name: computenetwork-dep +spec: + autoCreateSubnetworks: false +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeAddress +metadata: + name: computeaddress-dep +spec: + location: global + addressType: INTERNAL + networkRef: + name: computenetwork-dep + prefixLength: 16 + purpose: VPC_PEERING +--- +apiVersion: servicenetworking.cnrm.cloud.google.com/v1beta1 +kind: ServiceNetworkingConnection +metadata: + name: servicenetworkingconnection-dep +spec: + networkRef: + name: computenetwork-dep + reservedPeeringRanges: + - name: computeaddress-dep + service: servicenetworking.googleapis.com +``` + + +Note: If you have any trouble with instantiating the resource, refer to Troubleshoot Config Connector. + +{% endblock %} From e36b2897e15d0a8b920b047633488b93b2461070 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Thu, 20 Jun 2024 20:43:44 +0000 Subject: [PATCH 016/101] docs: add google docs Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- .../resource-reference/_toc.yaml | 4 ++ .../resource-reference/overview.md | 4 ++ .../templates/cloudids_cloudidsendpoint.tmpl | 50 +++++++++++++++++++ 3 files changed, 58 insertions(+) create mode 100644 scripts/generate-google3-docs/resource-reference/templates/cloudids_cloudidsendpoint.tmpl diff --git a/scripts/generate-google3-docs/resource-reference/_toc.yaml b/scripts/generate-google3-docs/resource-reference/_toc.yaml index 2a69d63448..37649fbe17 100644 --- a/scripts/generate-google3-docs/resource-reference/_toc.yaml +++ b/scripts/generate-google3-docs/resource-reference/_toc.yaml @@ -95,6 +95,10 @@ toc: path: /config-connector/docs/reference/resource-docs/cloudidentity/cloudidentitygroup.md - title: "CloudIdentityMembership" path: /config-connector/docs/reference/resource-docs/cloudidentity/cloudidentitymembership.md +- title: "Cloud Intrusion Detection System" + section: + - title: "CloudIDSEndpoint" + path: /config-connector/docs/reference/resource-docs/cloudids/cloudsidsendpoint.md - title: "Cloud Scheduler" section: - title: "CloudSchedulerJob" diff --git a/scripts/generate-google3-docs/resource-reference/overview.md b/scripts/generate-google3-docs/resource-reference/overview.md index 9d155b74ce..ed316d4355 100644 --- a/scripts/generate-google3-docs/resource-reference/overview.md +++ b/scripts/generate-google3-docs/resource-reference/overview.md @@ -842,6 +842,10 @@ issues for {{product_name_short}}. 
{{serverless_vpc_access_name}} VPCAccessConnector + + {{serverless_vpc_access_name}} + CloudIDSEndpoint + diff --git a/scripts/generate-google3-docs/resource-reference/templates/cloudids_cloudidsendpoint.tmpl b/scripts/generate-google3-docs/resource-reference/templates/cloudids_cloudidsendpoint.tmpl new file mode 100644 index 0000000000..d552a2251e --- /dev/null +++ b/scripts/generate-google3-docs/resource-reference/templates/cloudids_cloudidsendpoint.tmpl @@ -0,0 +1,50 @@ +{{template "headercomment.tmpl" .}} + +{% extends "config-connector/_base.html" %} + +{% block page_title %}{{ .Kind}}{% endblock %} +{% block body %} +{{template "alphadisclaimer.tmpl" .}} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Property | Value |
| -------- | ----- |
| {{"{{gcp_name_short}}"}} Service Name | Cloud Intrusion Detection System Endpoint |
| {{"{{gcp_name_short}}"}} Service Documentation | /intrusion-detection-system/docs/ |
| {{"{{gcp_name_short}}"}} REST Resource Name | v1.projects.locations.endpoints |
| {{"{{product_name_short}}"}} Resource Short Names | {{ .ShortNames}} |
| {{"{{product_name_short}}"}} Service Name | ids.googleapis.com |
| {{"{{product_name_short}}"}} Resource Fully Qualified Name | {{ .FullyQualifiedName}} |
| {{"{{product_name_short}}"}} Default Average Reconcile Interval In Seconds | {{ .DefaultReconcileInterval}} |
+ +{{template "resource.tmpl" .}} +{{template "endnote.tmpl" .}} +{% endblock %} From 5bf163868df8e4cd20ef2ab95d36c45e0376addc Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Thu, 20 Jun 2024 20:57:30 +0000 Subject: [PATCH 017/101] chore: make ready-pr Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- .../cloudidsendpoint_types.go | 22 +++++--- .../cloudids/{v1alpha1 => v1beta1}/doc.go | 6 +-- .../{v1alpha1 => v1beta1}/register.go | 6 +-- .../zz_generated.deepcopy.go | 43 ++++++++++++---- .../client/clientset/versioned/clientset.go | 16 +++--- .../versioned/fake/clientset_generated.go | 10 ++-- .../clientset/versioned/fake/register.go | 4 +- .../clientset/versioned/scheme/register.go | 4 +- .../{v1alpha1 => v1beta1}/cloudids_client.go | 36 ++++++------- .../{v1alpha1 => v1beta1}/cloudidsendpoint.go | 42 ++++++++-------- .../cloudids/{v1alpha1 => v1beta1}/doc.go | 2 +- .../{v1alpha1 => v1beta1}/fake/doc.go | 0 .../fake/fake_cloudids_client.go | 8 +-- .../fake/fake_cloudidsendpoint.go | 50 +++++++++---------- .../generated_expansion.go | 2 +- .../cloudids/cloudidsendpoint.md | 2 +- 16 files changed, 140 insertions(+), 113 deletions(-) rename pkg/clients/generated/apis/cloudids/{v1alpha1 => v1beta1}/cloudidsendpoint_types.go (94%) rename pkg/clients/generated/apis/cloudids/{v1alpha1 => v1beta1}/doc.go (90%) rename pkg/clients/generated/apis/cloudids/{v1alpha1 => v1beta1}/register.go (93%) rename pkg/clients/generated/apis/cloudids/{v1alpha1 => v1beta1}/zz_generated.deepcopy.go (85%) rename pkg/clients/generated/client/clientset/versioned/typed/cloudids/{v1alpha1 => v1beta1}/cloudids_client.go (67%) rename pkg/clients/generated/client/clientset/versioned/typed/cloudids/{v1alpha1 => v1beta1}/cloudidsendpoint.go (78%) rename pkg/clients/generated/client/clientset/versioned/typed/cloudids/{v1alpha1 => v1beta1}/doc.go (98%) rename pkg/clients/generated/client/clientset/versioned/typed/cloudids/{v1alpha1 => v1beta1}/fake/doc.go (100%) rename pkg/clients/generated/client/clientset/versioned/typed/cloudids/{v1alpha1 => v1beta1}/fake/fake_cloudids_client.go (77%) rename pkg/clients/generated/client/clientset/versioned/typed/cloudids/{v1alpha1 => v1beta1}/fake/fake_cloudidsendpoint.go (71%) rename pkg/clients/generated/client/clientset/versioned/typed/cloudids/{v1alpha1 => v1beta1}/generated_expansion.go (98%) diff --git a/pkg/clients/generated/apis/cloudids/v1alpha1/cloudidsendpoint_types.go b/pkg/clients/generated/apis/cloudids/v1beta1/cloudidsendpoint_types.go similarity index 94% rename from pkg/clients/generated/apis/cloudids/v1alpha1/cloudidsendpoint_types.go rename to pkg/clients/generated/apis/cloudids/v1beta1/cloudidsendpoint_types.go index d0a67381fc..5be2e9d83e 100644 --- a/pkg/clients/generated/apis/cloudids/v1alpha1/cloudidsendpoint_types.go +++ b/pkg/clients/generated/apis/cloudids/v1beta1/cloudidsendpoint_types.go @@ -28,7 +28,7 @@ // that future versions of the go-client may include breaking changes. // Please try it out and give us feedback! -package v1alpha1 +package v1beta1 import ( "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" @@ -63,10 +63,7 @@ type CloudIDSEndpointSpec struct { ThreatExceptions []string `json:"threatExceptions,omitempty"` } -type CloudIDSEndpointStatus struct { - /* Conditions represent the latest available observations of the - CloudIDSEndpoint's current state. 
*/ - Conditions []v1alpha1.Condition `json:"conditions,omitempty"` +type EndpointObservedStateStatus struct { /* Creation timestamp in RFC 3339 text format. */ // +optional CreateTime *string `json:"createTime,omitempty"` @@ -79,20 +76,29 @@ type CloudIDSEndpointStatus struct { // +optional EndpointIp *string `json:"endpointIp,omitempty"` + /* Last update timestamp in RFC 3339 text format. */ + // +optional + UpdateTime *string `json:"updateTime,omitempty"` +} + +type CloudIDSEndpointStatus struct { + /* Conditions represent the latest available observations of the + CloudIDSEndpoint's current state. */ + Conditions []v1alpha1.Condition `json:"conditions,omitempty"` /* ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. If this is equal to metadata.generation, then that means that the current reported status reflects the most recent desired state of the resource. */ // +optional ObservedGeneration *int64 `json:"observedGeneration,omitempty"` - /* Last update timestamp in RFC 3339 text format. */ + /* The observed state of the underlying GCP resource. */ // +optional - UpdateTime *string `json:"updateTime,omitempty"` + ObservedState *EndpointObservedStateStatus `json:"observedState,omitempty"` } // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:resource:categories=gcp,shortName=gcpcloudidsendpoint;gcpcloudidsendpoints // +kubebuilder:subresource:status -// +kubebuilder:metadata:labels="cnrm.cloud.google.com/managed-by-kcc=true";"cnrm.cloud.google.com/stability-level=alpha";"cnrm.cloud.google.com/system=true";"cnrm.cloud.google.com/tf2crd=true" +// +kubebuilder:metadata:labels="cnrm.cloud.google.com/managed-by-kcc=true";"cnrm.cloud.google.com/stability-level=stable";"cnrm.cloud.google.com/system=true";"cnrm.cloud.google.com/tf2crd=true" // +kubebuilder:printcolumn:name="Age",JSONPath=".metadata.creationTimestamp",type="date" // +kubebuilder:printcolumn:name="Ready",JSONPath=".status.conditions[?(@.type=='Ready')].status",type="string",description="When 'True', the most recent reconcile of the resource succeeded" // +kubebuilder:printcolumn:name="Status",JSONPath=".status.conditions[?(@.type=='Ready')].reason",type="string",description="The reason for the value in 'Ready'" diff --git a/pkg/clients/generated/apis/cloudids/v1alpha1/doc.go b/pkg/clients/generated/apis/cloudids/v1beta1/doc.go similarity index 90% rename from pkg/clients/generated/apis/cloudids/v1alpha1/doc.go rename to pkg/clients/generated/apis/cloudids/v1beta1/doc.go index 77aaed1d16..7cf7675795 100644 --- a/pkg/clients/generated/apis/cloudids/v1alpha1/doc.go +++ b/pkg/clients/generated/apis/cloudids/v1beta1/doc.go @@ -28,14 +28,14 @@ // that future versions of the go-client may include breaking changes. // Please try it out and give us feedback! -// Package v1alpha1 contains API Schema definitions for the cloudids v1alpha1 API group. +// Package v1beta1 contains API Schema definitions for the cloudids v1beta1 API group. // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/pkg/apis/cloudids // +k8s:defaulter-gen=TypeMeta // +groupName=cloudids.cnrm.cloud.google.com -// Generate deepcopy object for cloudids/v1alpha1 API group +// Generate deepcopy object for cloudids/v1beta1 API group // //go:generate go run ../../../../../../scripts/deepcopy-gen/main.go -O zz_generated.deepcopy -i . 
-h ../../../../../../hack/boilerplate_client_alpha.go.txt -package v1alpha1 +package v1beta1 diff --git a/pkg/clients/generated/apis/cloudids/v1alpha1/register.go b/pkg/clients/generated/apis/cloudids/v1beta1/register.go similarity index 93% rename from pkg/clients/generated/apis/cloudids/v1alpha1/register.go rename to pkg/clients/generated/apis/cloudids/v1beta1/register.go index ea138ec217..bb72eccc37 100644 --- a/pkg/clients/generated/apis/cloudids/v1alpha1/register.go +++ b/pkg/clients/generated/apis/cloudids/v1beta1/register.go @@ -28,13 +28,13 @@ // that future versions of the go-client may include breaking changes. // Please try it out and give us feedback! -// Package v1alpha1 contains API Schema definitions for the cloudids v1alpha1 API group. +// Package v1beta1 contains API Schema definitions for the cloudids v1beta1 API group. // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/pkg/apis/cloudids // +k8s:defaulter-gen=TypeMeta // +groupName=cloudids.cnrm.cloud.google.com -package v1alpha1 +package v1beta1 import ( "reflect" @@ -45,7 +45,7 @@ import ( var ( // SchemeGroupVersion is the group version used to register these objects. - SchemeGroupVersion = schema.GroupVersion{Group: "cloudids.cnrm.cloud.google.com", Version: "v1alpha1"} + SchemeGroupVersion = schema.GroupVersion{Group: "cloudids.cnrm.cloud.google.com", Version: "v1beta1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} diff --git a/pkg/clients/generated/apis/cloudids/v1alpha1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/cloudids/v1beta1/zz_generated.deepcopy.go similarity index 85% rename from pkg/clients/generated/apis/cloudids/v1alpha1/zz_generated.deepcopy.go rename to pkg/clients/generated/apis/cloudids/v1beta1/zz_generated.deepcopy.go index c4794a4b0d..37865236f2 100644 --- a/pkg/clients/generated/apis/cloudids/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/cloudids/v1beta1/zz_generated.deepcopy.go @@ -22,10 +22,10 @@ // Code generated by main. DO NOT EDIT. -package v1alpha1 +package v1beta1 import ( - k8sv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" + v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -128,9 +128,35 @@ func (in *CloudIDSEndpointStatus) DeepCopyInto(out *CloudIDSEndpointStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]k8sv1alpha1.Condition, len(*in)) + *out = make([]v1alpha1.Condition, len(*in)) copy(*out, *in) } + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.ObservedState != nil { + in, out := &in.ObservedState, &out.ObservedState + *out = new(EndpointObservedStateStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudIDSEndpointStatus. +func (in *CloudIDSEndpointStatus) DeepCopy() *CloudIDSEndpointStatus { + if in == nil { + return nil + } + out := new(CloudIDSEndpointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointObservedStateStatus) DeepCopyInto(out *EndpointObservedStateStatus) { + *out = *in if in.CreateTime != nil { in, out := &in.CreateTime, &out.CreateTime *out = new(string) @@ -146,11 +172,6 @@ func (in *CloudIDSEndpointStatus) DeepCopyInto(out *CloudIDSEndpointStatus) { *out = new(string) **out = **in } - if in.ObservedGeneration != nil { - in, out := &in.ObservedGeneration, &out.ObservedGeneration - *out = new(int64) - **out = **in - } if in.UpdateTime != nil { in, out := &in.UpdateTime, &out.UpdateTime *out = new(string) @@ -159,12 +180,12 @@ func (in *CloudIDSEndpointStatus) DeepCopyInto(out *CloudIDSEndpointStatus) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudIDSEndpointStatus. -func (in *CloudIDSEndpointStatus) DeepCopy() *CloudIDSEndpointStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointObservedStateStatus. +func (in *EndpointObservedStateStatus) DeepCopy() *EndpointObservedStateStatus { if in == nil { return nil } - out := new(CloudIDSEndpointStatus) + out := new(EndpointObservedStateStatus) in.DeepCopyInto(out) return out } diff --git a/pkg/clients/generated/client/clientset/versioned/clientset.go b/pkg/clients/generated/client/clientset/versioned/clientset.go index 97080f5197..f58e445b9a 100644 --- a/pkg/clients/generated/client/clientset/versioned/clientset.go +++ b/pkg/clients/generated/client/clientset/versioned/clientset.go @@ -52,7 +52,7 @@ import ( cloudfunctionsv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudfunctions/v1beta1" cloudfunctions2v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudfunctions2/v1alpha1" cloudidentityv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudidentity/v1beta1" - cloudidsv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1" + cloudidsv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1" cloudiotv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudiot/v1alpha1" cloudschedulerv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudscheduler/v1beta1" cloudtasksv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudtasks/v1alpha1" @@ -177,7 +177,7 @@ type Interface interface { CloudfunctionsV1beta1() cloudfunctionsv1beta1.CloudfunctionsV1beta1Interface Cloudfunctions2V1alpha1() cloudfunctions2v1alpha1.Cloudfunctions2V1alpha1Interface CloudidentityV1beta1() cloudidentityv1beta1.CloudidentityV1beta1Interface - CloudidsV1alpha1() cloudidsv1alpha1.CloudidsV1alpha1Interface + CloudidsV1beta1() cloudidsv1beta1.CloudidsV1beta1Interface CloudiotV1alpha1() cloudiotv1alpha1.CloudiotV1alpha1Interface CloudschedulerV1beta1() cloudschedulerv1beta1.CloudschedulerV1beta1Interface CloudtasksV1alpha1() cloudtasksv1alpha1.CloudtasksV1alpha1Interface @@ -300,7 +300,7 @@ type Clientset struct { cloudfunctionsV1beta1 *cloudfunctionsv1beta1.CloudfunctionsV1beta1Client cloudfunctions2V1alpha1 *cloudfunctions2v1alpha1.Cloudfunctions2V1alpha1Client cloudidentityV1beta1 
*cloudidentityv1beta1.CloudidentityV1beta1Client - cloudidsV1alpha1 *cloudidsv1alpha1.CloudidsV1alpha1Client + cloudidsV1beta1 *cloudidsv1beta1.CloudidsV1beta1Client cloudiotV1alpha1 *cloudiotv1alpha1.CloudiotV1alpha1Client cloudschedulerV1beta1 *cloudschedulerv1beta1.CloudschedulerV1beta1Client cloudtasksV1alpha1 *cloudtasksv1alpha1.CloudtasksV1alpha1Client @@ -528,9 +528,9 @@ func (c *Clientset) CloudidentityV1beta1() cloudidentityv1beta1.CloudidentityV1b return c.cloudidentityV1beta1 } -// CloudidsV1alpha1 retrieves the CloudidsV1alpha1Client -func (c *Clientset) CloudidsV1alpha1() cloudidsv1alpha1.CloudidsV1alpha1Interface { - return c.cloudidsV1alpha1 +// CloudidsV1beta1 retrieves the CloudidsV1beta1Client +func (c *Clientset) CloudidsV1beta1() cloudidsv1beta1.CloudidsV1beta1Interface { + return c.cloudidsV1beta1 } // CloudiotV1alpha1 retrieves the CloudiotV1alpha1Client @@ -1135,7 +1135,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } - cs.cloudidsV1alpha1, err = cloudidsv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + cs.cloudidsV1beta1, err = cloudidsv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -1547,7 +1547,7 @@ func New(c rest.Interface) *Clientset { cs.cloudfunctionsV1beta1 = cloudfunctionsv1beta1.New(c) cs.cloudfunctions2V1alpha1 = cloudfunctions2v1alpha1.New(c) cs.cloudidentityV1beta1 = cloudidentityv1beta1.New(c) - cs.cloudidsV1alpha1 = cloudidsv1alpha1.New(c) + cs.cloudidsV1beta1 = cloudidsv1beta1.New(c) cs.cloudiotV1alpha1 = cloudiotv1alpha1.New(c) cs.cloudschedulerV1beta1 = cloudschedulerv1beta1.New(c) cs.cloudtasksV1alpha1 = cloudtasksv1alpha1.New(c) diff --git a/pkg/clients/generated/client/clientset/versioned/fake/clientset_generated.go b/pkg/clients/generated/client/clientset/versioned/fake/clientset_generated.go index 18c95e3de3..efbc71413e 100644 --- a/pkg/clients/generated/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/clients/generated/client/clientset/versioned/fake/clientset_generated.go @@ -77,8 +77,8 @@ import ( fakecloudfunctions2v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudfunctions2/v1alpha1/fake" cloudidentityv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudidentity/v1beta1" fakecloudidentityv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudidentity/v1beta1/fake" - cloudidsv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1" - fakecloudidsv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/fake" + cloudidsv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1" + fakecloudidsv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/fake" cloudiotv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudiot/v1alpha1" fakecloudiotv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudiot/v1alpha1/fake" cloudschedulerv1beta1 
"github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudscheduler/v1beta1" @@ -451,9 +451,9 @@ func (c *Clientset) CloudidentityV1beta1() cloudidentityv1beta1.CloudidentityV1b return &fakecloudidentityv1beta1.FakeCloudidentityV1beta1{Fake: &c.Fake} } -// CloudidsV1alpha1 retrieves the CloudidsV1alpha1Client -func (c *Clientset) CloudidsV1alpha1() cloudidsv1alpha1.CloudidsV1alpha1Interface { - return &fakecloudidsv1alpha1.FakeCloudidsV1alpha1{Fake: &c.Fake} +// CloudidsV1beta1 retrieves the CloudidsV1beta1Client +func (c *Clientset) CloudidsV1beta1() cloudidsv1beta1.CloudidsV1beta1Interface { + return &fakecloudidsv1beta1.FakeCloudidsV1beta1{Fake: &c.Fake} } // CloudiotV1alpha1 retrieves the CloudiotV1alpha1Client diff --git a/pkg/clients/generated/client/clientset/versioned/fake/register.go b/pkg/clients/generated/client/clientset/versioned/fake/register.go index 6ff456abe7..2c1c132322 100644 --- a/pkg/clients/generated/client/clientset/versioned/fake/register.go +++ b/pkg/clients/generated/client/clientset/versioned/fake/register.go @@ -49,7 +49,7 @@ import ( cloudfunctionsv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudfunctions/v1beta1" cloudfunctions2v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudfunctions2/v1alpha1" cloudidentityv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudidentity/v1beta1" - cloudidsv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudids/v1alpha1" + cloudidsv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudids/v1beta1" cloudiotv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudiot/v1alpha1" cloudschedulerv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudscheduler/v1beta1" cloudtasksv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudtasks/v1alpha1" @@ -178,7 +178,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ cloudfunctionsv1beta1.AddToScheme, cloudfunctions2v1alpha1.AddToScheme, cloudidentityv1beta1.AddToScheme, - cloudidsv1alpha1.AddToScheme, + cloudidsv1beta1.AddToScheme, cloudiotv1alpha1.AddToScheme, cloudschedulerv1beta1.AddToScheme, cloudtasksv1alpha1.AddToScheme, diff --git a/pkg/clients/generated/client/clientset/versioned/scheme/register.go b/pkg/clients/generated/client/clientset/versioned/scheme/register.go index 0eda9dfd9b..05dba3ff06 100644 --- a/pkg/clients/generated/client/clientset/versioned/scheme/register.go +++ b/pkg/clients/generated/client/clientset/versioned/scheme/register.go @@ -49,7 +49,7 @@ import ( cloudfunctionsv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudfunctions/v1beta1" cloudfunctions2v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudfunctions2/v1alpha1" cloudidentityv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudidentity/v1beta1" - cloudidsv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudids/v1alpha1" + cloudidsv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudids/v1beta1" cloudiotv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudiot/v1alpha1" 
cloudschedulerv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudscheduler/v1beta1" cloudtasksv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudtasks/v1alpha1" @@ -178,7 +178,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ cloudfunctionsv1beta1.AddToScheme, cloudfunctions2v1alpha1.AddToScheme, cloudidentityv1beta1.AddToScheme, - cloudidsv1alpha1.AddToScheme, + cloudidsv1beta1.AddToScheme, cloudiotv1alpha1.AddToScheme, cloudschedulerv1beta1.AddToScheme, cloudtasksv1alpha1.AddToScheme, diff --git a/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/cloudids_client.go b/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/cloudids_client.go similarity index 67% rename from pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/cloudids_client.go rename to pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/cloudids_client.go index 438a7ccae2..144df8afa3 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/cloudids_client.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/cloudids_client.go @@ -19,34 +19,34 @@ // Code generated by main. DO NOT EDIT. -package v1alpha1 +package v1beta1 import ( "net/http" - v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudids/v1alpha1" + v1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudids/v1beta1" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) -type CloudidsV1alpha1Interface interface { +type CloudidsV1beta1Interface interface { RESTClient() rest.Interface CloudIDSEndpointsGetter } -// CloudidsV1alpha1Client is used to interact with features provided by the cloudids.cnrm.cloud.google.com group. -type CloudidsV1alpha1Client struct { +// CloudidsV1beta1Client is used to interact with features provided by the cloudids.cnrm.cloud.google.com group. +type CloudidsV1beta1Client struct { restClient rest.Interface } -func (c *CloudidsV1alpha1Client) CloudIDSEndpoints(namespace string) CloudIDSEndpointInterface { +func (c *CloudidsV1beta1Client) CloudIDSEndpoints(namespace string) CloudIDSEndpointInterface { return newCloudIDSEndpoints(c, namespace) } -// NewForConfig creates a new CloudidsV1alpha1Client for the given config. +// NewForConfig creates a new CloudidsV1beta1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*CloudidsV1alpha1Client, error) { +func NewForConfig(c *rest.Config) (*CloudidsV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -58,9 +58,9 @@ func NewForConfig(c *rest.Config) (*CloudidsV1alpha1Client, error) { return NewForConfigAndClient(&config, httpClient) } -// NewForConfigAndClient creates a new CloudidsV1alpha1Client for the given config and http client. +// NewForConfigAndClient creates a new CloudidsV1beta1Client for the given config and http client. // Note the http client provided takes precedence over the configured transport values. 
-func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CloudidsV1alpha1Client, error) { +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CloudidsV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -69,12 +69,12 @@ func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CloudidsV1alpha1Cli if err != nil { return nil, err } - return &CloudidsV1alpha1Client{client}, nil + return &CloudidsV1beta1Client{client}, nil } -// NewForConfigOrDie creates a new CloudidsV1alpha1Client for the given config and +// NewForConfigOrDie creates a new CloudidsV1beta1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *CloudidsV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *CloudidsV1beta1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -82,13 +82,13 @@ func NewForConfigOrDie(c *rest.Config) *CloudidsV1alpha1Client { return client } -// New creates a new CloudidsV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *CloudidsV1alpha1Client { - return &CloudidsV1alpha1Client{c} +// New creates a new CloudidsV1beta1Client for the given RESTClient. +func New(c rest.Interface) *CloudidsV1beta1Client { + return &CloudidsV1beta1Client{c} } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() @@ -102,7 +102,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *CloudidsV1alpha1Client) RESTClient() rest.Interface { +func (c *CloudidsV1beta1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/cloudidsendpoint.go b/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/cloudidsendpoint.go similarity index 78% rename from pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/cloudidsendpoint.go rename to pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/cloudidsendpoint.go index 48e1a9496e..f3a1989e36 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/cloudidsendpoint.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/cloudidsendpoint.go @@ -19,13 +19,13 @@ // Code generated by main. DO NOT EDIT. -package v1alpha1 +package v1beta1 import ( "context" "time" - v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudids/v1alpha1" + v1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudids/v1beta1" scheme "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -41,15 +41,15 @@ type CloudIDSEndpointsGetter interface { // CloudIDSEndpointInterface has methods to work with CloudIDSEndpoint resources. 
type CloudIDSEndpointInterface interface { - Create(ctx context.Context, cloudIDSEndpoint *v1alpha1.CloudIDSEndpoint, opts v1.CreateOptions) (*v1alpha1.CloudIDSEndpoint, error) - Update(ctx context.Context, cloudIDSEndpoint *v1alpha1.CloudIDSEndpoint, opts v1.UpdateOptions) (*v1alpha1.CloudIDSEndpoint, error) - UpdateStatus(ctx context.Context, cloudIDSEndpoint *v1alpha1.CloudIDSEndpoint, opts v1.UpdateOptions) (*v1alpha1.CloudIDSEndpoint, error) + Create(ctx context.Context, cloudIDSEndpoint *v1beta1.CloudIDSEndpoint, opts v1.CreateOptions) (*v1beta1.CloudIDSEndpoint, error) + Update(ctx context.Context, cloudIDSEndpoint *v1beta1.CloudIDSEndpoint, opts v1.UpdateOptions) (*v1beta1.CloudIDSEndpoint, error) + UpdateStatus(ctx context.Context, cloudIDSEndpoint *v1beta1.CloudIDSEndpoint, opts v1.UpdateOptions) (*v1beta1.CloudIDSEndpoint, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.CloudIDSEndpoint, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.CloudIDSEndpointList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CloudIDSEndpoint, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CloudIDSEndpointList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CloudIDSEndpoint, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CloudIDSEndpoint, err error) CloudIDSEndpointExpansion } @@ -60,7 +60,7 @@ type cloudIDSEndpoints struct { } // newCloudIDSEndpoints returns a CloudIDSEndpoints -func newCloudIDSEndpoints(c *CloudidsV1alpha1Client, namespace string) *cloudIDSEndpoints { +func newCloudIDSEndpoints(c *CloudidsV1beta1Client, namespace string) *cloudIDSEndpoints { return &cloudIDSEndpoints{ client: c.RESTClient(), ns: namespace, @@ -68,8 +68,8 @@ func newCloudIDSEndpoints(c *CloudidsV1alpha1Client, namespace string) *cloudIDS } // Get takes name of the cloudIDSEndpoint, and returns the corresponding cloudIDSEndpoint object, and an error if there is any. -func (c *cloudIDSEndpoints) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CloudIDSEndpoint, err error) { - result = &v1alpha1.CloudIDSEndpoint{} +func (c *cloudIDSEndpoints) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CloudIDSEndpoint, err error) { + result = &v1beta1.CloudIDSEndpoint{} err = c.client.Get(). Namespace(c.ns). Resource("cloudidsendpoints"). @@ -81,12 +81,12 @@ func (c *cloudIDSEndpoints) Get(ctx context.Context, name string, options v1.Get } // List takes label and field selectors, and returns the list of CloudIDSEndpoints that match those selectors. -func (c *cloudIDSEndpoints) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CloudIDSEndpointList, err error) { +func (c *cloudIDSEndpoints) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CloudIDSEndpointList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.CloudIDSEndpointList{} + result = &v1beta1.CloudIDSEndpointList{} err = c.client.Get(). Namespace(c.ns). 
Resource("cloudidsendpoints"). @@ -113,8 +113,8 @@ func (c *cloudIDSEndpoints) Watch(ctx context.Context, opts v1.ListOptions) (wat } // Create takes the representation of a cloudIDSEndpoint and creates it. Returns the server's representation of the cloudIDSEndpoint, and an error, if there is any. -func (c *cloudIDSEndpoints) Create(ctx context.Context, cloudIDSEndpoint *v1alpha1.CloudIDSEndpoint, opts v1.CreateOptions) (result *v1alpha1.CloudIDSEndpoint, err error) { - result = &v1alpha1.CloudIDSEndpoint{} +func (c *cloudIDSEndpoints) Create(ctx context.Context, cloudIDSEndpoint *v1beta1.CloudIDSEndpoint, opts v1.CreateOptions) (result *v1beta1.CloudIDSEndpoint, err error) { + result = &v1beta1.CloudIDSEndpoint{} err = c.client.Post(). Namespace(c.ns). Resource("cloudidsendpoints"). @@ -126,8 +126,8 @@ func (c *cloudIDSEndpoints) Create(ctx context.Context, cloudIDSEndpoint *v1alph } // Update takes the representation of a cloudIDSEndpoint and updates it. Returns the server's representation of the cloudIDSEndpoint, and an error, if there is any. -func (c *cloudIDSEndpoints) Update(ctx context.Context, cloudIDSEndpoint *v1alpha1.CloudIDSEndpoint, opts v1.UpdateOptions) (result *v1alpha1.CloudIDSEndpoint, err error) { - result = &v1alpha1.CloudIDSEndpoint{} +func (c *cloudIDSEndpoints) Update(ctx context.Context, cloudIDSEndpoint *v1beta1.CloudIDSEndpoint, opts v1.UpdateOptions) (result *v1beta1.CloudIDSEndpoint, err error) { + result = &v1beta1.CloudIDSEndpoint{} err = c.client.Put(). Namespace(c.ns). Resource("cloudidsendpoints"). @@ -141,8 +141,8 @@ func (c *cloudIDSEndpoints) Update(ctx context.Context, cloudIDSEndpoint *v1alph // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *cloudIDSEndpoints) UpdateStatus(ctx context.Context, cloudIDSEndpoint *v1alpha1.CloudIDSEndpoint, opts v1.UpdateOptions) (result *v1alpha1.CloudIDSEndpoint, err error) { - result = &v1alpha1.CloudIDSEndpoint{} +func (c *cloudIDSEndpoints) UpdateStatus(ctx context.Context, cloudIDSEndpoint *v1beta1.CloudIDSEndpoint, opts v1.UpdateOptions) (result *v1beta1.CloudIDSEndpoint, err error) { + result = &v1beta1.CloudIDSEndpoint{} err = c.client.Put(). Namespace(c.ns). Resource("cloudidsendpoints"). @@ -183,8 +183,8 @@ func (c *cloudIDSEndpoints) DeleteCollection(ctx context.Context, opts v1.Delete } // Patch applies the patch and returns the patched cloudIDSEndpoint. -func (c *cloudIDSEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CloudIDSEndpoint, err error) { - result = &v1alpha1.CloudIDSEndpoint{} +func (c *cloudIDSEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CloudIDSEndpoint, err error) { + result = &v1beta1.CloudIDSEndpoint{} err = c.client.Patch(pt). Namespace(c.ns). Resource("cloudidsendpoints"). 
diff --git a/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/doc.go b/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/doc.go similarity index 98% rename from pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/doc.go rename to pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/doc.go index 61f2499ab1..0bf01d15b1 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/doc.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/doc.go @@ -20,4 +20,4 @@ // Code generated by main. DO NOT EDIT. // This package has the automatically generated typed clients. -package v1alpha1 +package v1beta1 diff --git a/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/fake/doc.go b/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/fake/doc.go similarity index 100% rename from pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/fake/doc.go rename to pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/fake/doc.go diff --git a/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/fake/fake_cloudids_client.go b/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/fake/fake_cloudids_client.go similarity index 77% rename from pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/fake/fake_cloudids_client.go rename to pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/fake/fake_cloudids_client.go index 40dc84df7c..b4f4cc28e5 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/fake/fake_cloudids_client.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/fake/fake_cloudids_client.go @@ -22,22 +22,22 @@ package fake import ( - v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1" + v1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) -type FakeCloudidsV1alpha1 struct { +type FakeCloudidsV1beta1 struct { *testing.Fake } -func (c *FakeCloudidsV1alpha1) CloudIDSEndpoints(namespace string) v1alpha1.CloudIDSEndpointInterface { +func (c *FakeCloudidsV1beta1) CloudIDSEndpoints(namespace string) v1beta1.CloudIDSEndpointInterface { return &FakeCloudIDSEndpoints{c, namespace} } // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
-func (c *FakeCloudidsV1alpha1) RESTClient() rest.Interface { +func (c *FakeCloudidsV1beta1) RESTClient() rest.Interface { var ret *rest.RESTClient return ret } diff --git a/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/fake/fake_cloudidsendpoint.go b/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/fake/fake_cloudidsendpoint.go similarity index 71% rename from pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/fake/fake_cloudidsendpoint.go rename to pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/fake/fake_cloudidsendpoint.go index a1d19c9db0..b74a5103d0 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/fake/fake_cloudidsendpoint.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/fake/fake_cloudidsendpoint.go @@ -24,7 +24,7 @@ package fake import ( "context" - v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudids/v1alpha1" + v1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudids/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" @@ -34,29 +34,29 @@ import ( // FakeCloudIDSEndpoints implements CloudIDSEndpointInterface type FakeCloudIDSEndpoints struct { - Fake *FakeCloudidsV1alpha1 + Fake *FakeCloudidsV1beta1 ns string } -var cloudidsendpointsResource = v1alpha1.SchemeGroupVersion.WithResource("cloudidsendpoints") +var cloudidsendpointsResource = v1beta1.SchemeGroupVersion.WithResource("cloudidsendpoints") -var cloudidsendpointsKind = v1alpha1.SchemeGroupVersion.WithKind("CloudIDSEndpoint") +var cloudidsendpointsKind = v1beta1.SchemeGroupVersion.WithKind("CloudIDSEndpoint") // Get takes name of the cloudIDSEndpoint, and returns the corresponding cloudIDSEndpoint object, and an error if there is any. -func (c *FakeCloudIDSEndpoints) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CloudIDSEndpoint, err error) { +func (c *FakeCloudIDSEndpoints) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CloudIDSEndpoint, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(cloudidsendpointsResource, c.ns, name), &v1alpha1.CloudIDSEndpoint{}) + Invokes(testing.NewGetAction(cloudidsendpointsResource, c.ns, name), &v1beta1.CloudIDSEndpoint{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.CloudIDSEndpoint), err + return obj.(*v1beta1.CloudIDSEndpoint), err } // List takes label and field selectors, and returns the list of CloudIDSEndpoints that match those selectors. -func (c *FakeCloudIDSEndpoints) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CloudIDSEndpointList, err error) { +func (c *FakeCloudIDSEndpoints) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CloudIDSEndpointList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(cloudidsendpointsResource, cloudidsendpointsKind, c.ns, opts), &v1alpha1.CloudIDSEndpointList{}) + Invokes(testing.NewListAction(cloudidsendpointsResource, cloudidsendpointsKind, c.ns, opts), &v1beta1.CloudIDSEndpointList{}) if obj == nil { return nil, err @@ -66,8 +66,8 @@ func (c *FakeCloudIDSEndpoints) List(ctx context.Context, opts v1.ListOptions) ( if label == nil { label = labels.Everything() } - list := &v1alpha1.CloudIDSEndpointList{ListMeta: obj.(*v1alpha1.CloudIDSEndpointList).ListMeta} - for _, item := range obj.(*v1alpha1.CloudIDSEndpointList).Items { + list := &v1beta1.CloudIDSEndpointList{ListMeta: obj.(*v1beta1.CloudIDSEndpointList).ListMeta} + for _, item := range obj.(*v1beta1.CloudIDSEndpointList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -83,43 +83,43 @@ func (c *FakeCloudIDSEndpoints) Watch(ctx context.Context, opts v1.ListOptions) } // Create takes the representation of a cloudIDSEndpoint and creates it. Returns the server's representation of the cloudIDSEndpoint, and an error, if there is any. -func (c *FakeCloudIDSEndpoints) Create(ctx context.Context, cloudIDSEndpoint *v1alpha1.CloudIDSEndpoint, opts v1.CreateOptions) (result *v1alpha1.CloudIDSEndpoint, err error) { +func (c *FakeCloudIDSEndpoints) Create(ctx context.Context, cloudIDSEndpoint *v1beta1.CloudIDSEndpoint, opts v1.CreateOptions) (result *v1beta1.CloudIDSEndpoint, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(cloudidsendpointsResource, c.ns, cloudIDSEndpoint), &v1alpha1.CloudIDSEndpoint{}) + Invokes(testing.NewCreateAction(cloudidsendpointsResource, c.ns, cloudIDSEndpoint), &v1beta1.CloudIDSEndpoint{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.CloudIDSEndpoint), err + return obj.(*v1beta1.CloudIDSEndpoint), err } // Update takes the representation of a cloudIDSEndpoint and updates it. Returns the server's representation of the cloudIDSEndpoint, and an error, if there is any. -func (c *FakeCloudIDSEndpoints) Update(ctx context.Context, cloudIDSEndpoint *v1alpha1.CloudIDSEndpoint, opts v1.UpdateOptions) (result *v1alpha1.CloudIDSEndpoint, err error) { +func (c *FakeCloudIDSEndpoints) Update(ctx context.Context, cloudIDSEndpoint *v1beta1.CloudIDSEndpoint, opts v1.UpdateOptions) (result *v1beta1.CloudIDSEndpoint, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cloudidsendpointsResource, c.ns, cloudIDSEndpoint), &v1alpha1.CloudIDSEndpoint{}) + Invokes(testing.NewUpdateAction(cloudidsendpointsResource, c.ns, cloudIDSEndpoint), &v1beta1.CloudIDSEndpoint{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.CloudIDSEndpoint), err + return obj.(*v1beta1.CloudIDSEndpoint), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCloudIDSEndpoints) UpdateStatus(ctx context.Context, cloudIDSEndpoint *v1alpha1.CloudIDSEndpoint, opts v1.UpdateOptions) (*v1alpha1.CloudIDSEndpoint, error) { +func (c *FakeCloudIDSEndpoints) UpdateStatus(ctx context.Context, cloudIDSEndpoint *v1beta1.CloudIDSEndpoint, opts v1.UpdateOptions) (*v1beta1.CloudIDSEndpoint, error) { obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(cloudidsendpointsResource, "status", c.ns, cloudIDSEndpoint), &v1alpha1.CloudIDSEndpoint{}) + Invokes(testing.NewUpdateSubresourceAction(cloudidsendpointsResource, "status", c.ns, cloudIDSEndpoint), &v1beta1.CloudIDSEndpoint{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.CloudIDSEndpoint), err + return obj.(*v1beta1.CloudIDSEndpoint), err } // Delete takes name of the cloudIDSEndpoint and deletes it. Returns an error if one occurs. func (c *FakeCloudIDSEndpoints) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(cloudidsendpointsResource, c.ns, name, opts), &v1alpha1.CloudIDSEndpoint{}) + Invokes(testing.NewDeleteActionWithOptions(cloudidsendpointsResource, c.ns, name, opts), &v1beta1.CloudIDSEndpoint{}) return err } @@ -128,17 +128,17 @@ func (c *FakeCloudIDSEndpoints) Delete(ctx context.Context, name string, opts v1 func (c *FakeCloudIDSEndpoints) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { action := testing.NewDeleteCollectionAction(cloudidsendpointsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha1.CloudIDSEndpointList{}) + _, err := c.Fake.Invokes(action, &v1beta1.CloudIDSEndpointList{}) return err } // Patch applies the patch and returns the patched cloudIDSEndpoint. -func (c *FakeCloudIDSEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CloudIDSEndpoint, err error) { +func (c *FakeCloudIDSEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CloudIDSEndpoint, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cloudidsendpointsResource, c.ns, name, pt, data, subresources...), &v1alpha1.CloudIDSEndpoint{}) + Invokes(testing.NewPatchSubresourceAction(cloudidsendpointsResource, c.ns, name, pt, data, subresources...), &v1beta1.CloudIDSEndpoint{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.CloudIDSEndpoint), err + return obj.(*v1beta1.CloudIDSEndpoint), err } diff --git a/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/generated_expansion.go b/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/generated_expansion.go similarity index 98% rename from pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/generated_expansion.go rename to pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/generated_expansion.go index a65e2db41b..73c8eb8442 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1alpha1/generated_expansion.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/cloudids/v1beta1/generated_expansion.go @@ -19,6 +19,6 @@ // Code generated by main. DO NOT EDIT. 
-package v1alpha1 +package v1beta1 type CloudIDSEndpointExpansion interface{} diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md index 210c7ed632..687313a46c 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md @@ -383,7 +383,7 @@ spec: severity: INFORMATIONAL location: us-west2-a projectRef: - external: projects/${PROJECT_ID?} + external: ${PROJECT_ID?} --- apiVersion: compute.cnrm.cloud.google.com/v1beta1 kind: ComputeNetwork From e11ae7528bdc53a39a875b11e256a6683b332aeb Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Thu, 20 Jun 2024 22:51:16 +0000 Subject: [PATCH 018/101] chore: update acronyms Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- tests/apichecks/testdata/exceptions/acronyms.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/apichecks/testdata/exceptions/acronyms.txt b/tests/apichecks/testdata/exceptions/acronyms.txt index 73a1a6ac41..84649cd7a1 100644 --- a/tests/apichecks/testdata/exceptions/acronyms.txt +++ b/tests/apichecks/testdata/exceptions/acronyms.txt @@ -77,7 +77,8 @@ [acronyms] crd=cloudfunctionsfunctions.cloudfunctions.cnrm.cloud.google.com version=v1beta1: field ".spec.sourceArchiveUrl" should be ".spec.sourceArchiveURL" [acronyms] crd=cloudfunctionsfunctions.cloudfunctions.cnrm.cloud.google.com version=v1beta1: field ".status.sourceRepository.deployedUrl" should be ".status.sourceRepository.deployedURL" [acronyms] crd=cloudfunctionsfunctions.cloudfunctions.cnrm.cloud.google.com version=v1beta1: field ".status.versionId" should be ".status.versionID" -[acronyms] crd=cloudidsendpoints.cloudids.cnrm.cloud.google.com version=v1alpha1: field ".status.endpointIp" should be ".status.endpointIP" +[acronyms] crd=cloudidsendpoints.cloudids.cnrm.cloud.google.com version=v1alpha1: field ".status.observedState.endpointIp" should be ".status.observedState.endpointIP" +[acronyms] crd=cloudidsendpoints.cloudids.cnrm.cloud.google.com version=v1beta1: field ".status.observedState.endpointIp" should be ".status.observedState.endpointIP" [acronyms] crd=cloudiotdevices.cloudiot.cnrm.cloud.google.com version=v1alpha1: field ".spec.gatewayConfig.lastAccessedGatewayId" should be ".spec.gatewayConfig.lastAccessedGatewayID" [acronyms] crd=cloudiotdevices.cloudiot.cnrm.cloud.google.com version=v1alpha1: field ".status.numId" should be ".status.numID" [acronyms] crd=cloudschedulerjobs.cloudscheduler.cnrm.cloud.google.com version=v1beta1: field ".spec.appEngineHttpTarget" should be ".spec.appEngineHTTPTarget" From 0653658ef430127afc6b1cc1df8b199728f14131 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Mon, 24 Jun 2024 22:49:02 +0000 Subject: [PATCH 019/101] chore: fixtures use v1beta1 Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- .../basic/cloudids/v1alpha1/cloudidsendpoint/create.yaml | 2 +- .../basic/cloudids/v1alpha1/cloudidsendpoint/update.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/create.yaml b/pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/create.yaml index 
24480b6c74..418823fb9a 100644 --- a/pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/create.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: cloudids.cnrm.cloud.google.com/v1alpha1 +apiVersion: cloudids.cnrm.cloud.google.com/v1beta1 kind: CloudIDSEndpoint metadata: name: cloudidsendpoint-${uniqueId} diff --git a/pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/update.yaml b/pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/update.yaml index d86f5fdcff..53b1c60932 100644 --- a/pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/update.yaml +++ b/pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/update.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: cloudids.cnrm.cloud.google.com/v1alpha1 +apiVersion: cloudids.cnrm.cloud.google.com/v1beta1 kind: CloudIDSEndpoint metadata: name: cloudidsendpoint-${uniqueId} From 3af23aeeb60faf04cceb32c17c6e62b15e797180 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Mon, 24 Jun 2024 23:14:07 +0000 Subject: [PATCH 020/101] tests: resourceskeleton Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- pkg/resourceskeleton/testdata/asset-skeleton.yaml | 15 +++++++++++++++ pkg/resourceskeleton/testdata/uri-skeleton.yaml | 11 +++++++++++ 2 files changed, 26 insertions(+) diff --git a/pkg/resourceskeleton/testdata/asset-skeleton.yaml b/pkg/resourceskeleton/testdata/asset-skeleton.yaml index 2174a99693..f6160f2dcc 100644 --- a/pkg/resourceskeleton/testdata/asset-skeleton.yaml +++ b/pkg/resourceskeleton/testdata/asset-skeleton.yaml @@ -1204,3 +1204,18 @@ resourceConfigId: VertexAIEndpoint # not supported by asset inventory - resourceConfigId: VertexAIIndex +- asset: + ancestors: + - projects/1234567890 + name: //ids.googleapis.com/projects/kcc-test/locations/us-west2-a/endpoints/cloudidsendpoint-ppdtifqxy4vqi4ezx7la + asset_type: cloudids.googleapis.com/Endpoint + expectedSkeleton: + apiVersion: cloudids.cnrm.cloud.google.com/v1beta1 + kind: CloudIDSEndpoint + metadata: + name: cloudidsendpoint-ppdtifqxy4vqi4ezx7la + spec: + location: us-west2-a + projectRef: + external: kcc-test + resourceConfigId: CloudIDSEndpoint diff --git a/pkg/resourceskeleton/testdata/uri-skeleton.yaml b/pkg/resourceskeleton/testdata/uri-skeleton.yaml index b5e524ee2e..e85eac0ad6 100644 --- a/pkg/resourceskeleton/testdata/uri-skeleton.yaml +++ b/pkg/resourceskeleton/testdata/uri-skeleton.yaml @@ -1142,3 +1142,14 @@ region: us-central1 ResourceConfigId: VertexAIIndex URI: "https://us-central1-aiplatform.googleapis.com/v1beta1/projects/kcc-test/locations/us-central1/indexes/vertexaiindex-test" +- ExpectedSkeleton: + apiVersion: cloudids.cnrm.cloud.google.com/v1beta1 + kind: CloudIDSEndpoint + metadata: + name: cloudidsendpoint-ppdtifqxy4vqi4ezx7la + spec: + projectRef: + external: kcc-test + region: us-west2-a + ResourceConfigId: CloudIDSEndpoint + URI: "https://ids.googleapis.com/projects/kcc-test/locations/us-west2-a/endpoints/cloudidsendpoint-ppdtifqxy4vqi4ezx7la" \ No newline at end of file From 081473e2b9fdabf95aa5b055d150ddd91a729467 Mon Sep 17 00:00:00 2001 From: zicongmei Date: Tue, 25 Jun 2024 11:21:53 -0700 Subject: [PATCH 021/101] Update 
the composition samples --- .../samples/AttachedAKS/01-composition.yaml | 1 + .../samples/AttachedAKS/aks-alice.sh | 10 +++---- .../samples/AttachedEKS/eks-alice.sh | 28 +++++++++---------- 3 files changed, 20 insertions(+), 19 deletions(-) diff --git a/experiments/compositions/samples/AttachedAKS/01-composition.yaml b/experiments/compositions/samples/AttachedAKS/01-composition.yaml index d969a61560..7215a0f513 100644 --- a/experiments/compositions/samples/AttachedAKS/01-composition.yaml +++ b/experiments/compositions/samples/AttachedAKS/01-composition.yaml @@ -50,6 +50,7 @@ spec: group: "" version: v1 kind: ConfigMap + resource: issuer nameSuffix: "-issuer" fieldRef: - path: ".data.oidc" diff --git a/experiments/compositions/samples/AttachedAKS/aks-alice.sh b/experiments/compositions/samples/AttachedAKS/aks-alice.sh index 9702524e93..cb9fd01b1f 100644 --- a/experiments/compositions/samples/AttachedAKS/aks-alice.sh +++ b/experiments/compositions/samples/AttachedAKS/aks-alice.sh @@ -34,15 +34,15 @@ kubectl apply -f /tmp/install-agent-${AKS_NAME}.yaml ## Commands to check progress -kubectl get AttachedAKS -n alice-2 +kubectl get AttachedAKS -n team-aks kubectl get ResourceGroup.resources.azure.com \ - -n alice-2 + -n team-aks kubectl get managedcluster.containerservice.azure.com \ - -n alice-2 + -n team-aks -kubectl get cm -n alice-2 +kubectl get cm -n team-aks kubectl get containerattachedcluster \ - -n alice-2 + -n team-aks diff --git a/experiments/compositions/samples/AttachedEKS/eks-alice.sh b/experiments/compositions/samples/AttachedEKS/eks-alice.sh index 2fa26fb92d..1b80df6dc6 100644 --- a/experiments/compositions/samples/AttachedEKS/eks-alice.sh +++ b/experiments/compositions/samples/AttachedEKS/eks-alice.sh @@ -33,43 +33,43 @@ kubectl apply -f /tmp/install-agent-${EKS_NAME}.yaml ## Commands to check progress -kubectl get AttachedEKS -n alice-1 +kubectl get AttachedEKS -n team-eks kubectl get vpc.ec2.services.k8s.aws \ - -n alice-1 + -n team-eks kubectl get InternetGateway.ec2.services.k8s.aws \ - -n alice-1 + -n team-eks kubectl get RouteTable.ec2.services.k8s.aws \ - -n alice-1 + -n team-eks kubectl get subnet.ec2.services.k8s.aws \ - -n alice-1 + -n team-eks kubectl get ElasticIPAddress.ec2.services.k8s.aws \ - -n alice-1 + -n team-eks kubectl get NATGateway.ec2.services.k8s.aws \ - -n alice-1 + -n team-eks kubectl get role.iam.services.k8s.aws \ - -n alice-1 + -n team-eks kubectl get cluster.eks.services.k8s.aws \ - -n alice-1 + -n team-eks kubectl get Nodegroup.eks.services.k8s.aws \ - -n alice-1 + -n team-eks kubectl get AccessEntry.eks.services.k8s.aws \ - -n alice-1 + -n team-eks kubectl get FieldExport.services.k8s.aws \ - -n alice-1 + -n team-eks kubectl get cm \ - -n alice-1 + -n team-eks kubectl get containerattachedcluster \ - -n alice-1 \ No newline at end of file + -n team-eks \ No newline at end of file From 66843992d53c9a3e6cddac6b7bf91bfbf47417c1 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Tue, 25 Jun 2024 15:20:29 +0000 Subject: [PATCH 022/101] chore: update sample names Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- .../cloudids_v1beta1_cloudidsendpoint.yaml | 2 +- ...dep.yaml => cloudids_v1beta1_cloudnetwork.yaml} | 2 +- ...p.yaml => cloudids_v1beta1_computeaddress.yaml} | 4 ++-- ...udids_v1beta1_servicenetworkingconnection.yaml} | 6 +++--- .../resource-docs/cloudids/cloudidsendpoint.md | 14 +++++++------- .../resource-reference/overview.md | 8 ++++---- 6 files changed, 18 insertions(+), 18 
deletions(-) rename config/samples/resources/cloudidsendpoint/{cloudids_v1beta1_cloudnetwork-dep.yaml => cloudids_v1beta1_cloudnetwork.yaml} (95%) rename config/samples/resources/cloudidsendpoint/{cloudids_v1beta1_computeaddress-dep.yaml => cloudids_v1beta1_computeaddress.yaml} (92%) rename config/samples/resources/cloudidsendpoint/{cloudids_v1beta1_servicenetworkingconnection-dep.yaml => cloudids_v1beta1_servicenetworkingconnection.yaml} (88%) diff --git a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudidsendpoint.yaml b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudidsendpoint.yaml index 17acff903d..8e3353a272 100644 --- a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudidsendpoint.yaml +++ b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudidsendpoint.yaml @@ -18,7 +18,7 @@ metadata: name: cloudidsendpoint-sample spec: networkRef: - name: computenetwork-dep + name: computenetwork-dep1 severity: INFORMATIONAL location: us-west2-a projectRef: diff --git a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudnetwork-dep.yaml b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudnetwork.yaml similarity index 95% rename from config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudnetwork-dep.yaml rename to config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudnetwork.yaml index d10a84d5c0..dbef8e6d4e 100644 --- a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudnetwork-dep.yaml +++ b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudnetwork.yaml @@ -15,6 +15,6 @@ apiVersion: compute.cnrm.cloud.google.com/v1beta1 kind: ComputeNetwork metadata: - name: computenetwork-dep + name: cloudidsendpoint-dep1 spec: autoCreateSubnetworks: false \ No newline at end of file diff --git a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_computeaddress-dep.yaml b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_computeaddress.yaml similarity index 92% rename from config/samples/resources/cloudidsendpoint/cloudids_v1beta1_computeaddress-dep.yaml rename to config/samples/resources/cloudidsendpoint/cloudids_v1beta1_computeaddress.yaml index 21a04ddd7b..bc998f0963 100644 --- a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_computeaddress-dep.yaml +++ b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_computeaddress.yaml @@ -15,11 +15,11 @@ apiVersion: compute.cnrm.cloud.google.com/v1beta1 kind: ComputeAddress metadata: - name: computeaddress-dep + name: cloudidsendpoint-dep2 spec: location: global addressType: INTERNAL networkRef: - name: computenetwork-dep + name: cloudidsendpoint-dep1 prefixLength: 16 purpose: VPC_PEERING diff --git a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_servicenetworkingconnection-dep.yaml b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_servicenetworkingconnection.yaml similarity index 88% rename from config/samples/resources/cloudidsendpoint/cloudids_v1beta1_servicenetworkingconnection-dep.yaml rename to config/samples/resources/cloudidsendpoint/cloudids_v1beta1_servicenetworkingconnection.yaml index 77a0d26257..fa102f5c6f 100644 --- a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_servicenetworkingconnection-dep.yaml +++ b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_servicenetworkingconnection.yaml @@ -15,10 +15,10 @@ apiVersion: servicenetworking.cnrm.cloud.google.com/v1beta1 kind: ServiceNetworkingConnection metadata: - name: 
servicenetworkingconnection-dep + name: cloudidsendpoint-dep3 spec: networkRef: - name: computenetwork-dep + name: cloudidsendpoint-dep1 reservedPeeringRanges: - - name: computeaddress-dep + - name: cloudidsendpoint-dep2 service: servicenetworking.googleapis.com \ No newline at end of file diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md index 687313a46c..6e85c18173 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md @@ -379,7 +379,7 @@ metadata: name: cloudidsendpoint-sample spec: networkRef: - name: computenetwork-dep + name: cloudidsendpoint-dep1 severity: INFORMATIONAL location: us-west2-a projectRef: @@ -388,31 +388,31 @@ spec: apiVersion: compute.cnrm.cloud.google.com/v1beta1 kind: ComputeNetwork metadata: - name: computenetwork-dep + name: cloudidsendpoint-dep1 spec: autoCreateSubnetworks: false --- apiVersion: compute.cnrm.cloud.google.com/v1beta1 kind: ComputeAddress metadata: - name: computeaddress-dep + name: cloudidsendpoint-dep2 spec: location: global addressType: INTERNAL networkRef: - name: computenetwork-dep + name: cloudidsendpoint-dep1 prefixLength: 16 purpose: VPC_PEERING --- apiVersion: servicenetworking.cnrm.cloud.google.com/v1beta1 kind: ServiceNetworkingConnection metadata: - name: servicenetworkingconnection-dep + name: cloudidsendpoint-dep3 spec: networkRef: - name: computenetwork-dep + name: cloudidsendpoint-dep1 reservedPeeringRanges: - - name: computeaddress-dep + - name: cloudidsendpoint-dep2 service: servicenetworking.googleapis.com ``` diff --git a/scripts/generate-google3-docs/resource-reference/overview.md b/scripts/generate-google3-docs/resource-reference/overview.md index ed316d4355..c318350d25 100644 --- a/scripts/generate-google3-docs/resource-reference/overview.md +++ b/scripts/generate-google3-docs/resource-reference/overview.md @@ -129,6 +129,10 @@ issues for {{product_name_short}}. {{cloudid_name}} CloudIdentityMembership + + {{cloudids_endpoint_name}} + CloudIDSEndpoint + {{scheduler_name}} CloudSchedulerJob @@ -842,10 +846,6 @@ issues for {{product_name_short}}. 
{{serverless_vpc_access_name}} VPCAccessConnector - - {{serverless_vpc_access_name}} - CloudIDSEndpoint - From 96ee7388c3f924cb547dcfeece60020698aaa43e Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Tue, 25 Jun 2024 15:34:40 +0000 Subject: [PATCH 023/101] chore: restore autogen servicemappings Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- .../cloudids_v1beta1_cloudidsendpoint.yaml | 2 +- .../resource-autogen/allowlist/allowlist.go | 1 + .../generated/servicemappings/cloudids.yaml | 56 +++++++++++++++++++ .../embed/generated/assets_vfsdata.go | 8 +++ 4 files changed, 66 insertions(+), 1 deletion(-) create mode 100644 scripts/resource-autogen/generated/servicemappings/cloudids.yaml diff --git a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudidsendpoint.yaml b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudidsendpoint.yaml index 8e3353a272..f592b9d34f 100644 --- a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudidsendpoint.yaml +++ b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudidsendpoint.yaml @@ -18,7 +18,7 @@ metadata: name: cloudidsendpoint-sample spec: networkRef: - name: computenetwork-dep1 + name: cloudidsendpoint-dep1 severity: INFORMATIONAL location: us-west2-a projectRef: diff --git a/scripts/resource-autogen/allowlist/allowlist.go b/scripts/resource-autogen/allowlist/allowlist.go index b50e512990..41794d6bc1 100644 --- a/scripts/resource-autogen/allowlist/allowlist.go +++ b/scripts/resource-autogen/allowlist/allowlist.go @@ -69,6 +69,7 @@ var ( "cloud_asset/google_cloud_asset_folder_feed", "cloud_asset/google_cloud_asset_organization_feed", "cloud_asset/google_cloud_asset_project_feed", + "cloud_ids/google_cloud_ids_endpoint", "cloud_tasks/google_cloud_tasks_queue", "cloudfunctions2/google_cloudfunctions2_function", "cloudiot/google_cloudiot_device", diff --git a/scripts/resource-autogen/generated/servicemappings/cloudids.yaml b/scripts/resource-autogen/generated/servicemappings/cloudids.yaml new file mode 100644 index 0000000000..7f469a61d0 --- /dev/null +++ b/scripts/resource-autogen/generated/servicemappings/cloudids.yaml @@ -0,0 +1,56 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: core.cnrm.cloud.google.com/v1alpha1 +kind: ServiceMapping +metadata: + name: cloudids.cnrm.cloud.google.com + namespace: cnrm-system +spec: + name: CloudIDS + version: v1beta1 + serviceHostName: cloudids.googleapis.com + resources: + - name: google_cloud_ids_endpoint + kind: CloudIDSEndpoint + autoGenerated: true + idTemplate: "projects/{{project}}/locations/{{location}}/endpoints/{{name}}" + idTemplateCanBeUsedToMatchResourceName: false + resourceAvailableInAssetInventory: false + metadataMapping: + name: name + resourceID: + targetField: name + hierarchicalReferences: + - type: project + key: projectRef + resourceReferences: + - tfField: project + key: projectRef + description: |- + The project that this resource belongs to. + gvk: + kind: Project + version: v1beta1 + group: resourcemanager.cnrm.cloud.google.com + - tfField: network + key: networkRef + description: |- + Immutable. Name of the VPC network that is connected + to the IDS endpoint. This can either contain the VPC network name + itself (like "src-net") or the full URL to the network (like "projects/{project_id}/global/networks/src-net"). + gvk: + kind: ComputeNetwork + version: v1beta1 + group: compute.cnrm.cloud.google.com diff --git a/scripts/resource-autogen/servicemapping/embed/generated/assets_vfsdata.go b/scripts/resource-autogen/servicemapping/embed/generated/assets_vfsdata.go index 5198091c6f..9b27216f3e 100644 --- a/scripts/resource-autogen/servicemapping/embed/generated/assets_vfsdata.go +++ b/scripts/resource-autogen/servicemapping/embed/generated/assets_vfsdata.go @@ -199,6 +199,13 @@ var Assets = func() http.FileSystem { compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x54\x4d\x6f\xe2\x48\x10\xbd\xfb\x57\x3c\xc1\x65\x57\x4a\x20\xc9\xd1\xab\xd5\x8a\x21\x99\x8c\xb5\x09\x91\x80\xec\x68\x4e\x51\x61\x97\xed\x52\xda\xdd\xbd\xdd\x6d\x18\x84\xf8\xef\x2b\x7f\x90\x40\x32\x52\xe6\xb2\xd2\xf8\x82\xa8\x7a\xf5\xea\xd5\xab\xee\x1e\x62\x6a\xec\xd6\x49\x51\x06\x5c\x5d\x5c\x5d\xe1\xd6\x98\x42\x31\xee\xee\xa6\xd1\x30\x1a\xe2\x4e\x52\xd6\x9e\x33\xd4\x3a\x63\x87\x50\x32\x26\x96\xd2\x92\x0f\x99\x33\xfc\xc3\xce\x8b\xd1\xb8\x1a\x5d\xe0\xb7\x06\x30\xe8\x53\x83\xdf\xff\x88\x86\xd8\x9a\x1a\x15\x6d\xa1\x4d\x40\xed\x19\xa1\x14\x8f\x5c\x14\x83\xbf\xa7\x6c\x03\x44\x23\x35\x95\x55\x42\x3a\x65\x6c\x24\x94\x6d\x9b\x9e\x64\x14\x0d\xf1\xad\xa7\x30\xab\x40\xa2\x41\x48\x8d\xdd\xc2\xe4\xc7\x38\x50\x68\x05\xb7\x5f\x19\x82\x8d\xc7\xe3\xcd\x66\x33\xa2\x56\xed\xc8\xb8\x62\xac\x3a\xa4\x1f\xdf\x25\xd3\x9b\xd9\xe2\xe6\xfc\x6a\x74\xd1\xd6\x3c\x6a\xc5\xde\xc3\xf1\xbf\xb5\x38\xce\xb0\xda\x82\xac\x55\x92\xd2\x4a\x31\x14\x6d\x60\x1c\xa8\x70\xcc\x19\x82\x69\x04\x6f\x9c\x04\xd1\xc5\x19\xbc\xc9\xc3\x86\x1c\x47\x43\x64\xe2\x83\x93\x55\x1d\x4e\xdc\x3a\xc8\x13\x7f\x02\x30\x1a\xa4\x31\x98\x2c\x90\x2c\x06\xf8\x34\x59\x24\x8b\xb3\x68\x88\xaf\xc9\xf2\xcb\xc3\xe3\x12\x5f\x27\xf3\xf9\x64\xb6\x4c\x6e\x16\x78\x98\x63\xfa\x30\xbb\x4e\x96\xc9\xc3\x6c\x81\x87\xcf\x98\xcc\xbe\xe1\xef\x64\x76\x7d\x06\x96\x50\xb2\x03\x7f\xb7\xae\xd1\x6f\x1c\xa4\xf1\x91\xb3\xc6\xb4\x05\xf3\x89\x80\xdc\x74\x82\xbc\xe5\x54\x72\x49\xa1\x48\x17\x35\x15\x8c\xc2\xac\xd9\x69\xd1\x05\x2c\xbb\x4a\x7c\xb3\x4d\x0f\xd2\x59\x34\x84\x92\x4a\x02\x85\x36\xf2\x6e\xa8\x51\x14\x91\x95\x7e\xff\x31\x52\xe3\x78\x94\x6a\x57\x8d\x52\x65\xea\x6c\x54\xb4\x47\x69\x94\x9a\x6a\xbc\xbe\x24\x65\x4b\xba\x8c\x9e\x45\x67\x31\x16\xec\xd6\x92\xf2\x3d\x59\x2b\xba\x88\x2a\x0e\x94\x51\xa0\x38\x02\x34\x55\x1c\xa3\x25\x90\x8c\x75\x90\xb0\xfd\x31\x67\x8f\xf5\x96\xd2\xa6\x40\xbb\xea\xdc\x6f\x7d\xe0\x2a\x6a
\x26\x7c\xa5\x9a\x36\x75\x49\x4f\x15\x01\xeb\x83\xdc\xf5\xe5\x8a\x03\x5d\x46\x80\xef\xe4\x7c\x31\x3e\xcc\x7e\xd0\xbe\xeb\x49\x56\x7c\xdf\xd7\xb1\x37\xb5\x4b\xd9\x37\x5d\x80\xf3\xbe\x53\x87\x7b\x6a\x8b\x9f\x0e\xd5\x4f\x85\x33\xb5\x8d\xba\x73\xd9\x4d\x7f\xa2\xe8\xf6\x28\x4d\x75\x30\xb7\xac\xd9\x51\xe0\x2c\x46\x70\x35\xf7\x19\xc9\x96\x5c\x59\x45\x81\x63\x0c\x76\xbb\xa6\xdf\x7e\x3f\x78\x97\x9c\x92\xfe\xc4\x8f\x9e\xb3\xa5\xb9\xa7\x90\x96\xf3\x5e\x68\x37\x55\x4e\xca\x1f\x08\x0f\x23\x4c\xd6\x24\xaa\x39\xe6\x89\x9e\x78\xcf\x21\xd1\x6b\xd6\xc1\xb8\xed\x29\xfc\xb0\xa1\x7e\x63\x71\x1f\x06\x14\xad\x58\xf9\xb8\xff\xed\xc3\x8d\x9f\xec\x5e\x26\x49\xae\x3f\x0b\xab\x2c\x6e\x6d\x7a\xd3\x3f\xb9\x7e\xe5\x0a\xe4\x0a\x0e\xef\xb1\xc0\x9a\x54\xcd\x47\x0e\xb4\x9e\xfa\xbf\x44\x4b\x10\x52\xad\x85\x53\xa3\x73\x29\xfe\xdc\xed\xfa\x60\xe7\xfb\x53\xda\x86\xf7\xfb\xf1\x6e\xd7\x92\x1c\x5c\xfb\x89\x9d\x3d\x55\x5c\xad\xd8\xf9\x52\x3e\x5a\xdf\xfd\x5b\xe0\xaf\xbb\xc8\xff\x7d\x35\xbb\x5d\x6b\xde\x7e\x3f\x7e\xb5\xcf\xbf\x75\xff\xb5\xc9\x9c\x73\x76\xac\x5f\xee\x52\xb7\x9b\x67\xde\xc6\x68\x79\xe6\x9c\xbf\xc4\x81\x90\xf7\x12\x8e\x2f\x55\xf3\x15\xeb\xe7\xf8\xe8\xef\x87\x37\xad\xd7\xfe\xfe\x29\x38\xa2\x6c\xe0\x3f\xfb\x12\x7d\xec\x13\x60\xc9\xb1\x0e\xfd\x69\xf8\x2f\x00\x00\xff\xff\x84\x11\x85\xbe\x78\x07\x00\x00"), }, + "/cloudids.yaml": &vfsgen۰CompressedFileInfo{ + name: "cloudids.yaml", + modTime: time.Time{}, + uncompressedSize: 2016, + + compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x55\x4d\x6f\xdb\x38\x10\xbd\xeb\x57\x0c\xec\x4b\x0b\x24\x52\x92\xa3\xf7\xe4\x3a\x69\x2b\x6c\xea\x14\x96\xd3\xa2\xa7\x60\x4c\x8d\xa4\xd9\x50\x24\x97\x1c\xd9\x35\xb2\xfe\xef\x0b\x7d\x25\x71\xd2\x00\xd1\xc1\x20\x86\x6f\xde\x3c\xce\x1b\xd2\x53\x58\x58\xb7\xf7\x5c\x56\x02\x17\x67\x17\x17\xf0\xc5\xda\x52\x13\x5c\x5f\x2f\xa2\x69\x34\x85\x6b\x56\x64\x02\xe5\xd0\x98\x9c\x3c\x48\x45\x30\x77\xa8\x2a\x1a\x77\x4e\xe0\x07\xf9\xc0\xd6\xc0\x45\x7c\x06\x1f\x5a\xc0\x64\xd8\x9a\x7c\xfc\x2b\x9a\xc2\xde\x36\x50\xe3\x1e\x8c\x15\x68\x02\x81\x54\x1c\xa0\x60\x4d\x40\xbf\x15\x39\x01\x36\xa0\x6c\xed\x34\xa3\x51\x04\x3b\x96\xaa\x2b\x33\x90\xc4\xd1\x14\x7e\x0d\x14\x76\x23\xc8\x06\x10\x94\x75\x7b\xb0\xc5\x73\x1c\xa0\x74\x82\xbb\xaf\x12\x71\xb3\x24\xd9\xed\x76\x31\x76\x6a\x63\xeb\xcb\x44\xf7\xc8\x90\x5c\xa7\x8b\xab\x65\x76\x75\x7a\x11\x9f\x75\x39\xb7\x46\x53\x08\xe0\xe9\xdf\x86\x3d\xe5\xb0\xd9\x03\x3a\xa7\x59\xe1\x46\x13\x68\xdc\x81\xf5\x80\xa5\x27\xca\x41\x6c\x2b\x78\xe7\x59\xd8\x94\x27\x10\x6c\x21\x3b\xf4\x14\x4d\x21\xe7\x20\x9e\x37\x8d\x1c\x75\x6b\x94\xc7\xe1\x08\x60\x0d\xa0\x81\xc9\x3c\x83\x34\x9b\xc0\xa7\x79\x96\x66\x27\xd1\x14\x7e\xa6\xeb\xaf\x37\xb7\x6b\xf8\x39\x5f\xad\xe6\xcb\x75\x7a\x95\xc1\xcd\x0a\x16\x37\xcb\xcb\x74\x9d\xde\x2c\x33\xb8\xf9\x0c\xf3\xe5\x2f\xf8\x3b\x5d\x5e\x9e\x00\xb1\x54\xe4\x81\x7e\x3b\xdf\xea\xb7\x1e\xb8\xed\x23\xe5\x6d\xd3\x32\xa2\x23\x01\x85\xed\x05\x05\x47\x8a\x0b\x56\xa0\xd1\x94\x0d\x96\x04\xa5\xdd\x92\x37\x6c\x4a\x70\xe4\x6b\x0e\xad\x9b\x01\xd0\xe4\xd1\x14\x34\xd7\x2c\x28\x5d\xe4\xd5\xa1\xe2\x28\x42\xc7\x83\xff\x33\x50\xd6\x53\xac\x8c\xaf\x63\xa5\x6d\x93\xc7\x65\x37\x4a\xb1\xb2\x75\xb2\x3d\x47\xed\x2a\x3c\x8f\xee\xd9\xe4\x33\xc8\xc8\x6f\x59\xd1\x37\x74\x8e\x4d\x19\xd5\x24\x98\xa3\xe0\x2c\x02\x30\x58\xd3\x0c\x3a\x02\xce\xc3\x9f\xe9\x06\x58\x70\xa8\x5a\xac\xf1\xf5\x69\xd8\x07\xa1\x3a\x6a\x0f\xf7\xc4\xb2\x68\xf3\xd2\xcb\x2c\x02\xd8\x8e\x22\xb7\xe7\x1b\x12\x3c\x8f\x00\x42\x2f\xe2\xab\x0d\xb2\x3c\x2e\xda\x57\x42\xc7\x61\xa8\xe6\x29\xd8\xc6\x2b\x0a\x2d\x37\xc0\xe9\xc0\xdf\xe3\xee\xba\xbc\x3b\xce\xc3\x1d\x99\xdc\x59\x36\x12\xf5\x
63\xd8\x1f\x76\x54\x71\x75\xbc\x89\x8d\xd8\x2f\x64\xc8\xa3\x50\x3e\x03\xf1\x0d\x0d\x3b\x9c\xaf\xa9\x76\x1a\x85\x66\x30\x71\xde\xfe\x43\x4a\x42\xf2\xf0\x30\x2c\x0f\x87\x44\x5b\xd5\x7b\x92\x3c\x3c\x8c\xeb\xc3\x21\x19\xcb\xb7\xe1\x56\xe1\xe1\x30\x79\x45\xb9\x40\xf3\x89\x6e\x03\xe5\x6b\xfb\x0d\x45\x55\xab\xe1\x68\x7d\x0b\x0a\xd4\x61\x94\x31\x1e\x7a\xbe\x45\xd6\xed\x4d\x48\xcd\x3c\x04\x92\xd4\x6c\xc9\x88\xf5\xfb\x63\xf8\x68\xe2\x60\xea\x6c\x08\x8f\x5e\xb4\xbf\x2f\x88\xd3\xcb\x27\x90\xa0\x2f\x49\x3e\x33\xe9\xfc\x08\x5b\x31\x79\xf4\xaa\x62\x85\x7a\x45\x05\x79\x32\x8f\x36\xf4\x56\xc8\xde\xd1\x0c\x86\xde\x3c\xc6\x01\xee\x69\xff\x18\x5e\x51\xf1\xa2\xf6\x1b\x5c\xc5\xa0\xe0\xbd\x74\xed\x97\x53\x50\x9e\x9d\x74\xe3\xf5\xdf\xe9\xb3\x1d\x80\x75\x45\x63\x12\x48\x85\xd2\xbf\x7e\xa3\x08\xd8\x90\xb6\xa6\x0c\x20\x36\x7e\x96\x56\x6e\xef\x67\x47\x2c\xfd\x1c\x7d\x7f\xa5\xe9\x8f\x63\xfd\x8c\xc7\xdb\xc6\xcd\x1e\xab\xd5\x68\xb0\x24\xff\xe6\x95\x7a\xd5\x04\x43\xb2\xb3\xfe\xfe\x65\x13\x86\xf0\xfb\x9b\x90\xd6\x75\x23\xed\x00\xc5\xd0\x4e\xd9\xf8\x70\xff\xf8\xbe\x18\xb9\xfa\xde\x70\x00\x65\x8d\x21\x25\x94\x1f\x11\x88\xed\x12\xd2\xcb\x0c\xc6\x11\x8f\x61\xdd\x76\x52\xa1\x19\x5f\x41\x65\x4d\xf7\xef\xf0\x92\xfa\xd9\x30\xf5\x1f\x4b\x20\x5d\xc0\x07\xcd\xf7\x04\x93\xe0\xd5\xa9\x21\x99\x7c\x84\xe1\x7d\x2c\x1a\xad\xe1\x76\x75\x3d\x56\x1d\x79\x06\xfc\xd3\x7d\x1c\x56\x77\x9c\x1f\x92\x52\xdb\x0d\xea\x64\xc0\x86\xe4\x91\xf5\x1d\xbe\x2e\x6c\xed\x1a\xa1\xe5\xab\x6e\xbf\xcf\x5e\xd5\xa7\xbf\x61\xeb\xff\x01\x00\x00\xff\xff\xf4\x41\xd8\x74\xe0\x07\x00\x00"), + }, "/cloudiot.yaml": &vfsgen۰CompressedFileInfo{ name: "cloudiot.yaml", modTime: time.Time{}, @@ -793,6 +800,7 @@ var Assets = func() http.FileSystem { fs["/cloudfunctions.yaml"].(os.FileInfo), fs["/cloudfunctions2.yaml"].(os.FileInfo), fs["/cloudidentity.yaml"].(os.FileInfo), + fs["/cloudids.yaml"].(os.FileInfo), fs["/cloudiot.yaml"].(os.FileInfo), fs["/cloudrun.yaml"].(os.FileInfo), fs["/cloudrunv2.yaml"].(os.FileInfo), From bb40a8ca4e9f89d8d55eda646bbb06529cd08b27 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Tue, 25 Jun 2024 18:18:50 +0000 Subject: [PATCH 024/101] chore: gen golden object Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- .../_generated_object_cloudidsendpoint.golden.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/_generated_object_cloudidsendpoint.golden.yaml b/pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/_generated_object_cloudidsendpoint.golden.yaml index a50a4d7431..4d58469823 100644 --- a/pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/_generated_object_cloudidsendpoint.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/_generated_object_cloudidsendpoint.golden.yaml @@ -1,4 +1,4 @@ -apiVersion: cloudids.cnrm.cloud.google.com/v1alpha1 +apiVersion: cloudids.cnrm.cloud.google.com/v1beta1 kind: CloudIDSEndpoint metadata: annotations: @@ -30,4 +30,5 @@ status: status: "True" type: Ready observedGeneration: 3 - updateTime: "1970-01-01T00:00:00Z" + observedState: + updateTime: "1970-01-01T00:00:00Z" From 02d6130fd03fe40ccb0be8e3953092bc947debad Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Tue, 25 Jun 2024 18:25:32 +0000 Subject: [PATCH 025/101] chore: v1beta1 fixture test Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- 
.../_generated_object_cloudidsendpoint.golden.yaml | 0 .../cloudids/{v1alpha1 => v1beta1}/cloudidsendpoint/_http.log | 0 .../cloudids/{v1alpha1 => v1beta1}/cloudidsendpoint/create.yaml | 0 .../{v1alpha1 => v1beta1}/cloudidsendpoint/dependencies.yaml | 0 .../cloudids/{v1alpha1 => v1beta1}/cloudidsendpoint/update.yaml | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename pkg/test/resourcefixture/testdata/basic/cloudids/{v1alpha1 => v1beta1}/cloudidsendpoint/_generated_object_cloudidsendpoint.golden.yaml (100%) rename pkg/test/resourcefixture/testdata/basic/cloudids/{v1alpha1 => v1beta1}/cloudidsendpoint/_http.log (100%) rename pkg/test/resourcefixture/testdata/basic/cloudids/{v1alpha1 => v1beta1}/cloudidsendpoint/create.yaml (100%) rename pkg/test/resourcefixture/testdata/basic/cloudids/{v1alpha1 => v1beta1}/cloudidsendpoint/dependencies.yaml (100%) rename pkg/test/resourcefixture/testdata/basic/cloudids/{v1alpha1 => v1beta1}/cloudidsendpoint/update.yaml (100%) diff --git a/pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/_generated_object_cloudidsendpoint.golden.yaml b/pkg/test/resourcefixture/testdata/basic/cloudids/v1beta1/cloudidsendpoint/_generated_object_cloudidsendpoint.golden.yaml similarity index 100% rename from pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/_generated_object_cloudidsendpoint.golden.yaml rename to pkg/test/resourcefixture/testdata/basic/cloudids/v1beta1/cloudidsendpoint/_generated_object_cloudidsendpoint.golden.yaml diff --git a/pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/_http.log b/pkg/test/resourcefixture/testdata/basic/cloudids/v1beta1/cloudidsendpoint/_http.log similarity index 100% rename from pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/_http.log rename to pkg/test/resourcefixture/testdata/basic/cloudids/v1beta1/cloudidsendpoint/_http.log diff --git a/pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/create.yaml b/pkg/test/resourcefixture/testdata/basic/cloudids/v1beta1/cloudidsendpoint/create.yaml similarity index 100% rename from pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/create.yaml rename to pkg/test/resourcefixture/testdata/basic/cloudids/v1beta1/cloudidsendpoint/create.yaml diff --git a/pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/dependencies.yaml b/pkg/test/resourcefixture/testdata/basic/cloudids/v1beta1/cloudidsendpoint/dependencies.yaml similarity index 100% rename from pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/dependencies.yaml rename to pkg/test/resourcefixture/testdata/basic/cloudids/v1beta1/cloudidsendpoint/dependencies.yaml diff --git a/pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/update.yaml b/pkg/test/resourcefixture/testdata/basic/cloudids/v1beta1/cloudidsendpoint/update.yaml similarity index 100% rename from pkg/test/resourcefixture/testdata/basic/cloudids/v1alpha1/cloudidsendpoint/update.yaml rename to pkg/test/resourcefixture/testdata/basic/cloudids/v1beta1/cloudidsendpoint/update.yaml From fa4dd5338e53d0745e6192cb12d74d26ab724297 Mon Sep 17 00:00:00 2001 From: Yuwen Ma Date: Tue, 25 Jun 2024 19:44:31 +0000 Subject: [PATCH 026/101] fix: change CBWP CRD field to camel case --- apis/cloudbuild/v1alpha1/workerpool_types.go | 2 +- ...ools.cloudbuild.cnrm.cloud.google.com.yaml | 4 +-- .../v1alpha1/cloudbuildworkerpool/create.yaml | 2 +- 
.../v1alpha1/cloudbuildworkerpool/update.yaml | 2 +- tests/apichecks/crds_test.go | 30 +++++++++++++++++++ 5 files changed, 35 insertions(+), 5 deletions(-) diff --git a/apis/cloudbuild/v1alpha1/workerpool_types.go b/apis/cloudbuild/v1alpha1/workerpool_types.go index 1d42e6d586..0438745320 100644 --- a/apis/cloudbuild/v1alpha1/workerpool_types.go +++ b/apis/cloudbuild/v1alpha1/workerpool_types.go @@ -55,7 +55,7 @@ type NetworkConfig struct { // +optional EgressOption string `json:"egressOption,omitempty"` // +optional - PeeredNetworkIPRange string `json:"PeeredNetworkIPRange,omitempty"` + PeeredNetworkIPRange string `json:"peeredNetworkIPRange,omitempty"` } // CloudBuildWorkerPoolStatus defines the observed state of Instance diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudbuildworkerpools.cloudbuild.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudbuildworkerpools.cloudbuild.cnrm.cloud.google.com.yaml index bf8a302694..b370c4c134 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudbuildworkerpools.cloudbuild.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudbuildworkerpools.cloudbuild.cnrm.cloud.google.com.yaml @@ -49,10 +49,10 @@ spec: properties: networkConfig: properties: - PeeredNetworkIPRange: - type: string egressOption: type: string + peeredNetworkIPRange: + type: string peeredNetworkRef: oneOf: - not: diff --git a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/create.yaml b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/create.yaml index 4af9dbebef..0447273411 100644 --- a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/create.yaml @@ -28,5 +28,5 @@ spec: peeredNetworkRef: external: projects/${projectId}/global/networks/computenetwork-${uniqueId} egressOption: NO_PUBLIC_EGRESS - PeeredNetworkIPRange: /29 + peeredNetworkIPRange: /29 diff --git a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/update.yaml b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/update.yaml index 00d69c8036..e33f218a2c 100644 --- a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/update.yaml +++ b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/update.yaml @@ -28,5 +28,5 @@ spec: peeredNetworkRef: external: projects/${projectId}/global/networks/computenetwork-${uniqueId} egressOption: NO_PUBLIC_EGRESS - PeeredNetworkIPRange: /29 + peeredNetworkIPRange: /29 diff --git a/tests/apichecks/crds_test.go b/tests/apichecks/crds_test.go index 41afd98542..6bf2919084 100644 --- a/tests/apichecks/crds_test.go +++ b/tests/apichecks/crds_test.go @@ -242,3 +242,33 @@ func visitProps(props *apiextensions.JSONSchemaProps, fieldPath string, callback klog.Fatalf("unhandled props.Type %q in %+v", props.Type, props) } } + +func TestCRDCamelCase(t *testing.T) { + crds, err := crdloader.LoadAllCRDs() + if err != nil { + t.Fatalf("error loading crds: %v", err) + } + var errs []string + for _, crd := range crds { + for _, version := range crd.Spec.Versions { + visitCRDVersion(version, func(field *CRDField) { + fieldPath := field.FieldPath + first := func() int32 { + tokens := strings.Split(fieldPath, ".") + // Only 
check the last token to avoid duplication. + for _, first := range tokens[len(tokens)-1] { + return first + } + return 0 + }() + if unicode.IsLetter(first) && unicode.IsUpper(first) { + errs = append(errs, fmt.Sprintf("[refs] crd=%s version=%v: field %q should use camel case", crd.Name, version.Name, field.FieldPath)) + } + }) + } + } + sort.Strings(errs) + if len(errs) != 0 { + t.Fatal(errs) + } +} From 752722a3664a2ddef5d8c49bbdbd065d5c31e71a Mon Sep 17 00:00:00 2001 From: Gemma Hou Date: Mon, 24 Jun 2024 18:53:31 +0000 Subject: [PATCH 027/101] Add dynamic test --- ...proxies.compute.cnrm.cloud.google.com.yaml | 7 +- config/servicemappings/compute.yaml | 7 +- .../v1beta1/computetargethttpsproxy_types.go | 5 +- .../create.yaml | 26 ++++ .../dependencies.yaml | 126 ++++++++++++++++++ .../update.yaml | 26 ++++ .../create.yaml | 26 ++++ .../dependencies.yaml | 61 +++++++++ .../update.yaml | 26 ++++ .../compute/computetargethttpsproxy.md | 7 +- 10 files changed, 310 insertions(+), 7 deletions(-) create mode 100644 pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemanagercertificates/create.yaml create mode 100644 pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemanagercertificates/dependencies.yaml create mode 100644 pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemanagercertificates/update.yaml create mode 100644 pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemap/create.yaml create mode 100644 pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemap/dependencies.yaml create mode 100644 pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemap/update.yaml diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computetargethttpsproxies.compute.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computetargethttpsproxies.compute.cnrm.cloud.google.com.yaml index 7c7606ef03..060973b766 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computetargethttpsproxies.compute.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computetargethttpsproxies.compute.cnrm.cloud.google.com.yaml @@ -96,7 +96,10 @@ spec: description: |- A reference to the CertificateMap resource uri that identifies a certificate map associated with the given target proxy. This field - can only be set for global target proxies. + can only be set for global target proxies. This field is only supported + for EXTERNAL and EXTERNAL_MANAGED load balancing schemes. + For INTERNAL_MANAGED, use certificateManagerCertificates instead. + sslCertificates and certificateMap fields can not be defined together. oneOf: - not: required: @@ -185,7 +188,7 @@ spec: - external properties: external: - description: 'Allowed value: string of the format `projects/{{project}}/locations/global/serverTlsPolicies/{{value}}`, + description: 'Allowed value: string of the format `projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{value}}`, where {{value}} is the `name` field of a `NetworkSecurityServerTLSPolicy` resource.' 
type: string diff --git a/config/servicemappings/compute.yaml b/config/servicemappings/compute.yaml index 420f4c8e02..799b79780b 100644 --- a/config/servicemappings/compute.yaml +++ b/config/servicemappings/compute.yaml @@ -2474,7 +2474,10 @@ spec: description: |- A reference to the CertificateMap resource uri that identifies a certificate map associated with the given target proxy. This field - can only be set for global target proxies. + can only be set for global target proxies. This field is only supported + for EXTERNAL and EXTERNAL_MANAGED load balancing schemes. + For INTERNAL_MANAGED, use certificateManagerCertificates instead. + sslCertificates and certificateMap fields can not be defined together. gvk: kind: CertificateManagerCertificateMap version: v1beta1 @@ -2496,7 +2499,7 @@ spec: kind: NetworkSecurityServerTLSPolicy version: v1beta1 group: networksecurity.cnrm.cloud.google.com - valueTemplate: "projects/{{project}}/locations/global/serverTlsPolicies/{{value}}" + valueTemplate: "projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{value}}" dclBasedResource: true containers: - type: project diff --git a/pkg/clients/generated/apis/compute/v1beta1/computetargethttpsproxy_types.go b/pkg/clients/generated/apis/compute/v1beta1/computetargethttpsproxy_types.go index 87c6a4a451..d7884fae20 100644 --- a/pkg/clients/generated/apis/compute/v1beta1/computetargethttpsproxy_types.go +++ b/pkg/clients/generated/apis/compute/v1beta1/computetargethttpsproxy_types.go @@ -41,7 +41,10 @@ type ComputeTargetHTTPSProxySpec struct { /* A reference to the CertificateMap resource uri that identifies a certificate map associated with the given target proxy. This field - can only be set for global target proxies. */ + can only be set for global target proxies. This field is only supported + for EXTERNAL and EXTERNAL_MANAGED load balancing schemes. + For INTERNAL_MANAGED, use certificateManagerCertificates instead. + sslCertificates and certificateMap fields can not be defined together. */ // +optional CertificateMapRef *v1alpha1.ResourceRef `json:"certificateMapRef,omitempty"` diff --git a/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemanagercertificates/create.yaml b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemanagercertificates/create.yaml new file mode 100644 index 0000000000..e256eee8c5 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemanagercertificates/create.yaml @@ -0,0 +1,26 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeTargetHTTPSProxy +metadata: + name: computetargethttpsproxy-${uniqueId} +spec: + description: "test description" + urlMapRef: + name: computeurlmap-${uniqueId} + certificateManagerCertificates: + - name: certificatemanagercertificates-${uniqueId} + quicOverride: DISABLE + location: global diff --git a/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemanagercertificates/dependencies.yaml b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemanagercertificates/dependencies.yaml new file mode 100644 index 0000000000..1da3be6ce8 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemanagercertificates/dependencies.yaml @@ -0,0 +1,126 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeHealthCheck +metadata: + name: computehealthcheck-${uniqueId} +spec: + checkIntervalSec: 10 + httpHealthCheck: + port: 80 + location: global +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeBackendService +metadata: + name: computebackendservice-${uniqueId} +spec: + healthChecks: + - healthCheckRef: + name: computehealthcheck-${uniqueId} + location: global + loadBalancingScheme: "INTERNAL_MANAGED" +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeURLMap +metadata: + name: computeurlmap-${uniqueId} +spec: + defaultService: + backendServiceRef: + name: computebackendservice-${uniqueId} + location: global +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeURLMap +metadata: + name: computeurlmap-2-${uniqueId} +spec: + defaultService: + backendServiceRef: + name: computebackendservice-${uniqueId} + location: global +--- +apiVersion: v1 +kind: Secret +metadata: + name: secret-${uniqueId} +stringData: + privateKey: | + -----BEGIN PRIVATE KEY----- + MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC85P3bkYbiUpX0 + e8Aext5wyEY8CqOFVELbqJlQkLyhJY673mVYlJflmFuIXo3PX3bAMZX9UCHcvGPx + Fi1yfvl4PFApNwqi4OV35OgIYhK08VZtHqtvKwu5moVURdU5O+nZAh6Aid+u+cYL + iPtCmJzvOsUH0fMnH7Qel4TI7iCR3Ml8Qr+AyaiVSAmspfn2j7T5GP4mrcS1xDEv + V9UFUlLHQ2+mhHV5imccyv+skpmRRRnbETzqGu2mI/o4RhBOLTSuZ/gadkXlB8YY + PTG6hBe1HtO75eEqJtciHFwPhHM8tfVBLTjQ9e+Yw8sNvhiVRDpXdGzjYkO+SQPA + gxdm+fxrAgMBAAECggEAV4/A24TQpV4KFBw/WSTvnRFBeXinB1mhamhztWR6hCrA + SPcVPKQY632eRI8sJmpGxl3V/Ogl4khT/cA9jfstEl7G++v/WrRsupCaPLSVnlnX + KdsTNgOauk1WK9P5PMA4rPcuA4Cl91riQpubeWn8KWsxRWg90i+Ak8PB8lBsOaB1 + QzjigWlrRWSpodaw0MBIMZFDL2BYK8HEr+wyATYIyGvDQc9zCnMQIQIZyEPYepLO + 04Dw17YcjgnoJ5gLAFiTvDrCpTMewud1RQzvW5TAvG2piw34sf3QMGPM7aXNrfuZ + 4ZPC/MwVQgq9Nc+jeDsjApQmJKJ+3a8OdIPU89ArTQKBgQDCpHHQe1RzpHmIx47/ + 9N5r+NPBhh8flDYmvgi6zPeBfrAaLWhidS8c7Voa6HwvMxbhryDEvc0YqI3vllfy + xnRF+DfSryozW0gjrkXDGoOzqOJ3EuQwLSJnyX6La2lmufqsRFazwYJ5sxcjoGHK + 
/sbwZkIUj1ejuH44ve+ZJQFfpwKBgQD4cLJrJhqImUDhHZRx9jBvxyeHy/RjmHK6 + 70xQVDi9ZqeExHwtoSbolhXKLB1RtBnw+t5Csy7IDNBDsbUg9fXU8KyCTIdmsyws + bDb5hdKsUF76rkKzlpttiXMRVWGS3CMKWahBpnL3lFB3tdtmskemkBTXVn4VgKAH + xk9XnZ11nQKBgDbQSJ0FnkrSzscOK984/ko50Kh3NNyXyIgwjBTPFASLwNweXX8c + sR/cV7usLQy9vnvf7cJ6EQAYt5/5Httnt+bceBwE6EV+N1qVAWBoXx6BOQV/dHN8 + wmun+tMYdJ5RUZ6hwCjvHedX3/RQfjnEdhHNOl6/31Zj5mfkVU0zdqeRAoGAcvIh + erXMfPr7K6y16+xOCMmKHqhc0F/OZXMmSdxNzEPcqe8GzU3MZLxcJIg4oH7FqdtI + Tm/86w4Spd9owHFMZlNcXYTu+LNZcsw2u0gRayxcZXuO3OyHySxZEuIAHSTBCZ7l + 3EoY0zfJ6zk249MEl6n+GouoFmbGpBI6z3zbR3kCgYEAlCNZVH4uJrP5beTOZTTR + VJRk7BXvEC6HsM140YtIN7NHy2GtzrgmmY/ZAFB/hX8Ft4ex2MxbIp3hvxroTqGn + bfu7uv97NoPQqbjtc3Mz8h2IaXTVDUnWYY5gDu6rM2w+Z75/sWIGiTWrsdYX4ohb + ujngzJ7Ew7GgKSboj6mtlVM= + -----END PRIVATE KEY----- +--- +apiVersion: certificatemanager.cnrm.cloud.google.com/v1beta1 +kind: CertificateManagerCertificate +metadata: + name: certificatemanagercertificates-${uniqueId} +spec: + scope: "ALL_REGIONS" + location : "global" + projectRef: + external: ${projectId} + description: ALL_REGIONS scoped self-managed certificate + selfManaged: + pemCertificate: |- + -----BEGIN CERTIFICATE----- + MIIDDzCCAfegAwIBAgIUDOiCLH9QNMMYnjPZVf4VwO9blsEwDQYJKoZIhvcNAQEL + BQAwFjEUMBIGA1UEAwwLZXhhbXBsZS5jb20wIBcNMjIwODI0MDg0MDUxWhgPMzAy + MTEyMjUwODQwNTFaMBYxFDASBgNVBAMMC2V4YW1wbGUuY29tMIIBIjANBgkqhkiG + 9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvOT925GG4lKV9HvAHsbecMhGPAqjhVRC26iZ + UJC8oSWOu95lWJSX5ZhbiF6Nz192wDGV/VAh3Lxj8RYtcn75eDxQKTcKouDld+To + CGIStPFWbR6rbysLuZqFVEXVOTvp2QIegInfrvnGC4j7Qpic7zrFB9HzJx+0HpeE + yO4gkdzJfEK/gMmolUgJrKX59o+0+Rj+Jq3EtcQxL1fVBVJSx0NvpoR1eYpnHMr/ + rJKZkUUZ2xE86hrtpiP6OEYQTi00rmf4GnZF5QfGGD0xuoQXtR7Tu+XhKibXIhxc + D4RzPLX1QS040PXvmMPLDb4YlUQ6V3Rs42JDvkkDwIMXZvn8awIDAQABo1MwUTAd + BgNVHQ4EFgQURuo1CCZZAUv7xi02f2nC5tRbf18wHwYDVR0jBBgwFoAURuo1CCZZ + AUv7xi02f2nC5tRbf18wDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC + AQEAqx3tDxurnYr9EUPhF5/LlDPYM+VI7EgrKdRnuIqUlZI0tm3vOGME0te6dBTC + YLNaHLW3m/4Tm4M2eg0Kpz6CxJfn3109G31dCi0xwzSDHf5TPUWvqIVhq5WRgMIf + n8KYBlQSmqdJBRztUIQH/UPFnSbxymlS4s5qwDgTH5ag9EEBcnWsQ2LZjKi0eqve + MaqAvvB+j8RGZzYY4re94bSJI42zIZ6nMWPtXwRuDc30xl/u+E0jWIgWbPwSd6Km + 3wnJnGiU2ezPGq3zEU+Rc39VVIFKQpciNeYuF3neHPJvYOf58qW2Z8s0VH0MR1x3 + 3DoO/e30FIr9j+PRD+s5BPKF2A== + -----END CERTIFICATE----- + pemPrivateKey: + valueFrom: + secretKeyRef: + name: secret-${uniqueId} + key: privateKey \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemanagercertificates/update.yaml b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemanagercertificates/update.yaml new file mode 100644 index 0000000000..c924dfee15 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemanagercertificates/update.yaml @@ -0,0 +1,26 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeTargetHTTPSProxy +metadata: + name: computetargethttpsproxy-${uniqueId} +spec: + description: "test description" + urlMapRef: + name: computeurlmap-2-${uniqueId} + certificateManagerCertificates: + - name: certificatemanagercertificates-${uniqueId} + quicOverride: ENABLE + location: global diff --git a/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemap/create.yaml b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemap/create.yaml new file mode 100644 index 0000000000..234bb7a4cc --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemap/create.yaml @@ -0,0 +1,26 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeTargetHTTPSProxy +metadata: + name: computetargethttpsproxy-${uniqueId} +spec: + description: "test description" + urlMapRef: + name: computeurlmap-${uniqueId} + certificateMapRef: + name: certificatemap-${uniqueId} + quicOverride: DISABLE + location: global diff --git a/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemap/dependencies.yaml b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemap/dependencies.yaml new file mode 100644 index 0000000000..1d88748598 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemap/dependencies.yaml @@ -0,0 +1,61 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeHealthCheck +metadata: + name: computehealthcheck-${uniqueId} +spec: + checkIntervalSec: 10 + httpHealthCheck: + port: 80 + location: global +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeBackendService +metadata: + name: computebackendservice-${uniqueId} +spec: + healthChecks: + - healthCheckRef: + name: computehealthcheck-${uniqueId} + location: global +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeURLMap +metadata: + name: computeurlmap-${uniqueId} +spec: + defaultService: + backendServiceRef: + name: computebackendservice-${uniqueId} + location: global +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeURLMap +metadata: + name: computeurlmap-2-${uniqueId} +spec: + defaultService: + backendServiceRef: + name: computebackendservice-${uniqueId} + location: global +--- +apiVersion: certificatemanager.cnrm.cloud.google.com/v1beta1 +kind: CertificateManagerCertificateMap +metadata: + name: certificatemap-${uniqueId} +spec: + projectRef: + external: ${projectId} diff --git a/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemap/update.yaml b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemap/update.yaml new file mode 100644 index 0000000000..2bdf375ea9 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computetargethttpsproxy/globaltargethttpsproxycertificatemap/update.yaml @@ -0,0 +1,26 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeTargetHTTPSProxy +metadata: + name: computetargethttpsproxy-${uniqueId} +spec: + description: "test description" + urlMapRef: + name: computeurlmap-2-${uniqueId} + certificateMapRef: + name: certificatemap-${uniqueId} + quicOverride: ENABLE + location: global diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computetargethttpsproxy.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computetargethttpsproxy.md index e2814ac409..eb81005f32 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computetargethttpsproxy.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computetargethttpsproxy.md @@ -180,7 +180,10 @@ sslCertificates and certificateManagerCertificates fields can not be defined tog

object

{% verbatim %}A reference to the CertificateMap resource uri that identifies a certificate map associated with the given target proxy. This field -can only be set for global target proxies.{% endverbatim %}

+can only be set for global target proxies. This field is only supported +for EXTERNAL and EXTERNAL_MANAGED load balancing schemes. +For INTERNAL_MANAGED, use certificateManagerCertificates instead. +sslCertificates and certificateMap fields can not be defined together.{% endverbatim %}

@@ -307,7 +310,7 @@ If left blank, communications are not encrypted.{% endverbatim %}

string

-

{% verbatim %}Allowed value: string of the format `projects/{{project}}/locations/global/serverTlsPolicies/{{value}}`, where {{value}} is the `name` field of a `NetworkSecurityServerTLSPolicy` resource.{% endverbatim %}

+

{% verbatim %}Allowed value: string of the format `projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{value}}`, where {{value}} is the `name` field of a `NetworkSecurityServerTLSPolicy` resource.{% endverbatim %}

From 7d120ce7753cd58fc013cb49566ef5642f8397a3 Mon Sep 17 00:00:00 2001 From: Yuwen Ma Date: Tue, 25 Jun 2024 21:36:49 +0000 Subject: [PATCH 028/101] update golden object --- .../cloudbuild/v1alpha1/cloudbuildworkerpool_types.go | 4 ++-- .../apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go | 8 ++++---- .../_generated_object_cloudbuildworkerpool.golden.yaml | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/clients/generated/apis/cloudbuild/v1alpha1/cloudbuildworkerpool_types.go b/pkg/clients/generated/apis/cloudbuild/v1alpha1/cloudbuildworkerpool_types.go index 31b24c8c60..ac74f7c809 100644 --- a/pkg/clients/generated/apis/cloudbuild/v1alpha1/cloudbuildworkerpool_types.go +++ b/pkg/clients/generated/apis/cloudbuild/v1alpha1/cloudbuildworkerpool_types.go @@ -37,10 +37,10 @@ import ( type WorkerpoolNetworkConfig struct { // +optional - PeeredNetworkIPRange *string `json:"PeeredNetworkIPRange,omitempty"` + EgressOption *string `json:"egressOption,omitempty"` // +optional - EgressOption *string `json:"egressOption,omitempty"` + PeeredNetworkIPRange *string `json:"peeredNetworkIPRange,omitempty"` PeeredNetworkRef v1alpha1.ResourceRef `json:"peeredNetworkRef"` } diff --git a/pkg/clients/generated/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go index d996ae74b3..2ed885b8f9 100644 --- a/pkg/clients/generated/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go @@ -162,13 +162,13 @@ func (in *CloudBuildWorkerPoolStatus) DeepCopy() *CloudBuildWorkerPoolStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WorkerpoolNetworkConfig) DeepCopyInto(out *WorkerpoolNetworkConfig) { *out = *in - if in.PeeredNetworkIPRange != nil { - in, out := &in.PeeredNetworkIPRange, &out.PeeredNetworkIPRange + if in.EgressOption != nil { + in, out := &in.EgressOption, &out.EgressOption *out = new(string) **out = **in } - if in.EgressOption != nil { - in, out := &in.EgressOption, &out.EgressOption + if in.PeeredNetworkIPRange != nil { + in, out := &in.PeeredNetworkIPRange, &out.PeeredNetworkIPRange *out = new(string) **out = **in } diff --git a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml index 27c31a62d4..94c00dde23 100644 --- a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml @@ -14,8 +14,8 @@ spec: location: us-central1 privatePoolV1Config: networkConfig: - PeeredNetworkIPRange: /29 egressOption: NO_PUBLIC_EGRESS + peeredNetworkIPRange: /29 peeredNetworkRef: external: projects/${projectId}/global/networks/computenetwork-${uniqueId} workerConfig: From 1ea50424df31698ba5847e521e915bc68c5cd432 Mon Sep 17 00:00:00 2001 From: zicongmei Date: Tue, 25 Jun 2024 12:35:30 -0700 Subject: [PATCH 029/101] Update composition samples --- .../compositions/samples/AttachedAKS/01-composition.yaml | 5 +++-- .../compositions/samples/AttachedAKS/02-context.yaml | 2 +- .../compositions/samples/AttachedAKS/setup-Azure-ASO.md | 2 +- 
.../compositions/samples/AttachedEKS/01-composition.yaml | 3 ++- .../compositions/samples/AttachedEKS/03-attached-1.yaml | 6 +++--- .../compositions/samples/AttachedEKS/setup-AWS-ACK.md | 2 +- 6 files changed, 11 insertions(+), 9 deletions(-) diff --git a/experiments/compositions/samples/AttachedAKS/01-composition.yaml b/experiments/compositions/samples/AttachedAKS/01-composition.yaml index 7215a0f513..67ab93f97e 100644 --- a/experiments/compositions/samples/AttachedAKS/01-composition.yaml +++ b/experiments/compositions/samples/AttachedAKS/01-composition.yaml @@ -50,7 +50,7 @@ spec: group: "" version: v1 kind: ConfigMap - resource: issuer + resource: configmaps nameSuffix: "-issuer" fieldRef: - path: ".data.oidc" @@ -59,7 +59,7 @@ spec: apiVersion: composition.google.com/v1alpha1 kind: Composition metadata: - name: compo-2 + name: compo-aks-1 spec: inputAPIGroup: attachedakses.facade.compositions.google.com expanders: @@ -118,6 +118,7 @@ spec: template: "" configref: name: aks-issuer + namespace: default - type: jinja2 version: v0.0.1 name: attach diff --git a/experiments/compositions/samples/AttachedAKS/02-context.yaml b/experiments/compositions/samples/AttachedAKS/02-context.yaml index f1eea49482..e2575d9c94 100644 --- a/experiments/compositions/samples/AttachedAKS/02-context.yaml +++ b/experiments/compositions/samples/AttachedAKS/02-context.yaml @@ -26,7 +26,7 @@ metadata: namespace: "${NAMESPACE}" spec: billingProject: "${PROJECT_ID}" - googleServiceAccount: "${GSA_EMAIL}" + googleServiceAccount: "${TEAM_GSA_EMAIL}" requestProjectPolicy: BILLING_PROJECT --- # Config this namespace for composition diff --git a/experiments/compositions/samples/AttachedAKS/setup-Azure-ASO.md b/experiments/compositions/samples/AttachedAKS/setup-Azure-ASO.md index c5f7a6ea8e..e46cf758e0 100644 --- a/experiments/compositions/samples/AttachedAKS/setup-Azure-ASO.md +++ b/experiments/compositions/samples/AttachedAKS/setup-Azure-ASO.md @@ -40,7 +40,7 @@ export GSA_EMAIL=$USER-allotrope@${PROJECT_ID}.iam.gserviceaccount.com WORKLOAD_IDENTITY_POOL="${PROJECT_ID}.svc.id.goog" # grant workload identity bindings permissions -export ASO_NAMESPACE=azureserviceoperator-system # Don’t change +export ASO_NAMESPACE=kontrollers-azureserviceoperator-system # Don’t change export ASO_KSA=azureserviceoperator-default # Don’t change gcloud iam service-accounts add-iam-policy-binding ${GSA_EMAIL} \ --role roles/iam.workloadIdentityUser \ diff --git a/experiments/compositions/samples/AttachedEKS/01-composition.yaml b/experiments/compositions/samples/AttachedEKS/01-composition.yaml index 33a18b4967..9d5a96f47e 100644 --- a/experiments/compositions/samples/AttachedEKS/01-composition.yaml +++ b/experiments/compositions/samples/AttachedEKS/01-composition.yaml @@ -63,7 +63,7 @@ spec: group: "" version: v1 kind: ConfigMap - resource: issuer + resource: configmaps nameSuffix: "-issuer" fieldRef: - path: ".data.oidc" @@ -377,6 +377,7 @@ spec: template: "" configref: name: eks-issuer + namespace: default - type: jinja2 version: v0.0.1 name: attach diff --git a/experiments/compositions/samples/AttachedEKS/03-attached-1.yaml b/experiments/compositions/samples/AttachedEKS/03-attached-1.yaml index f23ae1b20b..3d6bee6df7 100644 --- a/experiments/compositions/samples/AttachedEKS/03-attached-1.yaml +++ b/experiments/compositions/samples/AttachedEKS/03-attached-1.yaml @@ -21,16 +21,16 @@ spec: gcpRegion: us-west1 kubernetesVersion: "1.28" attachedPlatformVersion: "1.28.0-gke.2" - awsRegion: us-west-2 + awsRegion: us-west-1 gcpProjectNumber: 
"933682497375" adminUsers: - zicong@google.com awsAccessIdentity: "arn:aws:iam::507099386010:user/zicong" awsAvilibilityZones: - - surffix: a + - surffix: b publicSubnet: "10.0.11.0/24" privateSubnet: "10.0.1.0/24" - - surffix: b + - surffix: c publicSubnet: "10.0.12.0/24" privateSubnet: "10.0.2.0/24" \ No newline at end of file diff --git a/experiments/compositions/samples/AttachedEKS/setup-AWS-ACK.md b/experiments/compositions/samples/AttachedEKS/setup-AWS-ACK.md index 1404f73e50..fe1c67b421 100644 --- a/experiments/compositions/samples/AttachedEKS/setup-AWS-ACK.md +++ b/experiments/compositions/samples/AttachedEKS/setup-AWS-ACK.md @@ -40,7 +40,7 @@ export GSA_EMAIL=$USER-allotrope@${PROJECT_ID}.iam.gserviceaccount.com WORKLOAD_IDENTITY_POOL="${PROJECT_ID}.svc.id.goog" # grant workload identity bindings permissions -export ACK_NAMESPACE=ack-system # Don’t change +export ACK_NAMESPACE=kontrollers-ack-system # Don’t change export ACK_KSA_NAME=ack-controller # Don’t change gcloud iam service-accounts add-iam-policy-binding ${GSA_EMAIL} \ --role roles/iam.workloadIdentityUser \ From d9b56454b8db88d24c0a92617016b0cdbb9692eb Mon Sep 17 00:00:00 2001 From: Yuwen Ma Date: Tue, 25 Jun 2024 22:10:11 +0000 Subject: [PATCH 030/101] resolve comments --- tests/apichecks/crds_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/apichecks/crds_test.go b/tests/apichecks/crds_test.go index 6bf2919084..8fd3cb13cc 100644 --- a/tests/apichecks/crds_test.go +++ b/tests/apichecks/crds_test.go @@ -261,8 +261,8 @@ func TestCRDCamelCase(t *testing.T) { } return 0 }() - if unicode.IsLetter(first) && unicode.IsUpper(first) { - errs = append(errs, fmt.Sprintf("[refs] crd=%s version=%v: field %q should use camel case", crd.Name, version.Name, field.FieldPath)) + if unicode.IsUpper(first) { + errs = append(errs, fmt.Sprintf("[jsonNaming] crd=%s version=%v: field %q should use camel case", crd.Name, version.Name, field.FieldPath)) } }) } From 6093245269d7c1e57e6c5f2386e0bde65fbf46eb Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Tue, 25 Jun 2024 22:20:28 +0000 Subject: [PATCH 031/101] chore: samples Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- .../cloudids_v1beta1_cloudidsendpoint.yaml | 2 +- ...ml => compute_v1beta1_computeaddress.yaml} | 4 +-- ...ml => compute_v1beta1_computenetwork.yaml} | 2 +- ..._v1beta1_servicenetworkingconnection.yaml} | 6 ++--- .../cloudids/cloudidsendpoint.md | 26 +++++++++---------- .../resource-reference/overview.md | 2 +- 6 files changed, 21 insertions(+), 21 deletions(-) rename config/samples/resources/cloudidsendpoint/{cloudids_v1beta1_computeaddress.yaml => compute_v1beta1_computeaddress.yaml} (92%) rename config/samples/resources/cloudidsendpoint/{cloudids_v1beta1_cloudnetwork.yaml => compute_v1beta1_computenetwork.yaml} (95%) rename config/samples/resources/cloudidsendpoint/{cloudids_v1beta1_servicenetworkingconnection.yaml => servicenetworking_v1beta1_servicenetworkingconnection.yaml} (89%) diff --git a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudidsendpoint.yaml b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudidsendpoint.yaml index f592b9d34f..12a3aca87d 100644 --- a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudidsendpoint.yaml +++ b/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudidsendpoint.yaml @@ -18,7 +18,7 @@ metadata: name: cloudidsendpoint-sample spec: networkRef: - name: cloudidsendpoint-dep1 + name: 
cloudidsendpoint-dep severity: INFORMATIONAL location: us-west2-a projectRef: diff --git a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_computeaddress.yaml b/config/samples/resources/cloudidsendpoint/compute_v1beta1_computeaddress.yaml similarity index 92% rename from config/samples/resources/cloudidsendpoint/cloudids_v1beta1_computeaddress.yaml rename to config/samples/resources/cloudidsendpoint/compute_v1beta1_computeaddress.yaml index bc998f0963..ec317f41c4 100644 --- a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_computeaddress.yaml +++ b/config/samples/resources/cloudidsendpoint/compute_v1beta1_computeaddress.yaml @@ -15,11 +15,11 @@ apiVersion: compute.cnrm.cloud.google.com/v1beta1 kind: ComputeAddress metadata: - name: cloudidsendpoint-dep2 + name: cloudidsendpoint-dep spec: location: global addressType: INTERNAL networkRef: - name: cloudidsendpoint-dep1 + name: cloudidsendpoint-dep prefixLength: 16 purpose: VPC_PEERING diff --git a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudnetwork.yaml b/config/samples/resources/cloudidsendpoint/compute_v1beta1_computenetwork.yaml similarity index 95% rename from config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudnetwork.yaml rename to config/samples/resources/cloudidsendpoint/compute_v1beta1_computenetwork.yaml index dbef8e6d4e..494fa0c79a 100644 --- a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_cloudnetwork.yaml +++ b/config/samples/resources/cloudidsendpoint/compute_v1beta1_computenetwork.yaml @@ -15,6 +15,6 @@ apiVersion: compute.cnrm.cloud.google.com/v1beta1 kind: ComputeNetwork metadata: - name: cloudidsendpoint-dep1 + name: cloudidsendpoint-dep spec: autoCreateSubnetworks: false \ No newline at end of file diff --git a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_servicenetworkingconnection.yaml b/config/samples/resources/cloudidsendpoint/servicenetworking_v1beta1_servicenetworkingconnection.yaml similarity index 89% rename from config/samples/resources/cloudidsendpoint/cloudids_v1beta1_servicenetworkingconnection.yaml rename to config/samples/resources/cloudidsendpoint/servicenetworking_v1beta1_servicenetworkingconnection.yaml index fa102f5c6f..77f681e3a2 100644 --- a/config/samples/resources/cloudidsendpoint/cloudids_v1beta1_servicenetworkingconnection.yaml +++ b/config/samples/resources/cloudidsendpoint/servicenetworking_v1beta1_servicenetworkingconnection.yaml @@ -15,10 +15,10 @@ apiVersion: servicenetworking.cnrm.cloud.google.com/v1beta1 kind: ServiceNetworkingConnection metadata: - name: cloudidsendpoint-dep3 + name: cloudidsendpoint-dep spec: networkRef: - name: cloudidsendpoint-dep1 + name: cloudidsendpoint-dep reservedPeeringRanges: - - name: cloudidsendpoint-dep2 + - name: cloudidsendpoint-dep service: servicenetworking.googleapis.com \ No newline at end of file diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md index 6e85c18173..9d8a5dcb38 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md @@ -379,40 +379,40 @@ metadata: name: cloudidsendpoint-sample spec: networkRef: - name: cloudidsendpoint-dep1 + name: cloudidsendpoint-dep severity: INFORMATIONAL location: us-west2-a projectRef: external: ${PROJECT_ID?} 
--- apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeNetwork -metadata: - name: cloudidsendpoint-dep1 -spec: - autoCreateSubnetworks: false ---- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 kind: ComputeAddress metadata: - name: cloudidsendpoint-dep2 + name: cloudidsendpoint-dep spec: location: global addressType: INTERNAL networkRef: - name: cloudidsendpoint-dep1 + name: cloudidsendpoint-dep prefixLength: 16 purpose: VPC_PEERING --- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeNetwork +metadata: + name: cloudidsendpoint-dep +spec: + autoCreateSubnetworks: false +--- apiVersion: servicenetworking.cnrm.cloud.google.com/v1beta1 kind: ServiceNetworkingConnection metadata: - name: cloudidsendpoint-dep3 + name: cloudidsendpoint-dep spec: networkRef: - name: cloudidsendpoint-dep1 + name: cloudidsendpoint-dep reservedPeeringRanges: - - name: cloudidsendpoint-dep2 + - name: cloudidsendpoint-dep service: servicenetworking.googleapis.com ``` diff --git a/scripts/generate-google3-docs/resource-reference/overview.md b/scripts/generate-google3-docs/resource-reference/overview.md index c318350d25..a2ad258ddd 100644 --- a/scripts/generate-google3-docs/resource-reference/overview.md +++ b/scripts/generate-google3-docs/resource-reference/overview.md @@ -130,7 +130,7 @@ issues for {{product_name_short}}. CloudIdentityMembership - {{cloudids_endpoint_name}} + {{ids_name}} CloudIDSEndpoint From 900c50288d85535ecf084932e3a8391ec3e59fae Mon Sep 17 00:00:00 2001 From: zicongmei Date: Tue, 25 Jun 2024 15:41:05 -0700 Subject: [PATCH 032/101] Fix composition sample error --- .../samples/AttachedAKS/aks-charlie.sh | 26 +++++++++---------- .../samples/AttachedEKS/02-context.yaml | 2 +- .../samples/AttachedEKS/eks-charlie.sh | 12 ++++----- 3 files changed, 19 insertions(+), 21 deletions(-) diff --git a/experiments/compositions/samples/AttachedAKS/aks-charlie.sh b/experiments/compositions/samples/AttachedAKS/aks-charlie.sh index d5dbcc7a4f..6f902a4f8d 100644 --- a/experiments/compositions/samples/AttachedAKS/aks-charlie.sh +++ b/experiments/compositions/samples/AttachedAKS/aks-charlie.sh @@ -18,25 +18,25 @@ kubectl apply -f 01-composition.yaml # Create a GCP service account for this team and # grant KCC permission according to https://cloud.google.com/config-connector/docs/how-to/install-namespaced export NAMESPACE=team-aks -export GCP_SA_NAME="${NAMESPACE}" +export TEAM_GCP_SA_NAME="${NAMESPACE}" export PROJECT_ID=$(gcloud config get-value project) -export GSA_EMAIL="${GCP_SA_NAME}@${PROJECT_ID}.iam.gserviceaccount.com" -gcloud iam service-accounts create ${GCP_SA_NAME} --project ${PROJECT_ID} +export TEAM_GSA_EMAIL="${TEAM_GCP_SA_NAME}@${PROJECT_ID}.iam.gserviceaccount.com" +gcloud iam service-accounts create ${TEAM_GCP_SA_NAME} --project ${PROJECT_ID} gcloud projects add-iam-policy-binding ${PROJECT_ID} \ - --member="serviceAccount:${GSA_EMAIL}" \ + --member="serviceAccount:${TEAM_GSA_EMAIL}" \ --role="roles/owner" gcloud iam service-accounts add-iam-policy-binding \ - ${GSA_EMAIL} \ + ${TEAM_GSA_EMAIL} \ --member="serviceAccount:${PROJECT_ID}.svc.id.goog[cnrm-system/cnrm-controller-manager-${NAMESPACE}]" \ --role="roles/iam.workloadIdentityUser" \ --project ${PROJECT_ID} gcloud projects add-iam-policy-binding ${PROJECT_ID} \ - --member="serviceAccount:${GSA_EMAIL}" \ + --member="serviceAccount:${TEAM_GSA_EMAIL}" \ --role="roles/monitoring.metricWriter" WORKLOAD_IDENTITY_POOL="${PROJECT_ID}.svc.id.goog" export ASO_NAMESPACE=azureserviceoperator-system # Don’t change export 
ASO_KSA=azureserviceoperator-default # Don’t change -gcloud iam service-accounts add-iam-policy-binding ${GSA_EMAIL} \ +gcloud iam service-accounts add-iam-policy-binding ${TEAM_GSA_EMAIL} \ --role roles/iam.workloadIdentityUser \ --member "serviceAccount:${WORKLOAD_IDENTITY_POOL}[${ASO_NAMESPACE}/${ASO_KSA}]" \ --condition None @@ -62,23 +62,21 @@ az identity create --name ${MI_NAME} \ export AZURE_CLIENT_ID=$(az identity show \ --name ${MI_NAME} --resource-group ${MI_RESOURCE_GROUP} \ --query "clientId" -otsv) -MI_PRINCIPAL_ID=$(az identity show \ - --name ${MI_NAME} --resource-group ${MI_RESOURCE_GROUP} \ - --query "principalId" -otsv) # Assign the permissions to this MI. # User can use other permissions to manage their resources. az role assignment create \ - --assignee $MI_PRINCIPAL_ID \ + --assignee $AZURE_CLIENT_ID \ --role contributor \ --scope /subscriptions/$AZURE_SUBSCRIPTION_ID # Allow the GCP service account used by setup-Azure-ASO.md#create-a-gcp-service-account # to inpersonate this Azure managed identity. # In this example, we use the default GCP service account. -PROJECT_NUMBER=$(gcloud projects describe $PROJECT_ID --format="value(projectNumber)") -DEFAULT_GSA_EMAIL="service-${PROJECT_NUMBER}@gcp-sa-yakima.iam.gserviceaccount.com" -DEFAULT_GSA_SUB=$(gcloud iam service-accounts describe ${GSA_EMAIL} \ +DEFAULT_GSA_EMAIL=$(kubectl get asokontroller \ + asokontroller.kontrollers.cnrm.cloud.google.com \ + -ojson | jq -r .spec.googleServiceAccount) +DEFAULT_GSA_SUB=$(gcloud iam service-accounts describe ${TEAM_GSA_EMAIL} \ --format "value(oauth2ClientId)") az identity federated-credential create \ diff --git a/experiments/compositions/samples/AttachedEKS/02-context.yaml b/experiments/compositions/samples/AttachedEKS/02-context.yaml index 3671c8b9e0..560afceff4 100644 --- a/experiments/compositions/samples/AttachedEKS/02-context.yaml +++ b/experiments/compositions/samples/AttachedEKS/02-context.yaml @@ -26,7 +26,7 @@ metadata: namespace: "${NAMESPACE}" spec: billingProject: "${PROJECT_ID}" - googleServiceAccount: "${GSA_EMAIL}" + googleServiceAccount: "${TEAM_GSA_EMAIL}" requestProjectPolicy: BILLING_PROJECT --- # Config this namespace for composition diff --git a/experiments/compositions/samples/AttachedEKS/eks-charlie.sh b/experiments/compositions/samples/AttachedEKS/eks-charlie.sh index e0b09ce8a2..a25cf8c343 100644 --- a/experiments/compositions/samples/AttachedEKS/eks-charlie.sh +++ b/experiments/compositions/samples/AttachedEKS/eks-charlie.sh @@ -18,21 +18,21 @@ kubectl apply -f 01-composition.yaml # Create a GCP service account for this team and # grant KCC permission according to https://cloud.google.com/config-connector/docs/how-to/install-namespaced export NAMESPACE=team-eks -export GCP_SA_NAME="${NAMESPACE}" +export TEAM_GCP_SA_NAME="${NAMESPACE}" export PROJECT_ID=$(gcloud config get-value project) -export GSA_EMAIL="${GCP_SA_NAME}@${PROJECT_ID}.iam.gserviceaccount.com" +export TEAM_GSA_EMAIL="${TEAM_GCP_SA_NAME}@${PROJECT_ID}.iam.gserviceaccount.com" -gcloud iam service-accounts create ${GCP_SA_NAME} --project ${PROJECT_ID} +gcloud iam service-accounts create ${TEAM_GCP_SA_NAME} --project ${PROJECT_ID} gcloud projects add-iam-policy-binding ${PROJECT_ID} \ - --member="serviceAccount:${GSA_EMAIL}" \ + --member="serviceAccount:${TEAM_GSA_EMAIL}" \ --role="roles/owner" gcloud iam service-accounts add-iam-policy-binding \ - ${GSA_EMAIL} \ + ${TEAM_GSA_EMAIL} \ --member="serviceAccount:${PROJECT_ID}.svc.id.goog[cnrm-system/cnrm-controller-manager-${NAMESPACE}]" \ 
--role="roles/iam.workloadIdentityUser" \ --project ${PROJECT_ID} gcloud projects add-iam-policy-binding ${PROJECT_ID} \ - --member="serviceAccount:${GSA_EMAIL}" \ + --member="serviceAccount:${TEAM_GSA_EMAIL}" \ --role="roles/monitoring.metricWriter" # Create namespace for Alice team From 7f106b760083ce9303235ed1d50015c0bf6a036a Mon Sep 17 00:00:00 2001 From: Ziyue Yan Date: Tue, 25 Jun 2024 21:26:32 +0000 Subject: [PATCH 033/101] Deduplicate string replacement for startTime --- .../sql/v1beta1/sqlinstance/mysqlinstance/_http.log | 6 +++--- .../sql/v1beta1/sqlinstance/postgresinstance/_http.log | 6 +++--- .../v1beta1/sqlinstance/sqlserverinstance/_http.log | 4 ++-- .../testdata/basic/sql/v1beta1/sqluser/_http.log | 10 +++++----- .../iammemberreferences/sqlinstanceref/_http.log | 6 +++--- tests/e2e/normalize.go | 2 ++ tests/e2e/unified_test.go | 2 -- 7 files changed, 18 insertions(+), 18 deletions(-) diff --git a/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/mysqlinstance/_http.log b/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/mysqlinstance/_http.log index 8f8e7a4e2c..d92c5771fc 100644 --- a/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/mysqlinstance/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/mysqlinstance/_http.log @@ -53,7 +53,7 @@ Grpc-Metadata-Content-Type: application/grpc "name": "${operationID}", "operationType": "CREATE", "selfLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/operations/${operationID}", - "startTime": null, + "startTime": "2024-04-01T12:34:56.123456Z", "status": "PENDING", "targetId": "sqlinstance-sample-${uniqueId}", "targetLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/instances/sqlinstance-sample-${uniqueId}", @@ -118,7 +118,7 @@ Grpc-Metadata-Content-Type: application/grpc "name": "${operationID}", "operationType": "DELETE_USER", "selfLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/operations/${operationID}", - "startTime": null, + "startTime": "2024-04-01T12:34:56.123456Z", "status": "PENDING", "targetId": "sqlinstance-sample-${uniqueId}", "targetLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/instances/sqlinstance-sample-${uniqueId}", @@ -274,7 +274,7 @@ Grpc-Metadata-Content-Type: application/grpc "name": "${operationID}", "operationType": "DELETE", "selfLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/operations/${operationID}", - "startTime": null, + "startTime": "2024-04-01T12:34:56.123456Z", "status": "PENDING", "targetId": "sqlinstance-sample-${uniqueId}", "targetLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/instances/sqlinstance-sample-${uniqueId}", diff --git a/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/postgresinstance/_http.log b/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/postgresinstance/_http.log index c83d6a1b28..22ba3284eb 100644 --- a/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/postgresinstance/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/postgresinstance/_http.log @@ -588,7 +588,7 @@ Grpc-Metadata-Content-Type: application/grpc "name": "${operationID}", "operationType": "CREATE", "selfLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/operations/${operationID}", - "startTime": null, + "startTime": "2024-04-01T12:34:56.123456Z", "status": "PENDING", "targetId": 
"sqlinstance-${uniqueId}", "targetLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/instances/sqlinstance-${uniqueId}", @@ -839,7 +839,7 @@ Grpc-Metadata-Content-Type: application/grpc "name": "${operationID}", "operationType": "UPDATE", "selfLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/operations/${operationID}", - "startTime": null, + "startTime": "2024-04-01T12:34:56.123456Z", "status": "PENDING", "targetId": "sqlinstance-${uniqueId}", "targetLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/instances/sqlinstance-${uniqueId}", @@ -1013,7 +1013,7 @@ Grpc-Metadata-Content-Type: application/grpc "name": "${operationID}", "operationType": "DELETE", "selfLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/operations/${operationID}", - "startTime": null, + "startTime": "2024-04-01T12:34:56.123456Z", "status": "PENDING", "targetId": "sqlinstance-${uniqueId}", "targetLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/instances/sqlinstance-${uniqueId}", diff --git a/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/sqlserverinstance/_http.log b/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/sqlserverinstance/_http.log index 28b3df6ae3..e9b453f423 100644 --- a/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/sqlserverinstance/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/sqlserverinstance/_http.log @@ -197,7 +197,7 @@ Grpc-Metadata-Content-Type: application/grpc "name": "${operationID}", "operationType": "CREATE", "selfLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/operations/${operationID}", - "startTime": null, + "startTime": "2024-04-01T12:34:56.123456Z", "status": "PENDING", "targetId": "sqlinstance-sample-${uniqueId}", "targetLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/instances/sqlinstance-sample-${uniqueId}", @@ -448,7 +448,7 @@ Grpc-Metadata-Content-Type: application/grpc "name": "${operationID}", "operationType": "DELETE", "selfLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/operations/${operationID}", - "startTime": null, + "startTime": "2024-04-01T12:34:56.123456Z", "status": "PENDING", "targetId": "sqlinstance-sample-${uniqueId}", "targetLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/instances/sqlinstance-sample-${uniqueId}", diff --git a/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqluser/_http.log b/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqluser/_http.log index c586d68737..596557b737 100644 --- a/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqluser/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqluser/_http.log @@ -51,7 +51,7 @@ Grpc-Metadata-Content-Type: application/grpc "name": "${operationID}", "operationType": "CREATE", "selfLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/operations/${operationID}", - "startTime": null, + "startTime": "2024-04-01T12:34:56.123456Z", "status": "PENDING", "targetId": "sqluser-dep-${uniqueId}", "targetLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/instances/sqluser-dep-${uniqueId}", @@ -116,7 +116,7 @@ Grpc-Metadata-Content-Type: application/grpc "name": "${operationID}", "operationType": "DELETE_USER", "selfLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/operations/${operationID}", - "startTime": null, + 
"startTime": "2024-04-01T12:34:56.123456Z", "status": "PENDING", "targetId": "sqluser-dep-${uniqueId}", "targetLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/instances/sqluser-dep-${uniqueId}", @@ -420,7 +420,7 @@ Grpc-Metadata-Content-Type: application/grpc "name": "${operationID}", "operationType": "CREATE_USER", "selfLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/operations/${operationID}", - "startTime": null, + "startTime": "2024-04-01T12:34:56.123456Z", "status": "PENDING", "targetId": "sqluser-dep-${uniqueId}", "targetLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/instances/sqluser-dep-${uniqueId}", @@ -935,7 +935,7 @@ Grpc-Metadata-Content-Type: application/grpc "name": "${operationID}", "operationType": "DELETE_USER", "selfLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/operations/${operationID}", - "startTime": null, + "startTime": "2024-04-01T12:34:56.123456Z", "status": "PENDING", "targetId": "sqluser-dep-${uniqueId}", "targetLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/instances/sqluser-dep-${uniqueId}", @@ -1090,7 +1090,7 @@ Grpc-Metadata-Content-Type: application/grpc "name": "${operationID}", "operationType": "DELETE", "selfLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/operations/${operationID}", - "startTime": null, + "startTime": "2024-04-01T12:34:56.123456Z", "status": "PENDING", "targetId": "sqluser-dep-${uniqueId}", "targetLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/instances/sqluser-dep-${uniqueId}", diff --git a/pkg/test/resourcefixture/testdata/iammemberreferences/sqlinstanceref/_http.log b/pkg/test/resourcefixture/testdata/iammemberreferences/sqlinstanceref/_http.log index c34d669ea2..7ba9a44b48 100644 --- a/pkg/test/resourcefixture/testdata/iammemberreferences/sqlinstanceref/_http.log +++ b/pkg/test/resourcefixture/testdata/iammemberreferences/sqlinstanceref/_http.log @@ -51,7 +51,7 @@ Grpc-Metadata-Content-Type: application/grpc "name": "${operationID}", "operationType": "CREATE", "selfLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/operations/${operationID}", - "startTime": null, + "startTime": "2024-04-01T12:34:56.123456Z", "status": "PENDING", "targetId": "sqlinstance-${uniqueId}", "targetLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/instances/sqlinstance-${uniqueId}", @@ -116,7 +116,7 @@ Grpc-Metadata-Content-Type: application/grpc "name": "${operationID}", "operationType": "DELETE_USER", "selfLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/operations/${operationID}", - "startTime": null, + "startTime": "2024-04-01T12:34:56.123456Z", "status": "PENDING", "targetId": "sqlinstance-${uniqueId}", "targetLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/instances/sqlinstance-${uniqueId}", @@ -626,7 +626,7 @@ Grpc-Metadata-Content-Type: application/grpc "name": "${operationID}", "operationType": "DELETE", "selfLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/operations/${operationID}", - "startTime": null, + "startTime": "2024-04-01T12:34:56.123456Z", "status": "PENDING", "targetId": "sqlinstance-${uniqueId}", "targetLink": "https://sqladmin.googleapis.com/sql/v1beta4/projects/${projectId}/instances/sqlinstance-${uniqueId}", diff --git a/tests/e2e/normalize.go b/tests/e2e/normalize.go index 09399cbbeb..e7a5dbad59 100644 --- a/tests/e2e/normalize.go 
+++ b/tests/e2e/normalize.go @@ -378,6 +378,8 @@ func normalizeHTTPResponses(t *testing.T, events test.LogEntries) { // Compute operations visitor.replacePaths[".fingerprint"] = "abcdef0123A=" + visitor.replacePaths[".startTime"] = "2024-04-01T12:34:56.123456Z" + events.PrettifyJSON(func(obj map[string]any) { if err := visitor.visitMap(obj, ""); err != nil { t.Fatalf("error normalizing response: %v", err) diff --git a/tests/e2e/unified_test.go b/tests/e2e/unified_test.go index d454435cd9..1c450aab48 100644 --- a/tests/e2e/unified_test.go +++ b/tests/e2e/unified_test.go @@ -509,7 +509,6 @@ func runScenario(ctx context.Context, t *testing.T, testPause bool, fixture reso addReplacement("createTime", "2024-04-01T12:34:56.123456Z") addReplacement("insertTime", "2024-04-01T12:34:56.123456Z") - addReplacement("startTime", "2024-04-01T12:34:56.123456Z") addReplacement("response.createTime", "2024-04-01T12:34:56.123456Z") addReplacement("creationTimestamp", "2024-04-01T12:34:56.123456Z") addReplacement("metadata.createTime", "2024-04-01T12:34:56.123456Z") @@ -546,7 +545,6 @@ func runScenario(ctx context.Context, t *testing.T, testPause bool, fixture reso // For compute operations addReplacement("insertTime", "2024-04-01T12:34:56.123456Z") - addReplacement("startTime", "2024-04-01T12:34:56.123456Z") addReplacement("user", "user@example.com") // Specific to vertexai From c18ede8929e5505bfb7ffd027cba7cfacca4ef35 Mon Sep 17 00:00:00 2001 From: justinsb Date: Thu, 20 Jun 2024 19:06:25 -0400 Subject: [PATCH 034/101] monitoringdashboard: implement sectionheader widget --- .../v1beta1/monitoringdashboard_types.go | 2 + .../v1beta1/zz_generated.deepcopy.go | 5 + ...ards.monitoring.cnrm.cloud.google.com.yaml | 48 +++++++ docs/releasenotes/release-1.120.md | 1 + .../v1beta1/monitoringdashboard_types.go | 18 +++ .../v1beta1/zz_generated.deepcopy.go | 36 +++++ .../dashboard_generated.mappings.go | 6 +- .../direct/monitoring/roundtrip_test.go | 2 - ...ated_export_monitoringdashboardfull.golden | 4 + ...object_monitoringdashboardfull.golden.yaml | 4 + .../monitoringdashboardfull/_http.log | 21 +++ .../monitoringdashboardfull/create.yaml | 4 + .../monitoring/monitoringdashboard.md | 132 ++++++++++++++++++ 13 files changed, 279 insertions(+), 4 deletions(-) diff --git a/apis/monitoring/v1beta1/monitoringdashboard_types.go b/apis/monitoring/v1beta1/monitoringdashboard_types.go index b73adfe300..ffa9878362 100644 --- a/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -477,11 +477,13 @@ type Widget struct { // A widget that displays a list of error groups. ErrorReportingPanel *ErrorReportingPanel `json:"errorReportingPanel,omitempty"` + */ // A widget that defines a section header for easier navigation of the // dashboard. SectionHeader *SectionHeader `json:"sectionHeader,omitempty"` + /*NOTYET // A widget that groups the other widgets by using a dropdown menu. 
SingleViewGroup *SingleViewGroup `json:"singleViewGroup,omitempty"` diff --git a/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/apis/monitoring/v1beta1/zz_generated.deepcopy.go index cf6c976c05..40632a6103 100644 --- a/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -1246,6 +1246,11 @@ func (in *Widget) DeepCopyInto(out *Widget) { *out = new(LogsPanel) (*in).DeepCopyInto(*out) } + if in.SectionHeader != nil { + in, out := &in.SectionHeader, &out.SectionHeader + *out = new(SectionHeader) + (*in).DeepCopyInto(*out) + } return } diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml index c1cf5c63ae..edfab4f948 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml @@ -774,6 +774,18 @@ spec: required: - timeSeriesQuery type: object + sectionHeader: + description: A widget that defines a section header + for easier navigation of the dashboard. + properties: + dividerBelow: + description: Whether to insert a divider below + the section in the table of contents + type: boolean + subtitle: + description: The subtitle of the section + type: string + type: object text: description: A raw string or markdown displaying textual content. @@ -2171,6 +2183,18 @@ spec: required: - timeSeriesQuery type: object + sectionHeader: + description: A widget that defines a section header for + easier navigation of the dashboard. + properties: + dividerBelow: + description: Whether to insert a divider below the section + in the table of contents + type: boolean + subtitle: + description: The subtitle of the section + type: string + type: object text: description: A raw string or markdown displaying textual content. @@ -3551,6 +3575,18 @@ spec: required: - timeSeriesQuery type: object + sectionHeader: + description: A widget that defines a section header + for easier navigation of the dashboard. + properties: + dividerBelow: + description: Whether to insert a divider below the + section in the table of contents + type: boolean + subtitle: + description: The subtitle of the section + type: string + type: object text: description: A raw string or markdown displaying textual content. @@ -5024,6 +5060,18 @@ spec: required: - timeSeriesQuery type: object + sectionHeader: + description: A widget that defines a section header + for easier navigation of the dashboard. + properties: + dividerBelow: + description: Whether to insert a divider below + the section in the table of contents + type: boolean + subtitle: + description: The subtitle of the section + type: string + type: object text: description: A raw string or markdown displaying textual content. diff --git a/docs/releasenotes/release-1.120.md b/docs/releasenotes/release-1.120.md index a8f1a5e07f..39c575ff12 100644 --- a/docs/releasenotes/release-1.120.md +++ b/docs/releasenotes/release-1.120.md @@ -26,6 +26,7 @@ output fields from GCP APIs are in `status.observedState.*` * `MonitoringDashboard` * Added `style` fields to text widgets. + * Added `sectionHeader` widgets. * `StorageBucket` * Added `spec.softDeletePolicy` field. 
diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go index d66ba93636..705e1671e7 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -351,6 +351,16 @@ type DashboardSecondaryAggregation struct { PerSeriesAligner *string `json:"perSeriesAligner,omitempty"` } +type DashboardSectionHeader struct { + /* Whether to insert a divider below the section in the table of contents */ + // +optional + DividerBelow *bool `json:"dividerBelow,omitempty"` + + /* The subtitle of the section */ + // +optional + Subtitle *string `json:"subtitle,omitempty"` +} + type DashboardSparkChartView struct { /* The lower bound on data point frequency in the chart implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes it would not make sense to fetch and align data at one minute intervals. This field is optional and exists only as a hint. */ // +optional @@ -510,6 +520,10 @@ type DashboardWidget struct { // +optional Scorecard *DashboardScorecard `json:"scorecard,omitempty"` + /* A widget that defines a section header for easier navigation of the dashboard. */ + // +optional + SectionHeader *DashboardSectionHeader `json:"sectionHeader,omitempty"` + /* A raw string or markdown displaying textual content. */ // +optional Text *DashboardText `json:"text,omitempty"` @@ -536,6 +550,10 @@ type DashboardWidgets struct { // +optional Scorecard *DashboardScorecard `json:"scorecard,omitempty"` + /* A widget that defines a section header for easier navigation of the dashboard. */ + // +optional + SectionHeader *DashboardSectionHeader `json:"sectionHeader,omitempty"` + /* A raw string or markdown displaying textual content. */ // +optional Text *DashboardText `json:"text,omitempty"` diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go index f170f0ee73..a13c4bcc8b 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -1002,6 +1002,32 @@ func (in *DashboardSecondaryAggregation) DeepCopy() *DashboardSecondaryAggregati return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardSectionHeader) DeepCopyInto(out *DashboardSectionHeader) { + *out = *in + if in.DividerBelow != nil { + in, out := &in.DividerBelow, &out.DividerBelow + *out = new(bool) + **out = **in + } + if in.Subtitle != nil { + in, out := &in.Subtitle, &out.Subtitle + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardSectionHeader. +func (in *DashboardSectionHeader) DeepCopy() *DashboardSectionHeader { + if in == nil { + return nil + } + out := new(DashboardSectionHeader) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DashboardSparkChartView) DeepCopyInto(out *DashboardSparkChartView) { *out = *in @@ -1303,6 +1329,11 @@ func (in *DashboardWidget) DeepCopyInto(out *DashboardWidget) { *out = new(DashboardScorecard) (*in).DeepCopyInto(*out) } + if in.SectionHeader != nil { + in, out := &in.SectionHeader, &out.SectionHeader + *out = new(DashboardSectionHeader) + (*in).DeepCopyInto(*out) + } if in.Text != nil { in, out := &in.Text, &out.Text *out = new(DashboardText) @@ -1349,6 +1380,11 @@ func (in *DashboardWidgets) DeepCopyInto(out *DashboardWidgets) { *out = new(DashboardScorecard) (*in).DeepCopyInto(*out) } + if in.SectionHeader != nil { + in, out := &in.SectionHeader, &out.SectionHeader + *out = new(DashboardSectionHeader) + (*in).DeepCopyInto(*out) + } if in.Text != nil { in, out := &in.Text, &out.Text *out = new(DashboardText) diff --git a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go index 48b6172f21..ac1cb5bcf9 100644 --- a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go @@ -840,7 +840,7 @@ func Widget_FromProto(mapCtx *MapContext, in *pb.Widget) *krm.Widget { // MISSING: IncidentList // MISSING: PieChart // MISSING: ErrorReportingPanel - // MISSING: SectionHeader + out.SectionHeader = SectionHeader_FromProto(mapCtx, in.GetSectionHeader()) // MISSING: SingleViewGroup // MISSING: Id return out @@ -872,7 +872,9 @@ func Widget_ToProto(mapCtx *MapContext, in *krm.Widget) *pb.Widget { // MISSING: IncidentList // MISSING: PieChart // MISSING: ErrorReportingPanel - // MISSING: SectionHeader + if oneof := SectionHeader_ToProto(mapCtx, in.SectionHeader); oneof != nil { + out.Content = &pb.Widget_SectionHeader{SectionHeader: oneof} + } // MISSING: SingleViewGroup // MISSING: Id return out diff --git a/pkg/controller/direct/monitoring/roundtrip_test.go b/pkg/controller/direct/monitoring/roundtrip_test.go index 2d86cf5e14..be28b1b93c 100644 --- a/pkg/controller/direct/monitoring/roundtrip_test.go +++ b/pkg/controller/direct/monitoring/roundtrip_test.go @@ -82,8 +82,6 @@ func FuzzMonitoringDashboardSpec(f *testing.F) { unimplementedFields.Insert(widgetPath + ".single_view_group") - unimplementedFields.Insert(widgetPath + ".section_header") - unimplementedFields.Insert(widgetPath + ".time_series_table") unimplementedFields.Insert(widgetPath + ".error_reporting_panel") diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden index 682cd09570..e1bf15c75c 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden @@ -51,6 +51,10 @@ spec: - external: projects/${projectId} kind: Project title: Widget 4 + - sectionHeader: + dividerBelow: true + subtitle: Example SectionHeader + title: SectionHeader Widget displayName: monitoringdashboard-full projectRef: external: ${projectId} \ No newline at end of file diff --git 
a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml index 3a80d17207..6ec527055a 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml @@ -59,6 +59,10 @@ spec: resourceNames: - external: projects/${projectId} title: Widget 4 + - sectionHeader: + dividerBelow: true + subtitle: Example SectionHeader + title: SectionHeader Widget displayName: monitoringdashboard-full projectRef: external: ${projectId} diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log index e64f2d7e68..47673e9bb9 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log @@ -105,6 +105,13 @@ x-goog-request-params: parent=projects%2F${projectId} ] }, "title": "Widget 4" + }, + { + "sectionHeader": { + "dividerBelow": true, + "subtitle": "Example SectionHeader" + }, + "title": "SectionHeader Widget" } ] } @@ -203,6 +210,13 @@ X-Xss-Protection: 0 ] }, "title": "Widget 4" + }, + { + "sectionHeader": { + "dividerBelow": true, + "subtitle": "Example SectionHeader" + }, + "title": "SectionHeader Widget" } ] } @@ -309,6 +323,13 @@ X-Xss-Protection: 0 ] }, "title": "Widget 4" + }, + { + "sectionHeader": { + "dividerBelow": true, + "subtitle": "Example SectionHeader" + }, + "title": "SectionHeader Widget" } ] } diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml index 3e3940202c..851158470c 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml @@ -65,3 +65,7 @@ spec: filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" resourceNames: - external: "projects/${projectId}" + - title: "SectionHeader Widget" + sectionHeader: + dividerBelow: true + subtitle: "Example SectionHeader" diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md index 8ab2101e54..e10eb9c66d 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md @@ -149,6 +149,9 @@ columnLayout: perSeriesAligner: string timeSeriesQueryLanguage: string unitOverride: string + sectionHeader: + dividerBelow: boolean + subtitle: string 
text: content: string format: string @@ -300,6 +303,9 @@ gridLayout: perSeriesAligner: string timeSeriesQueryLanguage: string unitOverride: string + sectionHeader: + dividerBelow: boolean + subtitle: string text: content: string format: string @@ -452,6 +458,9 @@ mosaicLayout: perSeriesAligner: string timeSeriesQueryLanguage: string unitOverride: string + sectionHeader: + dividerBelow: boolean + subtitle: string text: content: string format: string @@ -612,6 +621,9 @@ rowLayout: perSeriesAligner: string timeSeriesQueryLanguage: string unitOverride: string + sectionHeader: + dividerBelow: boolean + subtitle: string text: content: string format: string @@ -1692,6 +1704,36 @@ rowLayout:

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

+ columnLayout.columns[].widgets[].sectionHeader
+   Optional
+   object
+   {% verbatim %}A widget that defines a section header for easier navigation of the dashboard.{% endverbatim %}
+
+ columnLayout.columns[].widgets[].sectionHeader.dividerBelow
+   Optional
+   boolean
+   {% verbatim %}Whether to insert a divider below the section in the table of contents{% endverbatim %}
+
+ columnLayout.columns[].widgets[].sectionHeader.subtitle
+   Optional
+   string
+   {% verbatim %}The subtitle of the section{% endverbatim %}

columnLayout.columns[].widgets[].text

@@ -3679,6 +3721,36 @@ rowLayout:

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

+ gridLayout.widgets[].sectionHeader
+   Optional
+   object
+   {% verbatim %}A widget that defines a section header for easier navigation of the dashboard.{% endverbatim %}
+
+ gridLayout.widgets[].sectionHeader.dividerBelow
+   Optional
+   boolean
+   {% verbatim %}Whether to insert a divider below the section in the table of contents{% endverbatim %}
+
+ gridLayout.widgets[].sectionHeader.subtitle
+   Optional
+   string
+   {% verbatim %}The subtitle of the section{% endverbatim %}

gridLayout.widgets[].text

@@ -5676,6 +5748,36 @@ rowLayout:

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

+ mosaicLayout.tiles[].widget.sectionHeader
+   Optional
+   object
+   {% verbatim %}A widget that defines a section header for easier navigation of the dashboard.{% endverbatim %}
+
+ mosaicLayout.tiles[].widget.sectionHeader.dividerBelow
+   Optional
+   boolean
+   {% verbatim %}Whether to insert a divider below the section in the table of contents{% endverbatim %}
+
+ mosaicLayout.tiles[].widget.sectionHeader.subtitle
+   Optional
+   string
+   {% verbatim %}The subtitle of the section{% endverbatim %}

mosaicLayout.tiles[].widget.text

@@ -7763,6 +7865,36 @@ rowLayout:

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

+ rowLayout.rows[].widgets[].sectionHeader
+   Optional
+   object
+   {% verbatim %}A widget that defines a section header for easier navigation of the dashboard.{% endverbatim %}
+
+ rowLayout.rows[].widgets[].sectionHeader.dividerBelow
+   Optional
+   boolean
+   {% verbatim %}Whether to insert a divider below the section in the table of contents{% endverbatim %}
+
+ rowLayout.rows[].widgets[].sectionHeader.subtitle
+   Optional
+   string
+   {% verbatim %}The subtitle of the section{% endverbatim %}

rowLayout.rows[].widgets[].text

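Taken together, the hunks above add a `sectionHeader` widget to the MonitoringDashboard v1beta1 API, the generated mappers, and the docs. As a quick reference, a minimal manifest sketch using the new widget might look like the following (the resource name and the choice of columnLayout are illustrative, mirroring the test fixture above):

    apiVersion: monitoring.cnrm.cloud.google.com/v1beta1
    kind: MonitoringDashboard
    metadata:
      name: monitoringdashboard-sample
    spec:
      displayName: dashboard-with-section-header
      columnLayout:
        columns:
        - widgets:
          - title: "SectionHeader Widget"
            sectionHeader:
              dividerBelow: true
              subtitle: "Example SectionHeader"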
From 1c61308b1fe4d9fa1ea3483df5d62439075683e0 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Wed, 26 Jun 2024 00:51:52 +0000 Subject: [PATCH 035/101] docs: add edge case api ref Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- .../generated/resource-docs/cloudids/cloudidsendpoint.md | 4 ++++ .../templates/cloudids_cloudidsendpoint.tmpl | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md index 9d8a5dcb38..064240f3a4 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md @@ -28,6 +28,10 @@ v1.projects.locations.endpoints +{{gcp_name_short}} REST Resource Documentation +/intrusion-detection-system/docs/configuring-ids#api + + {{product_name_short}} Resource Short Names gcpcloudidsendpoint
gcpcloudidsendpoints
cloudidsendpoint diff --git a/scripts/generate-google3-docs/resource-reference/templates/cloudids_cloudidsendpoint.tmpl b/scripts/generate-google3-docs/resource-reference/templates/cloudids_cloudidsendpoint.tmpl index d552a2251e..6d1ca3c5a6 100644 --- a/scripts/generate-google3-docs/resource-reference/templates/cloudids_cloudidsendpoint.tmpl +++ b/scripts/generate-google3-docs/resource-reference/templates/cloudids_cloudidsendpoint.tmpl @@ -27,6 +27,10 @@ v1.projects.locations.endpoints +{{"{{gcp_name_short}}"}} REST Resource Documentation +/intrusion-detection-system/docs/configuring-ids#api + + {{"{{product_name_short}}"}} Resource Short Names {{ .ShortNames}} From 0edae9e9f978463a2390aef65f8ae441244166e1 Mon Sep 17 00:00:00 2001 From: Yuwen Ma Date: Wed, 26 Jun 2024 08:08:46 +0000 Subject: [PATCH 036/101] fix: cbwp update the API to use auto-generated struct --- apis/cloudbuild/v1alpha1/conversion.go | 62 +++++++++---- apis/cloudbuild/v1alpha1/doc.go | 17 ++++ apis/cloudbuild/v1alpha1/workerpool_types.go | 90 +++++++++++++------ .../v1alpha1/zz_generated.deepcopy.go | 79 ++++++++-------- ...ools.cloudbuild.cnrm.cloud.google.com.yaml | 85 ++++++++++++++++-- dev/tools/proto-to-mapper/Makefile | 1 + .../v1alpha1/cloudbuildworkerpool_types.go | 30 +++++-- .../v1alpha1/zz_generated.deepcopy.go | 27 +++--- .../cloudbuild/workerpool_controller.go | 8 +- ...ed_object_cloudbuildworkerpool.golden.yaml | 3 +- .../v1alpha1/cloudbuildworkerpool/_http.log | 2 +- 11 files changed, 283 insertions(+), 121 deletions(-) create mode 100644 apis/cloudbuild/v1alpha1/doc.go diff --git a/apis/cloudbuild/v1alpha1/conversion.go b/apis/cloudbuild/v1alpha1/conversion.go index 11bc325119..13badf11a4 100644 --- a/apis/cloudbuild/v1alpha1/conversion.go +++ b/apis/cloudbuild/v1alpha1/conversion.go @@ -18,6 +18,7 @@ import ( "fmt" cloudbuildpb "cloud.google.com/go/cloudbuild/apiv1/v2/cloudbuildpb" + refv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" ) func Convert_WorkerPool_API_v1_To_KRM_status(in *cloudbuildpb.WorkerPool, out *CloudBuildWorkerPoolStatus) error { @@ -37,44 +38,47 @@ func Convert_PrivatePoolV1Config_API_v1_To_KRM(in *cloudbuildpb.PrivatePoolV1Con if in == nil { return nil } - out.NetworkConfig = &NetworkConfigState{} + out.NetworkConfig = &PrivatePoolV1Config_NetworkConfig{} if err := Convert_NetworkConfig_API_v1_To_KRM(in.NetworkConfig, out.NetworkConfig); err != nil { return err } - out.WorkerConfig = &WorkerConfig{} + out.WorkerConfig = &PrivatePoolV1Config_WorkerConfig{} if err := Convert_WorkerConfig_API_v1_To_KRM(in.WorkerConfig, out.WorkerConfig); err != nil { return err } return nil } -func Convert_NetworkConfig_API_v1_To_KRM(in *cloudbuildpb.PrivatePoolV1Config_NetworkConfig, out *NetworkConfigState) error { +func Convert_NetworkConfig_API_v1_To_KRM(in *cloudbuildpb.PrivatePoolV1Config_NetworkConfig, out *PrivatePoolV1Config_NetworkConfig) error { if in == nil { return nil } switch in.EgressOption { case cloudbuildpb.PrivatePoolV1Config_NetworkConfig_EGRESS_OPTION_UNSPECIFIED: - out.EgressOption = "EGRESS_OPTION_UNSPECIFIED" + out.EgressOption = LazyPtr("EGRESS_OPTION_UNSPECIFIED") case cloudbuildpb.PrivatePoolV1Config_NetworkConfig_NO_PUBLIC_EGRESS: - out.EgressOption = "NO_PUBLIC_EGRESS" + out.EgressOption = LazyPtr("NO_PUBLIC_EGRESS") case cloudbuildpb.PrivatePoolV1Config_NetworkConfig_PUBLIC_EGRESS: - out.EgressOption = "PUBLIC_EGRESS" + out.EgressOption = LazyPtr("PUBLIC_EGRESS") default: - return fmt.Errorf("unknown egressoption %s", out.EgressOption) + 
return fmt.Errorf("unknown egressoption %s", in.EgressOption) + } + + out.PeeredNetworkIPRange = PtrTo(in.GetPeeredNetworkIpRange()) + out.PeeredNetworkRef = refv1beta1.ComputeNetworkRef{ + External: in.GetPeeredNetwork(), } - out.PeeredNetwork = in.PeeredNetwork - out.PeeredNetworkIPRange = in.PeeredNetworkIpRange return nil } -func Convert_WorkerConfig_API_v1_To_KRM(in *cloudbuildpb.PrivatePoolV1Config_WorkerConfig, out *WorkerConfig) error { +func Convert_WorkerConfig_API_v1_To_KRM(in *cloudbuildpb.PrivatePoolV1Config_WorkerConfig, out *PrivatePoolV1Config_WorkerConfig) error { if in == nil { return nil } - out.DiskSizeGb = in.DiskSizeGb - out.MachineType = in.MachineType + out.DiskSizeGb = LazyPtr(in.GetDiskSizeGb()) + out.MachineType = LazyPtr(in.GetMachineType()) return nil } @@ -116,15 +120,15 @@ func Convert_PrivatePoolV1Config_KRM_To_API_v1(in *PrivatePoolV1Config, out *clo return nil } -func Convert_PrivatePoolV1Config_NetworkConfig_KRM_To_API_v1(in *NetworkConfig, out *cloudbuildpb.PrivatePoolV1Config_NetworkConfig) error { +func Convert_PrivatePoolV1Config_NetworkConfig_KRM_To_API_v1(in *PrivatePoolV1Config_NetworkConfig, out *cloudbuildpb.PrivatePoolV1Config_NetworkConfig) error { if in == nil { return nil } obj := in.DeepCopy() - out.PeeredNetworkIpRange = obj.PeeredNetworkIPRange + out.PeeredNetworkIpRange = ValueOf(obj.PeeredNetworkIPRange) // custom - switch obj.EgressOption { + switch ValueOf(obj.EgressOption) { case "EGRESS_OPTION_UNSPECIFIED": out.EgressOption = 0 case "NO_PUBLIC_EGRESS": @@ -132,7 +136,7 @@ func Convert_PrivatePoolV1Config_NetworkConfig_KRM_To_API_v1(in *NetworkConfig, case "PUBLIC_EGRESS": out.EgressOption = 2 default: - return fmt.Errorf("unknown egressoption %s", obj.EgressOption) + return fmt.Errorf("unknown egressoption %s", ValueOf(obj.EgressOption)) } if obj.PeeredNetworkRef.External != "" { @@ -141,12 +145,32 @@ func Convert_PrivatePoolV1Config_NetworkConfig_KRM_To_API_v1(in *NetworkConfig, return nil } -func Convert_PrivatePoolV1Config_WorkerConfig_KRM_To_API_v1(in *WorkerConfig, out *cloudbuildpb.PrivatePoolV1Config_WorkerConfig) error { +func Convert_PrivatePoolV1Config_WorkerConfig_KRM_To_API_v1(in *PrivatePoolV1Config_WorkerConfig, out *cloudbuildpb.PrivatePoolV1Config_WorkerConfig) error { if in == nil { return nil } obj := in.DeepCopy() - out.DiskSizeGb = obj.DiskSizeGb - out.MachineType = obj.MachineType + out.DiskSizeGb = ValueOf(obj.DiskSizeGb) + out.MachineType = ValueOf(obj.MachineType) return nil } + +func PtrTo[T any](t T) *T { + return &t +} + +func ValueOf[T any](t *T) T { + var zeroVal T + if t == nil { + return zeroVal + } + return *t +} + +func LazyPtr[T comparable](v T) *T { + var defaultValue T + if v == defaultValue { + return nil + } + return &v +} diff --git a/apis/cloudbuild/v1alpha1/doc.go b/apis/cloudbuild/v1alpha1/doc.go new file mode 100644 index 0000000000..c42d7b0d60 --- /dev/null +++ b/apis/cloudbuild/v1alpha1/doc.go @@ -0,0 +1,17 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// +kcc:proto=google.devtools.cloudbuild.v1 + +package v1alpha1 diff --git a/apis/cloudbuild/v1alpha1/workerpool_types.go b/apis/cloudbuild/v1alpha1/workerpool_types.go index 0438745320..82dc4ce3dc 100644 --- a/apis/cloudbuild/v1alpha1/workerpool_types.go +++ b/apis/cloudbuild/v1alpha1/workerpool_types.go @@ -22,40 +22,76 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// +kcc:proto=google.devtools.cloudbuild.v1.WorkerPool // CloudBuildWorkerPoolSpec defines the desired state of Instance type CloudBuildWorkerPoolSpec struct { - Name string `json:"name,omitempty"` - DisplayName string `json:"displayName,omitempty"` - ResourceID *string `json:"resourceID,omitempty"` + + // A user-specified, human-readable name for the `WorkerPool`. If provided, + // this value must be 1-63 characters. + DisplayName string `json:"displayName,omitempty"` + + // The `WorkerPool` name. If not given, the metadata.name will be used. + // + optional + ResourceID *string `json:"resourceID,omitempty"` + // +required ProjectRef *refv1beta1.ProjectRef `json:"projectRef"` + // +required Location string `json:"location"` + + // Legacy Private Pool configuration. // +required PrivatePoolConfig *PrivatePoolV1Config `json:"privatePoolV1Config,omitempty"` + + // TODO: support annotations } +// +kcc:proto=google.devtools.cloudbuild.v1.PrivatePoolV1Config type PrivatePoolV1Config struct { + // Machine configuration for the workers in the pool. // +required - WorkerConfig *WorkerConfig `json:"workerConfig,omitempty"` - // +optional - NetworkConfig *NetworkConfig `json:"networkConfig,omitempty"` + WorkerConfig *PrivatePoolV1Config_WorkerConfig `json:"workerConfig,omitempty"` + + // Network configuration for the pool. + NetworkConfig *PrivatePoolV1Config_NetworkConfig `json:"networkConfig,omitempty"` } -type WorkerConfig struct { - // +optional - MachineType string `json:"machineType,omitempty"` - // +optional - DiskSizeGb int64 `json:"diskSizeGb,omitempty"` +// +kcc:proto=google.devtools.cloudbuild.v1.PrivatePoolV1Config.WorkerConfig +type PrivatePoolV1Config_WorkerConfig struct { + // Machine type of a worker, such as `e2-medium`. + // See [Worker pool config + // file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). + // If left blank, Cloud Build will use a sensible default. + MachineType *string `json:"machineType,omitempty"` + + // Size of the disk attached to the worker, in GB. + // See [Worker pool config + // file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). + // Specify a value of up to 2000. If `0` is specified, Cloud Build will use + // a standard disk size. + DiskSizeGb *int64 `json:"diskSizeGb,omitempty"` } -type NetworkConfig struct { - // +required +// +kcc:proto=google.devtools.cloudbuild.v1.PrivatePoolV1Config.NetworkConfig +type PrivatePoolV1Config_NetworkConfig struct { + // Immutable. The network definition that the workers are peered + // to. If this section is left empty, the workers will be peered to + // `WorkerPool.project_id` on the service producer network. PeeredNetworkRef refv1beta1.ComputeNetworkRef `json:"peeredNetworkRef,omitempty"` - // +optional - EgressOption string `json:"egressOption,omitempty"` - // +optional - PeeredNetworkIPRange string `json:"peeredNetworkIPRange,omitempty"` + + // Option to configure network egress for the workers. 
+ EgressOption *string `json:"egressOption,omitempty"` + + // Immutable. Subnet IP range within the peered network. This is specified + // in CIDR notation with a slash and the subnet prefix size. You can + // optionally specify an IP address before the subnet prefix value. e.g. + // `192.168.0.0/29` would specify an IP range starting at 192.168.0.0 with a + // prefix size of 29 bits. + // `/16` would specify a prefix size of 16 bits, with an automatically + // determined IP within the peered VPC. + // If unspecified, a value of `/24` will be used. + PeeredNetworkIPRange *string `json:"peeredNetworkIPRange,omitempty"` } // CloudBuildWorkerPoolStatus defines the observed state of Instance @@ -77,6 +113,7 @@ type CloudBuildWorkerPoolStatus struct { ObservedState *CloudBuildWorkerPoolObservedState `json:"observedState,omitempty"` } +// +kcc:proto=google.devtools.cloudbuild.v1.WorkerPool type CloudBuildWorkerPoolObservedState struct { /* The creation timestamp of the workerpool.*/ // +optional @@ -87,24 +124,19 @@ type CloudBuildWorkerPoolObservedState struct { // +optional // +kubebuilder:validation:Format=date-time UpdateTime *string `json:"updateTime,omitempty"` - // +optional - WorkerConfig *WorkerConfig `json:"workerConfig,omitempty"` - NetworkConfig *NetworkConfigState `json:"networkConfig,omitempty"` + + // Machine configuration for the workers in the pool. + // +required + WorkerConfig *PrivatePoolV1Config_WorkerConfig `json:"workerConfig,omitempty"` + + // Network configuration for the pool. + NetworkConfig *PrivatePoolV1Config_NetworkConfig `json:"networkConfig,omitempty"` /* The Checksum computed by the server, using weak indicator.*/ // +optional ETag *string `json:"etag,omitempty"` } -type NetworkConfigState struct { - // +optional - PeeredNetwork string `json:"peeredNetwork,omitempty"` - // +optional - EgressOption string `json:"egressOption,omitempty"` - // +optional - PeeredNetworkIPRange string `json:"peeredNetworkIPRange,omitempty"` -} - // +kubebuilder:metadata:labels="cnrm.cloud.google.com/managed-by-kcc=true";"cnrm.cloud.google.com/stability-level=beta" // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // CloudBuildWorkerPool is the Schema for the CloudBuild WorkerPool API diff --git a/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go b/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go index 8ba75ff863..34b685e81f 100644 --- a/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go +++ b/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go @@ -84,13 +84,13 @@ func (in *CloudBuildWorkerPoolObservedState) DeepCopyInto(out *CloudBuildWorkerP } if in.WorkerConfig != nil { in, out := &in.WorkerConfig, &out.WorkerConfig - *out = new(WorkerConfig) - **out = **in + *out = new(PrivatePoolV1Config_WorkerConfig) + (*in).DeepCopyInto(*out) } if in.NetworkConfig != nil { in, out := &in.NetworkConfig, &out.NetworkConfig - *out = new(NetworkConfigState) - **out = **in + *out = new(PrivatePoolV1Config_NetworkConfig) + (*in).DeepCopyInto(*out) } if in.ETag != nil { in, out := &in.ETag, &out.ETag @@ -175,72 +175,77 @@ func (in *CloudBuildWorkerPoolStatus) DeepCopy() *CloudBuildWorkerPoolStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NetworkConfig) DeepCopyInto(out *NetworkConfig) { +func (in *PrivatePoolV1Config) DeepCopyInto(out *PrivatePoolV1Config) { *out = *in - out.PeeredNetworkRef = in.PeeredNetworkRef -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfig. -func (in *NetworkConfig) DeepCopy() *NetworkConfig { - if in == nil { - return nil + if in.WorkerConfig != nil { + in, out := &in.WorkerConfig, &out.WorkerConfig + *out = new(PrivatePoolV1Config_WorkerConfig) + (*in).DeepCopyInto(*out) + } + if in.NetworkConfig != nil { + in, out := &in.NetworkConfig, &out.NetworkConfig + *out = new(PrivatePoolV1Config_NetworkConfig) + (*in).DeepCopyInto(*out) } - out := new(NetworkConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkConfigState) DeepCopyInto(out *NetworkConfigState) { - *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigState. -func (in *NetworkConfigState) DeepCopy() *NetworkConfigState { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivatePoolV1Config. +func (in *PrivatePoolV1Config) DeepCopy() *PrivatePoolV1Config { if in == nil { return nil } - out := new(NetworkConfigState) + out := new(PrivatePoolV1Config) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PrivatePoolV1Config) DeepCopyInto(out *PrivatePoolV1Config) { +func (in *PrivatePoolV1Config_NetworkConfig) DeepCopyInto(out *PrivatePoolV1Config_NetworkConfig) { *out = *in - if in.WorkerConfig != nil { - in, out := &in.WorkerConfig, &out.WorkerConfig - *out = new(WorkerConfig) + out.PeeredNetworkRef = in.PeeredNetworkRef + if in.EgressOption != nil { + in, out := &in.EgressOption, &out.EgressOption + *out = new(string) **out = **in } - if in.NetworkConfig != nil { - in, out := &in.NetworkConfig, &out.NetworkConfig - *out = new(NetworkConfig) + if in.PeeredNetworkIPRange != nil { + in, out := &in.PeeredNetworkIPRange, &out.PeeredNetworkIPRange + *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivatePoolV1Config. -func (in *PrivatePoolV1Config) DeepCopy() *PrivatePoolV1Config { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivatePoolV1Config_NetworkConfig. +func (in *PrivatePoolV1Config_NetworkConfig) DeepCopy() *PrivatePoolV1Config_NetworkConfig { if in == nil { return nil } - out := new(PrivatePoolV1Config) + out := new(PrivatePoolV1Config_NetworkConfig) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkerConfig) DeepCopyInto(out *WorkerConfig) { +func (in *PrivatePoolV1Config_WorkerConfig) DeepCopyInto(out *PrivatePoolV1Config_WorkerConfig) { *out = *in + if in.MachineType != nil { + in, out := &in.MachineType, &out.MachineType + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(int64) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerConfig. 
-func (in *WorkerConfig) DeepCopy() *WorkerConfig { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivatePoolV1Config_WorkerConfig. +func (in *PrivatePoolV1Config_WorkerConfig) DeepCopy() *PrivatePoolV1Config_WorkerConfig { if in == nil { return nil } - out := new(WorkerConfig) + out := new(PrivatePoolV1Config_WorkerConfig) in.DeepCopyInto(out) return out } diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudbuildworkerpools.cloudbuild.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudbuildworkerpools.cloudbuild.cnrm.cloud.google.com.yaml index b370c4c134..f7f83b9bd6 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudbuildworkerpools.cloudbuild.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudbuildworkerpools.cloudbuild.cnrm.cloud.google.com.yaml @@ -40,20 +40,35 @@ spec: description: CloudBuildWorkerPoolSpec defines the desired state of Instance properties: displayName: + description: A user-specified, human-readable name for the `WorkerPool`. + If provided, this value must be 1-63 characters. type: string location: type: string - name: - type: string privatePoolV1Config: + description: Legacy Private Pool configuration. properties: networkConfig: + description: Network configuration for the pool. properties: egressOption: + description: Option to configure network egress for the workers. type: string peeredNetworkIPRange: + description: Immutable. Subnet IP range within the peered + network. This is specified in CIDR notation with a slash + and the subnet prefix size. You can optionally specify an + IP address before the subnet prefix value. e.g. `192.168.0.0/29` + would specify an IP range starting at 192.168.0.0 with a + prefix size of 29 bits. `/16` would specify a prefix size + of 16 bits, with an automatically determined IP within the + peered VPC. If unspecified, a value of `/24` will be used. type: string peeredNetworkRef: + description: Immutable. The network definition that the workers + are peered to. If this section is left empty, the workers + will be peered to `WorkerPool.project_id` on the service + producer network. oneOf: - not: required: @@ -81,15 +96,21 @@ spec: resource. type: string type: object - required: - - peeredNetworkRef type: object workerConfig: + description: Machine configuration for the workers in the pool. properties: diskSizeGb: + description: Size of the disk attached to the worker, in GB. + See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). + Specify a value of up to 2000. If `0` is specified, Cloud + Build will use a standard disk size. format: int64 type: integer machineType: + description: Machine type of a worker, such as `e2-medium`. + See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). + If left blank, Cloud Build will use a sensible default. type: string type: object required: @@ -128,6 +149,8 @@ spec: type: string type: object resourceID: + description: The `WorkerPool` name. If not given, the metadata.name + will be used. type: string required: - location @@ -188,26 +211,76 @@ spec: description: The Checksum computed by the server, using weak indicator. type: string networkConfig: + description: Network configuration for the pool. 
properties: egressOption: - type: string - peeredNetwork: + description: Option to configure network egress for the workers. type: string peeredNetworkIPRange: + description: Immutable. Subnet IP range within the peered + network. This is specified in CIDR notation with a slash + and the subnet prefix size. You can optionally specify an + IP address before the subnet prefix value. e.g. `192.168.0.0/29` + would specify an IP range starting at 192.168.0.0 with a + prefix size of 29 bits. `/16` would specify a prefix size + of 16 bits, with an automatically determined IP within the + peered VPC. If unspecified, a value of `/24` will be used. type: string + peeredNetworkRef: + description: Immutable. The network definition that the workers + are peered to. If this section is left empty, the workers + will be peered to `WorkerPool.project_id` on the service + producer network. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: The compute network selflink of form "projects//global/networks/", + when not managed by KCC. + type: string + name: + description: The `name` field of a `ComputeNetwork` resource. + type: string + namespace: + description: The `namespace` field of a `ComputeNetwork` + resource. + type: string + type: object type: object updateTime: description: The last update timestamp of the workerpool. format: date-time type: string workerConfig: + description: Machine configuration for the workers in the pool. properties: diskSizeGb: + description: Size of the disk attached to the worker, in GB. + See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). + Specify a value of up to 2000. If `0` is specified, Cloud + Build will use a standard disk size. format: int64 type: integer machineType: + description: Machine type of a worker, such as `e2-medium`. + See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). + If left blank, Cloud Build will use a sensible default. type: string type: object + required: + - workerConfig type: object type: object type: object diff --git a/dev/tools/proto-to-mapper/Makefile b/dev/tools/proto-to-mapper/Makefile index 417c56a0cc..33eecff19a 100644 --- a/dev/tools/proto-to-mapper/Makefile +++ b/dev/tools/proto-to-mapper/Makefile @@ -11,6 +11,7 @@ generate-pb: install-protoc-linux ./third_party/googleapis/google/iam/v1/*.proto \ ./third_party/googleapis/google/logging/v2/*.proto \ ./third_party/googleapis/google/monitoring/dashboard/v1/*.proto \ + ./third_party/googleapis/google/devtools/cloudbuild/*/*.proto \ -o build/googleapis.pb go run . -apis ../../../apis/ --api-packages github.com/GoogleCloudPlatform/apis diff --git a/pkg/clients/generated/apis/cloudbuild/v1alpha1/cloudbuildworkerpool_types.go b/pkg/clients/generated/apis/cloudbuild/v1alpha1/cloudbuildworkerpool_types.go index ac74f7c809..34c5a7af91 100644 --- a/pkg/clients/generated/apis/cloudbuild/v1alpha1/cloudbuildworkerpool_types.go +++ b/pkg/clients/generated/apis/cloudbuild/v1alpha1/cloudbuildworkerpool_types.go @@ -36,57 +36,68 @@ import ( ) type WorkerpoolNetworkConfig struct { + /* Option to configure network egress for the workers. */ // +optional EgressOption *string `json:"egressOption,omitempty"` + /* Immutable. Subnet IP range within the peered network. This is specified in CIDR notation with a slash and the subnet prefix size. 
You can optionally specify an IP address before the subnet prefix value. e.g. `192.168.0.0/29` would specify an IP range starting at 192.168.0.0 with a prefix size of 29 bits. `/16` would specify a prefix size of 16 bits, with an automatically determined IP within the peered VPC. If unspecified, a value of `/24` will be used. */ // +optional PeeredNetworkIPRange *string `json:"peeredNetworkIPRange,omitempty"` - PeeredNetworkRef v1alpha1.ResourceRef `json:"peeredNetworkRef"` + /* Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. */ + // +optional + PeeredNetworkRef *v1alpha1.ResourceRef `json:"peeredNetworkRef,omitempty"` } type WorkerpoolPrivatePoolV1Config struct { + /* Network configuration for the pool. */ // +optional NetworkConfig *WorkerpoolNetworkConfig `json:"networkConfig,omitempty"` + /* Machine configuration for the workers in the pool. */ WorkerConfig WorkerpoolWorkerConfig `json:"workerConfig"` } type WorkerpoolWorkerConfig struct { + /* Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). Specify a value of up to 2000. If `0` is specified, Cloud Build will use a standard disk size. */ // +optional DiskSizeGb *int64 `json:"diskSizeGb,omitempty"` + /* Machine type of a worker, such as `e2-medium`. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). If left blank, Cloud Build will use a sensible default. */ // +optional MachineType *string `json:"machineType,omitempty"` } type CloudBuildWorkerPoolSpec struct { + /* A user-specified, human-readable name for the `WorkerPool`. If provided, this value must be 1-63 characters. */ // +optional DisplayName *string `json:"displayName,omitempty"` Location string `json:"location"` - // +optional - Name *string `json:"name,omitempty"` - + /* Legacy Private Pool configuration. */ PrivatePoolV1Config WorkerpoolPrivatePoolV1Config `json:"privatePoolV1Config"` /* The Project that this resource belongs to. */ ProjectRef v1alpha1.ResourceRef `json:"projectRef"` + /* The `WorkerPool` name. If not given, the metadata.name will be used. */ // +optional ResourceID *string `json:"resourceID,omitempty"` } type WorkerpoolNetworkConfigStatus struct { + /* Option to configure network egress for the workers. */ // +optional EgressOption *string `json:"egressOption,omitempty"` + /* Immutable. Subnet IP range within the peered network. This is specified in CIDR notation with a slash and the subnet prefix size. You can optionally specify an IP address before the subnet prefix value. e.g. `192.168.0.0/29` would specify an IP range starting at 192.168.0.0 with a prefix size of 29 bits. `/16` would specify a prefix size of 16 bits, with an automatically determined IP within the peered VPC. If unspecified, a value of `/24` will be used. */ // +optional - PeeredNetwork *string `json:"peeredNetwork,omitempty"` + PeeredNetworkIPRange *string `json:"peeredNetworkIPRange,omitempty"` + /* Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. 
*/ // +optional - PeeredNetworkIPRange *string `json:"peeredNetworkIPRange,omitempty"` + PeeredNetworkRef *v1alpha1.ResourceRef `json:"peeredNetworkRef,omitempty"` } type WorkerpoolObservedStateStatus struct { @@ -98,6 +109,7 @@ type WorkerpoolObservedStateStatus struct { // +optional Etag *string `json:"etag,omitempty"` + /* Network configuration for the pool. */ // +optional NetworkConfig *WorkerpoolNetworkConfigStatus `json:"networkConfig,omitempty"` @@ -105,14 +117,16 @@ type WorkerpoolObservedStateStatus struct { // +optional UpdateTime *string `json:"updateTime,omitempty"` - // +optional - WorkerConfig *WorkerpoolWorkerConfigStatus `json:"workerConfig,omitempty"` + /* Machine configuration for the workers in the pool. */ + WorkerConfig WorkerpoolWorkerConfigStatus `json:"workerConfig"` } type WorkerpoolWorkerConfigStatus struct { + /* Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). Specify a value of up to 2000. If `0` is specified, Cloud Build will use a standard disk size. */ // +optional DiskSizeGb *int64 `json:"diskSizeGb,omitempty"` + /* Machine type of a worker, such as `e2-medium`. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). If left blank, Cloud Build will use a sensible default. */ // +optional MachineType *string `json:"machineType,omitempty"` } diff --git a/pkg/clients/generated/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go index 2ed885b8f9..8969c597aa 100644 --- a/pkg/clients/generated/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go @@ -98,11 +98,6 @@ func (in *CloudBuildWorkerPoolSpec) DeepCopyInto(out *CloudBuildWorkerPoolSpec) *out = new(string) **out = **in } - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) - **out = **in - } in.PrivatePoolV1Config.DeepCopyInto(&out.PrivatePoolV1Config) out.ProjectRef = in.ProjectRef if in.ResourceID != nil { @@ -172,7 +167,11 @@ func (in *WorkerpoolNetworkConfig) DeepCopyInto(out *WorkerpoolNetworkConfig) { *out = new(string) **out = **in } - out.PeeredNetworkRef = in.PeeredNetworkRef + if in.PeeredNetworkRef != nil { + in, out := &in.PeeredNetworkRef, &out.PeeredNetworkRef + *out = new(k8sv1alpha1.ResourceRef) + **out = **in + } return } @@ -194,16 +193,16 @@ func (in *WorkerpoolNetworkConfigStatus) DeepCopyInto(out *WorkerpoolNetworkConf *out = new(string) **out = **in } - if in.PeeredNetwork != nil { - in, out := &in.PeeredNetwork, &out.PeeredNetwork - *out = new(string) - **out = **in - } if in.PeeredNetworkIPRange != nil { in, out := &in.PeeredNetworkIPRange, &out.PeeredNetworkIPRange *out = new(string) **out = **in } + if in.PeeredNetworkRef != nil { + in, out := &in.PeeredNetworkRef, &out.PeeredNetworkRef + *out = new(k8sv1alpha1.ResourceRef) + **out = **in + } return } @@ -240,11 +239,7 @@ func (in *WorkerpoolObservedStateStatus) DeepCopyInto(out *WorkerpoolObservedSta *out = new(string) **out = **in } - if in.WorkerConfig != nil { - in, out := &in.WorkerConfig, &out.WorkerConfig - *out = new(WorkerpoolWorkerConfigStatus) - (*in).DeepCopyInto(*out) - } + in.WorkerConfig.DeepCopyInto(&out.WorkerConfig) return } diff --git a/pkg/controller/direct/cloudbuild/workerpool_controller.go b/pkg/controller/direct/cloudbuild/workerpool_controller.go index 87ff62935b..e882328be6 
100644 --- a/pkg/controller/direct/cloudbuild/workerpool_controller.go +++ b/pkg/controller/direct/cloudbuild/workerpool_controller.go @@ -226,19 +226,19 @@ func (a *Adapter) Update(ctx context.Context, u *unstructured.Unstructured) erro if desiredConfig.NetworkConfig != nil { switch actualConfig.NetworkConfig.EgressOption { case cloudbuildpb.PrivatePoolV1Config_NetworkConfig_EGRESS_OPTION_UNSPECIFIED: - if !reflect.DeepEqual(desiredConfig.NetworkConfig.EgressOption, "UNSPECIFIED") { + if !reflect.DeepEqual(ValueOf(desiredConfig.NetworkConfig.EgressOption), "UNSPECIFIED") { updateMask.Paths = append(updateMask.Paths, "private_pool_v1_config.network_config.egress_option") } case cloudbuildpb.PrivatePoolV1Config_NetworkConfig_NO_PUBLIC_EGRESS: - if !reflect.DeepEqual(desiredConfig.NetworkConfig.EgressOption, "NO_PUBLIC_EGRESS") { + if !reflect.DeepEqual(ValueOf(desiredConfig.NetworkConfig.EgressOption), "NO_PUBLIC_EGRESS") { updateMask.Paths = append(updateMask.Paths, "private_pool_v1_config.network_config.egress_option") } case cloudbuildpb.PrivatePoolV1Config_NetworkConfig_PUBLIC_EGRESS: - if !reflect.DeepEqual(desiredConfig.NetworkConfig.EgressOption, "PUBLIC_EGRESS") { + if !reflect.DeepEqual(ValueOf(desiredConfig.NetworkConfig.EgressOption), "PUBLIC_EGRESS") { updateMask.Paths = append(updateMask.Paths, "private_pool_v1_config.network_config.egress_option") } } - expectedIPRange := desiredConfig.NetworkConfig.PeeredNetworkIPRange + expectedIPRange := ValueOf(desiredConfig.NetworkConfig.PeeredNetworkIPRange) if expectedIPRange != "" && !reflect.DeepEqual(expectedIPRange, actualConfig.NetworkConfig.PeeredNetworkIpRange) { updateMask.Paths = append(updateMask.Paths, "private_pool_v1_config.network_config.peered_network_ip_range") } diff --git a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml index 94c00dde23..dbe2301dc7 100644 --- a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml @@ -37,8 +37,9 @@ status: etag: abcdef123456 networkConfig: egressOption: NO_PUBLIC_EGRESS - peeredNetwork: projects/${projectId}/global/networks/computenetwork-${uniqueId} peeredNetworkIPRange: /29 + peeredNetworkRef: + external: projects/${projectId}/global/networks/computenetwork-${uniqueId} updateTime: "1970-01-01T00:00:00Z" workerConfig: diskSizeGb: 100 diff --git a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_http.log b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_http.log index 48cd99d676..0a16b7b480 100644 --- a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_http.log @@ -646,7 +646,7 @@ X-Xss-Protection: 0 --- -PATCH https://cloudbuild.googleapis.com/v1/projects/${projectId}/locations/us-central1/workerPools/cloudbuildworkerpool-${uniqueId}?%24alt=json%3Benum-encoding%3Dint&updateMask=privatePoolV1Config.workerConfig.diskSizeGb +PATCH 
https://cloudbuild.googleapis.com/v1/projects/${projectId}/locations/us-central1/workerPools/cloudbuildworkerpool-${uniqueId}?%24alt=json%3Benum-encoding%3Dint&updateMask=privatePoolV1Config.workerConfig.diskSizeGb%2CprivatePoolV1Config.workerConfig.machineType Content-Type: application/json x-goog-request-params: location=us-central1 From d70d4b282fc17ad6d65c2a4ec07660bdd51ab3b2 Mon Sep 17 00:00:00 2001 From: Yuwen Ma Date: Wed, 26 Jun 2024 09:26:33 +0000 Subject: [PATCH 037/101] chore: use auto-generated mapper in cbwp --- apis/cloudbuild/v1alpha1/conversion.go | 176 ------------------ pkg/controller/direct/cloudbuild/maputils.go | 100 ++++++++++ .../cloudbuild/workerpool_controller.go | 58 ++---- .../direct/cloudbuild/workerpool_mappings.go | 109 +++++++++++ 4 files changed, 228 insertions(+), 215 deletions(-) delete mode 100644 apis/cloudbuild/v1alpha1/conversion.go create mode 100644 pkg/controller/direct/cloudbuild/maputils.go create mode 100644 pkg/controller/direct/cloudbuild/workerpool_mappings.go diff --git a/apis/cloudbuild/v1alpha1/conversion.go b/apis/cloudbuild/v1alpha1/conversion.go deleted file mode 100644 index 13badf11a4..0000000000 --- a/apis/cloudbuild/v1alpha1/conversion.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v1alpha1 - -import ( - "fmt" - - cloudbuildpb "cloud.google.com/go/cloudbuild/apiv1/v2/cloudbuildpb" - refv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" -) - -func Convert_WorkerPool_API_v1_To_KRM_status(in *cloudbuildpb.WorkerPool, out *CloudBuildWorkerPoolStatus) error { - if in == nil { - return nil - } - out.ObservedState = &CloudBuildWorkerPoolObservedState{} - - out.ObservedState.ETag = &in.Etag - if err := Convert_PrivatePoolV1Config_API_v1_To_KRM(in.GetPrivatePoolV1Config(), out.ObservedState); err != nil { - return err - } - return nil -} - -func Convert_PrivatePoolV1Config_API_v1_To_KRM(in *cloudbuildpb.PrivatePoolV1Config, out *CloudBuildWorkerPoolObservedState) error { - if in == nil { - return nil - } - out.NetworkConfig = &PrivatePoolV1Config_NetworkConfig{} - if err := Convert_NetworkConfig_API_v1_To_KRM(in.NetworkConfig, out.NetworkConfig); err != nil { - return err - } - out.WorkerConfig = &PrivatePoolV1Config_WorkerConfig{} - if err := Convert_WorkerConfig_API_v1_To_KRM(in.WorkerConfig, out.WorkerConfig); err != nil { - return err - } - return nil -} - -func Convert_NetworkConfig_API_v1_To_KRM(in *cloudbuildpb.PrivatePoolV1Config_NetworkConfig, out *PrivatePoolV1Config_NetworkConfig) error { - if in == nil { - return nil - } - - switch in.EgressOption { - case cloudbuildpb.PrivatePoolV1Config_NetworkConfig_EGRESS_OPTION_UNSPECIFIED: - out.EgressOption = LazyPtr("EGRESS_OPTION_UNSPECIFIED") - case cloudbuildpb.PrivatePoolV1Config_NetworkConfig_NO_PUBLIC_EGRESS: - out.EgressOption = LazyPtr("NO_PUBLIC_EGRESS") - case cloudbuildpb.PrivatePoolV1Config_NetworkConfig_PUBLIC_EGRESS: - out.EgressOption = LazyPtr("PUBLIC_EGRESS") - default: - return fmt.Errorf("unknown egressoption %s", in.EgressOption) - } - - out.PeeredNetworkIPRange = PtrTo(in.GetPeeredNetworkIpRange()) - out.PeeredNetworkRef = refv1beta1.ComputeNetworkRef{ - External: in.GetPeeredNetwork(), - } - - return nil -} - -func Convert_WorkerConfig_API_v1_To_KRM(in *cloudbuildpb.PrivatePoolV1Config_WorkerConfig, out *PrivatePoolV1Config_WorkerConfig) error { - if in == nil { - return nil - } - out.DiskSizeGb = LazyPtr(in.GetDiskSizeGb()) - out.MachineType = LazyPtr(in.GetMachineType()) - return nil -} - -func Convert_WorkerPool_KRM_To_API_v1(in *CloudBuildWorkerPool, out *cloudbuildpb.WorkerPool) error { - if in == nil { - return nil - } - // CloudBuildWorkerPool API has "Name" as output only field. 
- // The "Name" is of the form "projects//locations//workerpools/" - // out.Name = in.Name - out.DisplayName = in.Spec.DisplayName - - // Custom - outConfig := &cloudbuildpb.PrivatePoolV1Config{} - if err := Convert_PrivatePoolV1Config_KRM_To_API_v1(in.Spec.PrivatePoolConfig, outConfig); err != nil { - return err - } - out.Config = &cloudbuildpb.WorkerPool_PrivatePoolV1Config{ - PrivatePoolV1Config: outConfig, - } - return nil -} - -func Convert_PrivatePoolV1Config_KRM_To_API_v1(in *PrivatePoolV1Config, out *cloudbuildpb.PrivatePoolV1Config) error { - if in == nil { - return nil - } - networkconfig := &cloudbuildpb.PrivatePoolV1Config_NetworkConfig{} - if err := Convert_PrivatePoolV1Config_NetworkConfig_KRM_To_API_v1(in.NetworkConfig, networkconfig); err != nil { - return err - } - out.NetworkConfig = networkconfig - - workerconfig := &cloudbuildpb.PrivatePoolV1Config_WorkerConfig{} - if err := Convert_PrivatePoolV1Config_WorkerConfig_KRM_To_API_v1(in.WorkerConfig, workerconfig); err != nil { - return err - } - out.WorkerConfig = workerconfig - return nil -} - -func Convert_PrivatePoolV1Config_NetworkConfig_KRM_To_API_v1(in *PrivatePoolV1Config_NetworkConfig, out *cloudbuildpb.PrivatePoolV1Config_NetworkConfig) error { - if in == nil { - return nil - } - obj := in.DeepCopy() - out.PeeredNetworkIpRange = ValueOf(obj.PeeredNetworkIPRange) - - // custom - switch ValueOf(obj.EgressOption) { - case "EGRESS_OPTION_UNSPECIFIED": - out.EgressOption = 0 - case "NO_PUBLIC_EGRESS": - out.EgressOption = 1 - case "PUBLIC_EGRESS": - out.EgressOption = 2 - default: - return fmt.Errorf("unknown egressoption %s", ValueOf(obj.EgressOption)) - } - - if obj.PeeredNetworkRef.External != "" { - out.PeeredNetwork = obj.PeeredNetworkRef.External - } - return nil -} - -func Convert_PrivatePoolV1Config_WorkerConfig_KRM_To_API_v1(in *PrivatePoolV1Config_WorkerConfig, out *cloudbuildpb.PrivatePoolV1Config_WorkerConfig) error { - if in == nil { - return nil - } - obj := in.DeepCopy() - out.DiskSizeGb = ValueOf(obj.DiskSizeGb) - out.MachineType = ValueOf(obj.MachineType) - return nil -} - -func PtrTo[T any](t T) *T { - return &t -} - -func ValueOf[T any](t *T) T { - var zeroVal T - if t == nil { - return zeroVal - } - return *t -} - -func LazyPtr[T comparable](v T) *T { - var defaultValue T - if v == defaultValue { - return nil - } - return &v -} diff --git a/pkg/controller/direct/cloudbuild/maputils.go b/pkg/controller/direct/cloudbuild/maputils.go new file mode 100644 index 0000000000..87dc31dcc2 --- /dev/null +++ b/pkg/controller/direct/cloudbuild/maputils.go @@ -0,0 +1,100 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cloudbuild + +import ( + "errors" + "fmt" + "strings" + "time" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type MapContext struct { + errs []error +} + +func (c *MapContext) Errorf(msg string, args ...interface{}) { + c.errs = append(c.errs, fmt.Errorf(msg, args...)) +} + +func (c *MapContext) Err() error { + return errors.Join(c.errs...) +} + +type ProtoEnum interface { + ~int32 + Descriptor() protoreflect.EnumDescriptor +} + +func Enum_ToProto[U ProtoEnum](mapCtx *MapContext, in *string) U { + var defaultU U + descriptor := defaultU.Descriptor() + + inValue := ValueOf(in) + if inValue == "" { + unspecifiedValue := U(0) + return unspecifiedValue + } + + n := descriptor.Values().Len() + for i := 0; i < n; i++ { + value := descriptor.Values().Get(i) + if string(value.Name()) == inValue { + v := U(value.Number()) + return v + } + } + + var validValues []string + for i := 0; i < n; i++ { + value := descriptor.Values().Get(i) + validValues = append(validValues, string(value.Name())) + } + + mapCtx.Errorf("unknown enum value %q for %v (valid values are %v)", inValue, descriptor.FullName(), strings.Join(validValues, ", ")) + return 0 +} + +func Enum_FromProto[U ProtoEnum](mapCtx *MapContext, v U) *string { + descriptor := v.Descriptor() + + if v == 0 { + return nil + } + + val := descriptor.Values().ByNumber(protoreflect.EnumNumber(v)) + if val == nil { + mapCtx.Errorf("unknown enum value %d", v) + return nil + } + s := string(val.Name()) + return &s +} + +func LazyPtr[V comparable](v V) *V { + var defaultV V + if v == defaultV { + return nil + } + return &v +} + +func ToOpenAPIDateTime(ts *timestamppb.Timestamp) *string { + formatted := ts.AsTime().Format(time.RFC3339) + return &formatted +} diff --git a/pkg/controller/direct/cloudbuild/workerpool_controller.go b/pkg/controller/direct/cloudbuild/workerpool_controller.go index e882328be6..e471c3de6f 100644 --- a/pkg/controller/direct/cloudbuild/workerpool_controller.go +++ b/pkg/controller/direct/cloudbuild/workerpool_controller.go @@ -22,7 +22,6 @@ import ( "fmt" "reflect" "strings" - "time" gcp "cloud.google.com/go/cloudbuild/apiv1/v2" cloudbuildpb "cloud.google.com/go/cloudbuild/apiv1/v2/cloudbuildpb" @@ -35,7 +34,6 @@ import ( "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/registry" "github.com/googleapis/gax-go/v2/apierror" "google.golang.org/protobuf/types/known/fieldmaskpb" - "google.golang.org/protobuf/types/known/timestamppb" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" @@ -171,13 +169,13 @@ func (a *Adapter) Create(ctx context.Context, u *unstructured.Unstructured) erro } desired := a.desired.DeepCopy() - wp := &cloudbuildpb.WorkerPool{ - Name: a.fullyQualifiedName(), - } - err := krm.Convert_WorkerPool_KRM_To_API_v1(desired, wp) - if err != nil { - return fmt.Errorf("converting workerpool spec to api: %w", err) + + mapCtx := &MapContext{} + wp := CloudBuildWorkerPoolSpec_ToProto(mapCtx, &desired.Spec) + if mapCtx.Err() != nil { + return mapCtx.Err() } + wp.Name = a.fullyQualifiedName() req := &cloudbuildpb.CreateWorkerPoolRequest{ Parent: a.getParent(), WorkerPoolId: a.resourceID, @@ -191,12 +189,12 @@ func (a *Adapter) Create(ctx context.Context, u *unstructured.Unstructured) erro if err != nil { return fmt.Errorf("cloudbuildworkerpool %s waiting creation failed: %w", wp.Name, err) } + status := &krm.CloudBuildWorkerPoolStatus{} - if err := 
krm.Convert_WorkerPool_API_v1_To_KRM_status(created, status); err != nil { - return fmt.Errorf("update workerpool status %w", err) + status.ObservedState = CloudBuildWorkerPoolObservedState_FromProto(mapCtx, created) + if mapCtx.Err() != nil { + return mapCtx.Err() } - status.ObservedState.CreateTime = ToOpenAPIDateTime(created.GetCreateTime()) - status.ObservedState.UpdateTime = ToOpenAPIDateTime(created.GetUpdateTime()) resRef, err := NewResourceRef(created) if err != nil { return err @@ -265,15 +263,14 @@ func (a *Adapter) Update(ctx context.Context, u *unstructured.Unstructured) erro return nil } - wp := &cloudbuildpb.WorkerPool{ - Name: a.fullyQualifiedName(), - Etag: a.actual.Etag, - } desired := a.desired.DeepCopy() - err := krm.Convert_WorkerPool_KRM_To_API_v1(desired, wp) - if err != nil { - return fmt.Errorf("converting workerpool spec to api: %w", err) + mapCtx := &MapContext{} + wp := CloudBuildWorkerPoolSpec_ToProto(mapCtx, &desired.Spec) + if mapCtx.Err() != nil { + return mapCtx.Err() } + wp.Name = a.fullyQualifiedName() + wp.Etag = a.actual.Etag req := &cloudbuildpb.UpdateWorkerPoolRequest{ WorkerPool: wp, UpdateMask: updateMask, @@ -287,11 +284,10 @@ func (a *Adapter) Update(ctx context.Context, u *unstructured.Unstructured) erro return fmt.Errorf("cloudbuildworkerpool %s waiting update failed: %w", wp.Name, err) } status := &krm.CloudBuildWorkerPoolStatus{} - if err := krm.Convert_WorkerPool_API_v1_To_KRM_status(updated, status); err != nil { - return fmt.Errorf("update workerpool status %w", err) + status.ObservedState = CloudBuildWorkerPoolObservedState_FromProto(mapCtx, updated) + if mapCtx.Err() != nil { + return fmt.Errorf("update workerpool status %w", mapCtx.Err()) } - status.ObservedState.CreateTime = ToOpenAPIDateTime(updated.GetCreateTime()) - status.ObservedState.UpdateTime = ToOpenAPIDateTime(updated.GetUpdateTime()) // This value should not be updated. Just in case. resRef, err := NewResourceRef(updated) if err != nil { @@ -416,19 +412,3 @@ func HasHTTPCode(err error, code int) bool { } return false } - -// LazyPtr returns a pointer to v, unless it is the empty value, in which case it returns nil. -// It is essentially the inverse of ValueOf, though it is lossy -// because we can't tell nil and empty apart without a pointer. -func LazyPtr[T comparable](v T) *T { - var defaultValue T - if v == defaultValue { - return nil - } - return &v -} - -func ToOpenAPIDateTime(ts *timestamppb.Timestamp) *string { - formatted := ts.AsTime().Format(time.RFC3339) - return &formatted -} diff --git a/pkg/controller/direct/cloudbuild/workerpool_mappings.go b/pkg/controller/direct/cloudbuild/workerpool_mappings.go new file mode 100644 index 0000000000..9fb1bb4b51 --- /dev/null +++ b/pkg/controller/direct/cloudbuild/workerpool_mappings.go @@ -0,0 +1,109 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cloudbuild + +import ( + pb "cloud.google.com/go/cloudbuild/apiv1/v2/cloudbuildpb" + + krm "github.com/GoogleCloudPlatform/k8s-config-connector/apis/cloudbuild/v1alpha1" + refv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" +) + +func CloudBuildWorkerPoolObservedState_FromProto(mapCtx *MapContext, in *pb.WorkerPool) *krm.CloudBuildWorkerPoolObservedState { + if in == nil { + return nil + } + out := &krm.CloudBuildWorkerPoolObservedState{} + out.ETag = LazyPtr(in.Etag) + privateConfig := PrivatePoolV1Config_FromProto(mapCtx, in.GetPrivatePoolV1Config()) + out.NetworkConfig = privateConfig.NetworkConfig + out.WorkerConfig = privateConfig.WorkerConfig + out.CreateTime = ToOpenAPIDateTime(in.GetCreateTime()) + out.UpdateTime = ToOpenAPIDateTime(in.GetUpdateTime()) + return out +} + +func CloudBuildWorkerPoolSpec_ToProto(mapCtx *MapContext, in *krm.CloudBuildWorkerPoolSpec) *pb.WorkerPool { + if in == nil { + return nil + } + out := &pb.WorkerPool{} + out.DisplayName = in.DisplayName + out.Config = &pb.WorkerPool_PrivatePoolV1Config{ + PrivatePoolV1Config: PrivatePoolV1Config_ToProto(mapCtx, in.PrivatePoolConfig), + } + return out +} + +func PrivatePoolV1Config_FromProto(mapCtx *MapContext, in *pb.PrivatePoolV1Config) *krm.PrivatePoolV1Config { + if in == nil { + return nil + } + out := &krm.PrivatePoolV1Config{} + out.WorkerConfig = PrivatePoolV1Config_WorkerConfig_FromProto(mapCtx, in.GetWorkerConfig()) + out.NetworkConfig = PrivatePoolV1Config_NetworkConfig_FromProto(mapCtx, in.GetNetworkConfig()) + return out +} +func PrivatePoolV1Config_ToProto(mapCtx *MapContext, in *krm.PrivatePoolV1Config) *pb.PrivatePoolV1Config { + if in == nil { + return nil + } + out := &pb.PrivatePoolV1Config{} + out.WorkerConfig = PrivatePoolV1Config_WorkerConfig_ToProto(mapCtx, in.WorkerConfig) + out.NetworkConfig = PrivatePoolV1Config_NetworkConfig_ToProto(mapCtx, in.NetworkConfig) + return out +} +func PrivatePoolV1Config_NetworkConfig_FromProto(mapCtx *MapContext, in *pb.PrivatePoolV1Config_NetworkConfig) *krm.PrivatePoolV1Config_NetworkConfig { + if in == nil { + return nil + } + out := &krm.PrivatePoolV1Config_NetworkConfig{} + out.PeeredNetworkRef = refv1beta1.ComputeNetworkRef{ + External: in.GetPeeredNetwork(), + } + out.EgressOption = Enum_FromProto(mapCtx, in.EgressOption) + out.PeeredNetworkIPRange = LazyPtr(in.GetPeeredNetworkIpRange()) + return out +} +func PrivatePoolV1Config_NetworkConfig_ToProto(mapCtx *MapContext, in *krm.PrivatePoolV1Config_NetworkConfig) *pb.PrivatePoolV1Config_NetworkConfig { + if in == nil { + return nil + } + out := &pb.PrivatePoolV1Config_NetworkConfig{} + out.PeeredNetwork = in.PeeredNetworkRef.External + out.EgressOption = Enum_ToProto[pb.PrivatePoolV1Config_NetworkConfig_EgressOption](mapCtx, in.EgressOption) + out.PeeredNetworkIpRange = ValueOf(in.PeeredNetworkIPRange) + return out +} +func PrivatePoolV1Config_WorkerConfig_FromProto(mapCtx *MapContext, in *pb.PrivatePoolV1Config_WorkerConfig) *krm.PrivatePoolV1Config_WorkerConfig { + if in == nil { + return nil + } + out := &krm.PrivatePoolV1Config_WorkerConfig{} + out.MachineType = LazyPtr(in.GetMachineType()) + out.DiskSizeGb = LazyPtr(in.GetDiskSizeGb()) + return out +} +func PrivatePoolV1Config_WorkerConfig_ToProto(mapCtx *MapContext, in *krm.PrivatePoolV1Config_WorkerConfig) *pb.PrivatePoolV1Config_WorkerConfig { + if in == nil { + return nil + } + out := &pb.PrivatePoolV1Config_WorkerConfig{} + out.MachineType = ValueOf(in.MachineType) + out.DiskSizeGb = 
ValueOf(in.DiskSizeGb) + return out +} From 5c17ef08d780052d1dae6cca314b98b3ae2824a0 Mon Sep 17 00:00:00 2001 From: Joyce Ma Date: Fri, 21 Jun 2024 23:41:44 +0000 Subject: [PATCH 038/101] Hide output-only spec field but support it in observed state --- ...buckets.storage.cnrm.cloud.google.com.yaml | 5 -- config/servicemappings/storage.yaml | 2 + .../servicemapping/servicemapping_test.go | 44 ++++++++++++++ .../core/v1alpha1/servicemapping_types.go | 8 +++ .../storage/v1beta1/storagebucket_types.go | 4 -- .../storage/v1beta1/zz_generated.deepcopy.go | 5 -- pkg/crd/crdgeneration/tf2crdgeneration.go | 27 ++++++--- pkg/krmtotf/tftokrm.go | 59 ++++++++++++------- pkg/krmtotf/tftokrm_test.go | 2 +- .../resource-docs/storage/storagebucket.md | 11 ---- 10 files changed, 113 insertions(+), 54 deletions(-) diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_storagebuckets.storage.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_storagebuckets.storage.cnrm.cloud.google.com.yaml index 91ac26624b..311e7394a5 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_storagebuckets.storage.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_storagebuckets.storage.cnrm.cloud.google.com.yaml @@ -293,11 +293,6 @@ spec: permanently deleted. If it is not provided, by default Google Cloud Storage sets this to default soft delete policy. properties: - effectiveTime: - description: Server-determined value that indicates the time from - which the policy, or one with a greater retention, was effective. - This value is in RFC 3339 format. - type: string retentionDurationSeconds: description: The duration in seconds that soft-deleted objects in the bucket will be retained and cannot be permanently deleted. 
diff --git a/config/servicemappings/storage.yaml b/config/servicemappings/storage.yaml index 22c777a3ec..28ff8737ee 100644 --- a/config/servicemappings/storage.yaml +++ b/config/servicemappings/storage.yaml @@ -36,6 +36,8 @@ spec: labels: labels resourceID: targetField: name + ignoredOutputOnlySpecFields: + - soft_delete_policy.effective_time observedFields: - soft_delete_policy.effective_time - soft_delete_policy.retention_duration_seconds diff --git a/config/tests/servicemapping/servicemapping_test.go b/config/tests/servicemapping/servicemapping_test.go index a8c63c048f..82cbea0cac 100644 --- a/config/tests/servicemapping/servicemapping_test.go +++ b/config/tests/servicemapping/servicemapping_test.go @@ -236,6 +236,11 @@ func TestTerraformFieldsAreInResourceSchema(t *testing.T) { for _, f := range rc.IgnoredFields { fields = append(fields, f) } + if rc.IgnoredOutputOnlySpecFields != nil { + for _, o := range *rc.IgnoredOutputOnlySpecFields { + fields = append(fields, o) + } + } for _, c := range rc.Containers { fields = append(fields, c.TFField) } @@ -1431,3 +1436,42 @@ func assertReferencedResourcesNotAlpha(t *testing.T, rc *v1alpha1.ResourceConfig } } } + +func TestIgnoredOutputOnlySpecFields(t *testing.T) { + t.Parallel() + serviceMappings := testservicemappingloader.New(t).GetServiceMappings() + provider := tfprovider.NewOrLogFatal(tfprovider.UnitTestConfig()) + for _, sm := range serviceMappings { + sm := sm + t.Run(sm.Name, func(t *testing.T) { + t.Parallel() + for _, rc := range sm.Spec.Resources { + tfResource := provider.ResourcesMap[rc.Name] + rc := rc + t.Run(rc.Kind, func(t *testing.T) { + t.Parallel() + if rc.IgnoredOutputOnlySpecFields == nil { + return + } + if len(*rc.IgnoredOutputOnlySpecFields) == 0 { + t.Errorf("kind %v has an empty IgnoredOutputOnlySpecFields slice", rc.Kind) + return + } + for _, f := range *rc.IgnoredOutputOnlySpecFields { + if f == "" { + t.Errorf("kind %v has an empty value in IgnoredOutputOnlySpecFields slice", rc.Kind) + return + } + fieldSchema, err := tfresource.GetTFSchemaForField(tfResource, f) + if err != nil { + t.Errorf("error getting TF schema for output-only spec field %v in kind %v", f, rc.Kind) + } + if tfresource.IsConfigurableField(fieldSchema) { + t.Errorf("output-only spec field %v in kind %v is configurable", f, rc.Kind) + } + } + }) + } + }) + } +} diff --git a/pkg/apis/core/v1alpha1/servicemapping_types.go b/pkg/apis/core/v1alpha1/servicemapping_types.go index fd440bcf78..51bf4a097d 100644 --- a/pkg/apis/core/v1alpha1/servicemapping_types.go +++ b/pkg/apis/core/v1alpha1/servicemapping_types.go @@ -131,6 +131,14 @@ type ResourceConfig struct { // Terraform resource. IgnoredFields []string `json:"ignoredFields,omitempty"` + // IgnoredOutputOnlySpecFields is a list of fields that should not be added + // to spec because they are output-only. + // We have a legacy bug that adds all the fields under a writable top-level + // field into spec during CRD generation even if the subfield itself is + // output-only. We should stop the bleeding by manually adding any new + // output-only subfields under a writable top-level field into this list. + IgnoredOutputOnlySpecFields *[]string `json:"ignoredOutputOnlySpecFields,omitempty"` + // Deprecated: use HierarchicalReferences instead. 
Only resources that // already specify Containers should continue to specify Containers so that // these resources can continue to support resource-level container diff --git a/pkg/clients/generated/apis/storage/v1beta1/storagebucket_types.go b/pkg/clients/generated/apis/storage/v1beta1/storagebucket_types.go index 6d10a3d790..c7386c2a5e 100644 --- a/pkg/clients/generated/apis/storage/v1beta1/storagebucket_types.go +++ b/pkg/clients/generated/apis/storage/v1beta1/storagebucket_types.go @@ -150,10 +150,6 @@ type BucketRetentionPolicy struct { } type BucketSoftDeletePolicy struct { - /* Server-determined value that indicates the time from which the policy, or one with a greater retention, was effective. This value is in RFC 3339 format. */ - // +optional - EffectiveTime *string `json:"effectiveTime,omitempty"` - /* The duration in seconds that soft-deleted objects in the bucket will be retained and cannot be permanently deleted. Default value is 604800. */ // +optional RetentionDurationSeconds *int64 `json:"retentionDurationSeconds,omitempty"` diff --git a/pkg/clients/generated/apis/storage/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/storage/v1beta1/zz_generated.deepcopy.go index 38f50003de..ece1465a8b 100644 --- a/pkg/clients/generated/apis/storage/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/storage/v1beta1/zz_generated.deepcopy.go @@ -295,11 +295,6 @@ func (in *BucketRetentionPolicy) DeepCopy() *BucketRetentionPolicy { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BucketSoftDeletePolicy) DeepCopyInto(out *BucketSoftDeletePolicy) { *out = *in - if in.EffectiveTime != nil { - in, out := &in.EffectiveTime, &out.EffectiveTime - *out = new(string) - **out = **in - } if in.RetentionDurationSeconds != nil { in, out := &in.RetentionDurationSeconds, &out.RetentionDurationSeconds *out = new(int64) diff --git a/pkg/crd/crdgeneration/tf2crdgeneration.go b/pkg/crd/crdgeneration/tf2crdgeneration.go index b92558881e..6e9ad3dfc1 100644 --- a/pkg/crd/crdgeneration/tf2crdgeneration.go +++ b/pkg/crd/crdgeneration/tf2crdgeneration.go @@ -63,13 +63,6 @@ func GenerateTF2CRD(sm *corekccv1alpha1.ServiceMapping, resourceConfig *corekccv addResourceIDFieldIfSupported(resourceConfig, specJSONSchema) handleHierarchicalReferences(resourceConfig, specJSONSchema) - if len(specJSONSchema.Properties) > 0 { - openAPIV3Schema.Properties["spec"] = *specJSONSchema - if len(specJSONSchema.Required) > 0 { - openAPIV3Schema.Required = slice.IncludeString(openAPIV3Schema.Required, "spec") - } - } - var err error if k8s.OutputOnlyFieldsAreUnderObservedState(kubeschema.GroupVersionKind{ Kind: resourceConfig.Kind, @@ -84,6 +77,15 @@ func GenerateTF2CRD(sm *corekccv1alpha1.ServiceMapping, resourceConfig *corekccv } } addObservedFieldsToObservedState(resourceConfig, specJSONSchema, statusOrObservedStateJSONSchema) + removeIgnoredOutputOnlySpecFields(resourceConfig, specJSONSchema) + + if len(specJSONSchema.Properties) > 0 { + openAPIV3Schema.Properties["spec"] = *specJSONSchema + if len(specJSONSchema.Required) > 0 { + openAPIV3Schema.Required = slice.IncludeString(openAPIV3Schema.Required, "spec") + } + } + for k, v := range statusOrObservedStateJSONSchema.Properties { openAPIV3Schema.Properties["status"].Properties[k] = v } @@ -261,6 +263,17 @@ func removeOverwrittenFields(rc *corekccv1alpha1.ResourceConfig, s *apiextension } } } +func removeIgnoredOutputOnlySpecFields(rc *corekccv1alpha1.ResourceConfig, 
specJSONSchema *apiextensions.JSONSchemaProps) { + if rc.IgnoredOutputOnlySpecFields == nil { + return + } + for _, f := range *rc.IgnoredOutputOnlySpecFields { + removedInSpec := removeFieldIfExist(f, specJSONSchema) + if !removedInSpec { + panic(fmt.Errorf("cannot find the output-only spec field %s in spec JSON schema for resource %s", f, rc.Name)) + } + } +} func removeIgnoredFields(rc *corekccv1alpha1.ResourceConfig, specJSONSchema, statusJSONSchema *apiextensions.JSONSchemaProps) { for _, f := range rc.IgnoredFields { diff --git a/pkg/krmtotf/tftokrm.go b/pkg/krmtotf/tftokrm.go index 0e7463ff45..1e63c7f40b 100644 --- a/pkg/krmtotf/tftokrm.go +++ b/pkg/krmtotf/tftokrm.go @@ -64,9 +64,10 @@ func ResolveSpecAndStatus(resource *Resource, state *terraform.InstanceState) ( func GetSpecAndStatusFromState(resource *Resource, state *terraform.InstanceState) ( spec map[string]interface{}, status map[string]interface{}) { unmodifiedState := InstanceStateToMap(resource.TFResource, state) - krmState := ConvertTFObjToKCCObj(unmodifiedState, resource.Spec, resource.TFResource.Schema, + krmState, krmStateWithIgnoredOutputOnlySpecFields := ConvertTFObjToKCCObj(unmodifiedState, resource.Spec, resource.TFResource.Schema, &resource.ResourceConfig, "", resource.ManagedFields) krmState = withCustomExpanders(krmState, resource, resource.Kind) + krmStateWithIgnoredOutputOnlySpecFields = withCustomExpanders(krmStateWithIgnoredOutputOnlySpecFields, resource, resource.Kind) spec = make(map[string]interface{}) status = make(map[string]interface{}) for field, fieldSchema := range resource.TFResource.Schema { @@ -107,7 +108,7 @@ func GetSpecAndStatusFromState(resource *Resource, state *terraform.InstanceStat status["observedGeneration"] = deepcopy.DeepCopy(observedGeneration) } if resource.ResourceConfig.ObservedFields != nil { - observedFields := resolveObservedFields(resource, krmState) + observedFields := resolveObservedFields(resource, krmStateWithIgnoredOutputOnlySpecFields) if len(observedFields) > 0 { // Merge the observed fields into the observed state. observedState, ok := status[k8s.ObservedStateFieldName] @@ -458,8 +459,9 @@ func getValueFromState(state map[string]interface{}, key string) (string, bool) } // ConvertTFObjToKCCObj takes the state (which should be a Terraform resource), -// and returns a map that is formatted to KCC's custom resource schema for the -// appropriate Kind. +// and returns two maps: the first one is formatted to KCC's custom resource +// schema for the appropriate Kind, the second one contains additional +// output-only fields that are used in observed state only. // // prevSpec is used for multiple purposes: // - ensures the returned result has a similar order for objects in lists, reducing @@ -470,26 +472,35 @@ func getValueFromState(state map[string]interface{}, key string) (string, bool) // state and the prevSpec. 
func ConvertTFObjToKCCObj(state map[string]interface{}, prevSpec map[string]interface{}, schemas map[string]*tfschema.Schema, rc *corekccv1alpha1.ResourceConfig, prefix string, - managedFields *fieldpath.Set) map[string]interface{} { - raw := convertTFMapToKCCMap(state, prevSpec, schemas, rc, prefix, managedFields) + managedFields *fieldpath.Set) (krmState, krmStateWithIgnoredOutputOnlySpecFields map[string]interface{}) { + rawKRMState := convertTFMapToKCCMap(state, prevSpec, schemas, rc, prefix, managedFields, true) + rawKRMStateWithIgnoredOutputOnlySpecFields := deepcopy.DeepCopy(rawKRMState) + if rc.IgnoredOutputOnlySpecFields != nil { + rawKRMStateWithIgnoredOutputOnlySpecFields = + convertTFMapToKCCMap(state, prevSpec, schemas, rc, prefix, managedFields, false) + } // Round-trip via JSON in order to ensure consistency with unstructured.Unstructured's Object type. - var ret map[string]interface{} - if err := util.Marshal(raw, &ret); err != nil { + var retKRMState map[string]interface{} + if err := util.Marshal(rawKRMState, &retKRMState); err != nil { panic(fmt.Errorf("error normalizing KRM-ified object: %w", err)) } - return ret + var retKRMStateWithIgnoredOutputOnlySpecFields map[string]interface{} + if err := util.Marshal(rawKRMStateWithIgnoredOutputOnlySpecFields, &retKRMStateWithIgnoredOutputOnlySpecFields); err != nil { + panic(fmt.Errorf("error normalizing KRM-ified object: %w", err)) + } + return retKRMState, retKRMStateWithIgnoredOutputOnlySpecFields } func convertTFMapToKCCMap(state map[string]interface{}, prevSpec map[string]interface{}, schemas map[string]*tfschema.Schema, rc *corekccv1alpha1.ResourceConfig, prefix string, - managedFields *fieldpath.Set) map[string]interface{} { + managedFields *fieldpath.Set, ignoreOutputOnlySpecFields bool) map[string]interface{} { ret := make(map[string]interface{}) for field, schema := range schemas { qualifiedName := field if prefix != "" { qualifiedName = prefix + "." + field } - if isOverriddenField(qualifiedName, rc) { + if isOverriddenField(qualifiedName, rc, ignoreOutputOnlySpecFields) { continue } if ok, refConfig := IsReferenceField(qualifiedName, rc); ok { @@ -584,7 +595,7 @@ func convertTFMapToKCCMap(state map[string]interface{}, prevSpec map[string]inte nestedManagedFields = fieldpath.NewSet() } } - if val := convertTFMapToKCCMap(tfObjMap, prevObjMap, tfObjSchema, rc, qualifiedName, nestedManagedFields); val != nil { + if val := convertTFMapToKCCMap(tfObjMap, prevObjMap, tfObjSchema, rc, qualifiedName, nestedManagedFields, ignoreOutputOnlySpecFields); val != nil { ret[key] = val } continue @@ -595,7 +606,7 @@ func convertTFMapToKCCMap(state map[string]interface{}, prevSpec map[string]inte // the status can be treated the same as lists, as the new state is the definitive // source of truth and there is no reference resolution. 
if schema.Required || schema.Optional { - retObj := convertTFSetToKCCSet(stateVal, prevSpecVal, schema, rc, qualifiedName) + retObj := convertTFSetToKCCSet(stateVal, prevSpecVal, schema, rc, qualifiedName, ignoreOutputOnlySpecFields) if retObj != nil { ret[key] = retObj } @@ -618,7 +629,7 @@ func convertTFMapToKCCMap(state map[string]interface{}, prevSpec map[string]inte if idx < len(prevList) { prevObjMap, _ = prevList[idx].(map[string]interface{}) } - if val := convertTFMapToKCCMap(tfObjMap, prevObjMap, tfObjSchema, rc, qualifiedName, nil); val != nil { + if val := convertTFMapToKCCMap(tfObjMap, prevObjMap, tfObjSchema, rc, qualifiedName, nil, ignoreOutputOnlySpecFields); val != nil { retObjList = append(retObjList, val) } } @@ -718,7 +729,7 @@ func convertTFReferenceToKCCReference(tfField, specKey string, state map[string] } // convertTFSetToKCCSet converts a set object in Terraform to a KCC set object -func convertTFSetToKCCSet(stateVal, prevSpecVal interface{}, schema *tfschema.Schema, rc *corekccv1alpha1.ResourceConfig, prefix string) interface{} { +func convertTFSetToKCCSet(stateVal, prevSpecVal interface{}, schema *tfschema.Schema, rc *corekccv1alpha1.ResourceConfig, prefix string, ignoreOutputOnlySpecFields bool) interface{} { if containsReferenceField(prefix, rc) { // TODO(kcc-eng): Support the case where the hashing function depends on resolved values from // resource references. For the time being, fall back to the declared state. @@ -767,12 +778,12 @@ func convertTFSetToKCCSet(stateVal, prevSpecVal interface{}, schema *tfschema.Sc stateElem = map[string]interface{}{} } retObjList = append(retObjList, - convertTFElemToKCCElem(schema.Elem, stateElem, prevElem, rc, prefix)) + convertTFElemToKCCElem(schema.Elem, stateElem, prevElem, rc, prefix, ignoreOutputOnlySpecFields)) } // append any new elements in the list to the end for _, newElem := range stateHashMap { retObjList = append(retObjList, - convertTFElemToKCCElem(schema.Elem, newElem, nil, rc, prefix)) + convertTFElemToKCCElem(schema.Elem, newElem, nil, rc, prefix, ignoreOutputOnlySpecFields)) } if len(retObjList) == 0 { return nil @@ -867,7 +878,7 @@ func getDefaultValueForTFType(tfType tfschema.ValueType) interface{} { } } -func convertTFElemToKCCElem(elemSchema, tfObj, prevSpecObj interface{}, rc *corekccv1alpha1.ResourceConfig, prefix string) interface{} { +func convertTFElemToKCCElem(elemSchema, tfObj, prevSpecObj interface{}, rc *corekccv1alpha1.ResourceConfig, prefix string, ignoreOutputOnlySpecFields bool) interface{} { switch elemSchema.(type) { case *tfschema.Schema: if prevSpecObj != nil { @@ -878,13 +889,13 @@ func convertTFElemToKCCElem(elemSchema, tfObj, prevSpecObj interface{}, rc *core tfObjSchema := elemSchema.(*tfschema.Resource).Schema tfObjMap, _ := tfObj.(map[string]interface{}) prevObjMap, _ := prevSpecObj.(map[string]interface{}) - return convertTFMapToKCCMap(tfObjMap, prevObjMap, tfObjSchema, rc, prefix, nil) + return convertTFMapToKCCMap(tfObjMap, prevObjMap, tfObjSchema, rc, prefix, nil, ignoreOutputOnlySpecFields) default: return prevSpecObj } } -func isOverriddenField(field string, rc *corekccv1alpha1.ResourceConfig) bool { +func isOverriddenField(field string, rc *corekccv1alpha1.ResourceConfig, ignoreOutputOnlySpecFields bool) bool { if field == rc.MetadataMapping.Name || field == rc.MetadataMapping.Labels { return true } @@ -907,7 +918,13 @@ func isOverriddenField(field string, rc *corekccv1alpha1.ResourceConfig) bool { return true } } - + } + if ignoreOutputOnlySpecFields && 
rc.IgnoredOutputOnlySpecFields != nil { + for _, f := range *rc.IgnoredOutputOnlySpecFields { + if field == f { + return true + } + } } return false } diff --git a/pkg/krmtotf/tftokrm_test.go b/pkg/krmtotf/tftokrm_test.go index 4a23a5737e..6612b2d74f 100644 --- a/pkg/krmtotf/tftokrm_test.go +++ b/pkg/krmtotf/tftokrm_test.go @@ -1017,7 +1017,7 @@ func TestConvertTFObjToKCCObj(t *testing.T) { r.TFResource.Schema = tc.schemaOverride } r.SetNamespace(test.Namespace) - actual := ConvertTFObjToKCCObj(tc.state, tc.prevSpec, r.TFResource.Schema, &r.ResourceConfig, "", tc.managedFields) + actual, _ := ConvertTFObjToKCCObj(tc.state, tc.prevSpec, r.TFResource.Schema, &r.ResourceConfig, "", tc.managedFields) if !reflect.DeepEqual(tc.expected, actual) { t.Fatalf("expected: %v, actual: %v", tc.expected, actual) } diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/storage/storagebucket.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/storage/storagebucket.md index e5436ab54c..a681e72156 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/storage/storagebucket.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/storage/storagebucket.md @@ -157,7 +157,6 @@ retentionPolicy: isLocked: boolean retentionPeriod: integer softDeletePolicy: - effectiveTime: string retentionDurationSeconds: integer storageClass: string uniformBucketLevelAccess: boolean @@ -697,16 +696,6 @@ Enables Bucket PolicyOnly access to a bucket.{% endverbatim %}

{% verbatim %}The bucket's soft delete policy, which defines the period of time that soft-deleted objects will be retained, and cannot be permanently deleted. If it is not provided, by default Google Cloud Storage sets this to default soft delete policy.{% endverbatim %}
-softDeletePolicy.effectiveTime
-Optional
-string
-{% verbatim %}Server-determined value that indicates the time from which the policy, or one with a greater retention, was effective. This value is in RFC 3339 format.{% endverbatim %}
softDeletePolicy.retentionDurationSeconds
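Net effect of this patch: the server-set softDeletePolicy.effectiveTime subfield is no longer exposed under spec, but the value is still surfaced because the field is listed under observedFields (and so appears with the observed state in status). A minimal standalone sketch of the spec-side behaviour, using a plain nested map for illustration only -- the actual change above operates on the generated CRD schema and the Terraform state, not on such a map:

package main

import "fmt"

// removeFieldPath drops a dotted path such as "softDeletePolicy.effectiveTime"
// from a nested map, mirroring in spirit what listing the field under
// ignoredOutputOnlySpecFields does to the generated spec.
func removeFieldPath(obj map[string]interface{}, path ...string) {
	if obj == nil || len(path) == 0 {
		return
	}
	if len(path) == 1 {
		delete(obj, path[0])
		return
	}
	if child, ok := obj[path[0]].(map[string]interface{}); ok {
		removeFieldPath(child, path[1:]...)
	}
}

func main() {
	spec := map[string]interface{}{
		"softDeletePolicy": map[string]interface{}{
			"effectiveTime":            "1970-01-01T00:00:00Z", // output-only, server-set
			"retentionDurationSeconds": 604800,                 // user-configurable
		},
	}
	removeFieldPath(spec, "softDeletePolicy", "effectiveTime")
	// spec now carries only the configurable subfield:
	// map[softDeletePolicy:map[retentionDurationSeconds:604800]]
	fmt.Println(spec)
}

The same value still reaches users, just via the observed state rather than spec.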

From 1978a5e9534e5ec33bcb756c0635c60d52a682fb Mon Sep 17 00:00:00 2001 From: Joyce Ma Date: Sat, 22 Jun 2024 00:31:44 +0000 Subject: [PATCH 039/101] Fix powertool tests --- .../powertool/powertool_set_bucket_location/_object00.yaml | 1 - .../powertool/powertool_set_bucket_location/_object02.yaml | 1 - .../storagebucket_clear_state_into_spec/_cli-2-stdout.log | 1 - .../powertool/storagebucket_clear_state_into_spec/_object00.yaml | 1 - 4 files changed, 4 deletions(-) diff --git a/tests/e2e/testdata/scenarios/powertool/powertool_set_bucket_location/_object00.yaml b/tests/e2e/testdata/scenarios/powertool/powertool_set_bucket_location/_object00.yaml index 6d48874c08..14d0829292 100644 --- a/tests/e2e/testdata/scenarios/powertool/powertool_set_bucket_location/_object00.yaml +++ b/tests/e2e/testdata/scenarios/powertool/powertool_set_bucket_location/_object00.yaml @@ -38,7 +38,6 @@ spec: publicAccessPrevention: inherited resourceID: storagebucket-${uniqueId} softDeletePolicy: - effectiveTime: "1970-01-01T00:00:00Z" retentionDurationSeconds: 604800 storageClass: STANDARD versioning: diff --git a/tests/e2e/testdata/scenarios/powertool/powertool_set_bucket_location/_object02.yaml b/tests/e2e/testdata/scenarios/powertool/powertool_set_bucket_location/_object02.yaml index 4cde96662c..631d6a03e5 100644 --- a/tests/e2e/testdata/scenarios/powertool/powertool_set_bucket_location/_object02.yaml +++ b/tests/e2e/testdata/scenarios/powertool/powertool_set_bucket_location/_object02.yaml @@ -38,7 +38,6 @@ spec: publicAccessPrevention: inherited resourceID: storagebucket-${uniqueId} softDeletePolicy: - effectiveTime: "1970-01-01T00:00:00Z" retentionDurationSeconds: 604800 storageClass: STANDARD versioning: diff --git a/tests/e2e/testdata/scenarios/powertool/storagebucket_clear_state_into_spec/_cli-2-stdout.log b/tests/e2e/testdata/scenarios/powertool/storagebucket_clear_state_into_spec/_cli-2-stdout.log index 5d413eca31..e6cee48d7c 100644 --- a/tests/e2e/testdata/scenarios/powertool/storagebucket_clear_state_into_spec/_cli-2-stdout.log +++ b/tests/e2e/testdata/scenarios/powertool/storagebucket_clear_state_into_spec/_cli-2-stdout.log @@ -7,7 +7,6 @@ spec: publicAccessPrevention: inherited -> softDeletePolicy: - effectiveTime: 2024-04-01T00:00:00Z -> retentionDurationSeconds: 604800 -> storageClass: STANDARD -> diff --git a/tests/e2e/testdata/scenarios/powertool/storagebucket_clear_state_into_spec/_object00.yaml b/tests/e2e/testdata/scenarios/powertool/storagebucket_clear_state_into_spec/_object00.yaml index dc0b7aea7d..1942a802fb 100644 --- a/tests/e2e/testdata/scenarios/powertool/storagebucket_clear_state_into_spec/_object00.yaml +++ b/tests/e2e/testdata/scenarios/powertool/storagebucket_clear_state_into_spec/_object00.yaml @@ -38,7 +38,6 @@ spec: publicAccessPrevention: inherited resourceID: storagebucket-merge-${uniqueId} softDeletePolicy: - effectiveTime: "1970-01-01T00:00:00Z" retentionDurationSeconds: 604800 storageClass: STANDARD versioning: From 85de5e5bbadc2e205a9225794f3c59e3759ff192 Mon Sep 17 00:00:00 2001 From: Joyce Ma Date: Mon, 24 Jun 2024 21:49:13 +0000 Subject: [PATCH 040/101] Clean up 'spec.softDeletePolicy.effectiveTime' in generated files --- .../forcedestroy/_generated_object_forcedestroy.golden.yaml | 1 - .../storagebucket/_generated_object_storagebucket#01.golden.yaml | 1 - .../storagebucket/_generated_object_storagebucket.golden.yaml | 1 - 3 files changed, 3 deletions(-) diff --git a/pkg/test/resourcefixture/testdata/directives/forcedestroy/_generated_object_forcedestroy.golden.yaml 
b/pkg/test/resourcefixture/testdata/directives/forcedestroy/_generated_object_forcedestroy.golden.yaml index 117cca1860..00584f17df 100644 --- a/pkg/test/resourcefixture/testdata/directives/forcedestroy/_generated_object_forcedestroy.golden.yaml +++ b/pkg/test/resourcefixture/testdata/directives/forcedestroy/_generated_object_forcedestroy.golden.yaml @@ -21,7 +21,6 @@ spec: publicAccessPrevention: inherited resourceID: storagebucket-sample-${uniqueId} softDeletePolicy: - effectiveTime: "1970-01-01T00:00:00Z" retentionDurationSeconds: 604800 storageClass: STANDARD versioning: diff --git a/pkg/test/resourcefixture/testdata/reconcileintervalannotations/storagebucket/_generated_object_storagebucket#01.golden.yaml b/pkg/test/resourcefixture/testdata/reconcileintervalannotations/storagebucket/_generated_object_storagebucket#01.golden.yaml index 80f40114e0..afb60eec51 100644 --- a/pkg/test/resourcefixture/testdata/reconcileintervalannotations/storagebucket/_generated_object_storagebucket#01.golden.yaml +++ b/pkg/test/resourcefixture/testdata/reconcileintervalannotations/storagebucket/_generated_object_storagebucket#01.golden.yaml @@ -19,7 +19,6 @@ spec: publicAccessPrevention: inherited resourceID: storagebucket-sample-${uniqueId} softDeletePolicy: - effectiveTime: "1970-01-01T00:00:00Z" retentionDurationSeconds: 604800 storageClass: STANDARD versioning: diff --git a/pkg/test/resourcefixture/testdata/reconcileintervalannotations/storagebucket/_generated_object_storagebucket.golden.yaml b/pkg/test/resourcefixture/testdata/reconcileintervalannotations/storagebucket/_generated_object_storagebucket.golden.yaml index 80f40114e0..afb60eec51 100644 --- a/pkg/test/resourcefixture/testdata/reconcileintervalannotations/storagebucket/_generated_object_storagebucket.golden.yaml +++ b/pkg/test/resourcefixture/testdata/reconcileintervalannotations/storagebucket/_generated_object_storagebucket.golden.yaml @@ -19,7 +19,6 @@ spec: publicAccessPrevention: inherited resourceID: storagebucket-sample-${uniqueId} softDeletePolicy: - effectiveTime: "1970-01-01T00:00:00Z" retentionDurationSeconds: 604800 storageClass: STANDARD versioning: From 29777ef939c2a536e8647a9f65c49af426ba1bd0 Mon Sep 17 00:00:00 2001 From: Nancy Hong Date: Tue, 7 May 2024 13:30:49 -0700 Subject: [PATCH 041/101] Update google_alloydb_instance resource with Public IP fields from Terraform Add public_ip_address and network_config field. Under network_config is authorized_external_networks and enable_public_ip. These fields apply on instance creation and update. 
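As the generated provider code below spells out, the AlloyDB API prohibits creating an instance with public IP already enabled, so the provider creates the instance with enablePublicIp (and the authorized external networks) stripped from networkConfig, then re-applies the full network config through an update restricted to networkConfig. A much-simplified sketch of that two-step flow, with placeholder functions standing in for the real API calls:

package main

import "fmt"

// createInstance and patchInstance are placeholders for the AlloyDB API calls
// the provider actually makes; they only print what would be sent.
func createInstance(body map[string]interface{}) { fmt.Println("CREATE:", body) }
func patchInstance(updateMask string, body map[string]interface{}) {
	fmt.Println("PATCH (updateMask="+updateMask+"):", body)
}

func main() {
	networkConfig := map[string]interface{}{
		"enablePublicIp": true,
		"authorizedExternalNetworks": []map[string]interface{}{
			{"cidrRange": "8.8.8.8/30"},
		},
	}

	// Step 1: create without public IP, since the API rejects it at creation time.
	createNC := map[string]interface{}{}
	for k, v := range networkConfig {
		createNC[k] = v
	}
	if createNC["enablePublicIp"] == true {
		delete(createNC, "enablePublicIp")
		delete(createNC, "authorizedExternalNetworks")
	}
	createInstance(map[string]interface{}{"networkConfig": createNC})

	// Step 2: if public IP was requested, enable it with an update scoped to networkConfig.
	if networkConfig["enablePublicIp"] == true {
		patchInstance("networkConfig", map[string]interface{}{"networkConfig": networkConfig})
	}
}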
--- .../alloydb/resource_alloydb_instance.go | 218 ++++++++++++++++++ 1 file changed, 218 insertions(+) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/services/alloydb/resource_alloydb_instance.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/services/alloydb/resource_alloydb_instance.go index dce8679c52..e369c9df9a 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/services/alloydb/resource_alloydb_instance.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/services/alloydb/resource_alloydb_instance.go @@ -134,6 +134,36 @@ can have regional availability (nodes are present in 2 or more zones in a region }, }, }, + "network_config": { + Type: schema.TypeList, + Optional: true, + Description: `Instance level network configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authorized_external_networks": { + Type: schema.TypeList, + Optional: true, + Description: `A list of external networks authorized to access this instance. This field is only allowed to be set when 'enable_public_ip' is set to true.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr_range": { + Type: schema.TypeString, + Optional: true, + Description: `CIDR range for one authorized network of the instance.`, + }, + }, + }, + RequiredWith: []string{"network_config.0.enable_public_ip"}, + }, + "enable_public_ip": { + Type: schema.TypeBool, + Optional: true, + Description: `Enabling public ip for the instance. If a user wishes to disable this, please also clear the list of the authorized external networks set on the same instance.`, + }, + }, + }, + }, "read_pool_config": { Type: schema.TypeList, Optional: true, @@ -164,6 +194,11 @@ can have regional availability (nodes are present in 2 or more zones in a region Computed: true, Description: `The name of the instance resource.`, }, + "public_ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `The public IP addresses for the Instance. This is available ONLY when networkConfig.enablePublicIp is set to true. This is the connection endpoint for an end-user application.`, + }, "reconciling": { Type: schema.TypeBool, Computed: true, @@ -252,6 +287,12 @@ func resourceAlloydbInstanceCreate(d *schema.ResourceData, meta interface{}) err } else if v, ok := d.GetOkExists("machine_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(machineConfigProp)) && (ok || !reflect.DeepEqual(v, machineConfigProp)) { obj["machineConfig"] = machineConfigProp } + networkConfigProp, err := expandAlloydbInstanceNetworkConfig(d.Get("network_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkConfigProp)) && (ok || !reflect.DeepEqual(v, networkConfigProp)) { + obj["networkConfig"] = networkConfigProp + } url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}{{cluster}}/instances?instanceId={{instance_id}}") if err != nil { @@ -266,6 +307,20 @@ func resourceAlloydbInstanceCreate(d *schema.ResourceData, meta interface{}) err billingProject = bp } + // Temporarily remove the enablePublicIp field if it is set to true since the + // API prohibits creating instances with public IP enabled. 
+ var nc map[string]interface{} + if obj["networkConfig"] == nil { + nc = make(map[string]interface{}) + } else { + nc = obj["networkConfig"].(map[string]interface{}) + } + if nc["enablePublicIp"] == true { + delete(nc, "enablePublicIp") + delete(nc, "authorizedExternalNetworks") + } + obj["networkConfig"] = nc + // Read the config and call createsecondary api if instance_type is SECONDARY if instanceType := d.Get("instance_type"); instanceType == "SECONDARY" { @@ -301,6 +356,51 @@ func resourceAlloydbInstanceCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error waiting to create Instance: %s", err) } + // If enablePublicIp is set to true, then we must create the instance first with + // it disabled then update to enable it. + networkConfigProp, err = expandAlloydbInstanceNetworkConfig(d.Get("network_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkConfigProp)) && (ok || !reflect.DeepEqual(v, networkConfigProp)) { + nc := networkConfigProp.(map[string]interface{}) + if nc["enablePublicIp"] == true { + obj["networkConfig"] = networkConfigProp + + updateMask := []string{} + updateMask = append(updateMask, "networkConfig") + url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}{{cluster}}/instances/{{instance_id}}") + if err != nil { + return err + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + updateRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating the Instance to enable public ip: %s", err) + } else { + log.Printf("[DEBUG] Finished updating Instance to enable public ip %q: %#v", d.Id(), updateRes) + } + err = AlloydbOperationWaitTime( + config, updateRes, project, "Updating Instance", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + } + } + log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) return resourceAlloydbInstanceRead(d, meta) @@ -381,6 +481,12 @@ func resourceAlloydbInstanceRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("machine_config", flattenAlloydbInstanceMachineConfig(res["machineConfig"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } + if err := d.Set("network_config", flattenAlloydbInstanceNetworkConfig(res["networkConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("public_ip_address", flattenAlloydbInstancePublicIpAddress(res["publicIpAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } return nil } @@ -444,6 +550,12 @@ func resourceAlloydbInstanceUpdate(d *schema.ResourceData, meta interface{}) err } else if v, ok := d.GetOkExists("machine_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, machineConfigProp)) { obj["machineConfig"] = machineConfigProp } + networkConfigProp, err := expandAlloydbInstanceNetworkConfig(d.Get("network_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networkConfigProp)) { + 
obj["networkConfig"] = networkConfigProp + } url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}{{cluster}}/instances/{{instance_id}}") if err != nil { @@ -484,6 +596,10 @@ func resourceAlloydbInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("machine_config") { updateMask = append(updateMask, "machineConfig") } + + if d.HasChange("network_config") { + updateMask = append(updateMask, "networkConfig") + } // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) @@ -724,6 +840,52 @@ func flattenAlloydbInstanceMachineConfigCpuCount(v interface{}, d *schema.Resour return v // let terraform core handle it otherwise } +func flattenAlloydbInstanceNetworkConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["authorized_external_networks"] = + flattenAlloydbInstanceNetworkConfigAuthorizedExternalNetworks(original["authorizedExternalNetworks"], d, config) + transformed["enable_public_ip"] = + flattenAlloydbInstanceNetworkConfigEnablePublicIp(original["enablePublicIp"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbInstanceNetworkConfigAuthorizedExternalNetworks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "cidr_range": flattenAlloydbInstanceNetworkConfigAuthorizedExternalNetworksCidrRange(original["cidrRange"], d, config), + }) + } + return transformed +} +func flattenAlloydbInstanceNetworkConfigAuthorizedExternalNetworksCidrRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstanceNetworkConfigEnablePublicIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstancePublicIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + + func expandAlloydbInstanceLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil @@ -818,3 +980,59 @@ func expandAlloydbInstanceMachineConfig(v interface{}, d tpgresource.TerraformRe func expandAlloydbInstanceMachineConfigCpuCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } + +func expandAlloydbInstanceNetworkConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAuthorizedExternalNetworks, err := expandAlloydbInstanceNetworkConfigAuthorizedExternalNetworks(original["authorized_external_networks"], d, config) + if err != nil { + return nil, err + } else if val 
:= reflect.ValueOf(transformedAuthorizedExternalNetworks); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["authorizedExternalNetworks"] = transformedAuthorizedExternalNetworks + } + + transformedEnablePublicIp, err := expandAlloydbInstanceNetworkConfigEnablePublicIp(original["enable_public_ip"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnablePublicIp); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enablePublicIp"] = transformedEnablePublicIp + } + + return transformed, nil +} + +func expandAlloydbInstanceNetworkConfigAuthorizedExternalNetworks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCidrRange, err := expandAlloydbInstanceNetworkConfigAuthorizedExternalNetworksCidrRange(original["cidr_range"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCidrRange); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cidrRange"] = transformedCidrRange + } + + req = append(req, transformed) + } + return req, nil +} + +func expandAlloydbInstanceNetworkConfigAuthorizedExternalNetworksCidrRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceNetworkConfigEnablePublicIp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} From e803b36f4c14b728857d5575a567f8b029ae3e8c Mon Sep 17 00:00:00 2001 From: Nancy Hong Date: Mon, 3 Jun 2024 20:49:58 +0000 Subject: [PATCH 042/101] Updating AlloyDB Generated Files --- ...stances.alloydb.cnrm.cloud.google.com.yaml | 52 ++++++++++++++++ .../alloydb/v1beta1/alloydbinstance_types.go | 24 +++++++ .../alloydb/v1beta1/zz_generated.deepcopy.go | 59 ++++++++++++++++++ .../resource-docs/alloydb/alloydbinstance.md | 62 +++++++++++++++++++ 4 files changed, 197 insertions(+) diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_alloydbinstances.alloydb.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_alloydbinstances.alloydb.cnrm.cloud.google.com.yaml index b9ed05c4ef..1e0326fa9d 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_alloydbinstances.alloydb.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_alloydbinstances.alloydb.cnrm.cloud.google.com.yaml @@ -180,6 +180,27 @@ spec: description: The number of CPU's in the VM instance. type: integer type: object + networkConfig: + description: Instance level network configuration. + properties: + authorizedExternalNetworks: + description: A list of external networks authorized to access + this instance. This field is only allowed to be set when 'enable_public_ip' + is set to true. + items: + properties: + cidrRange: + description: CIDR range for one authorized network of the + instance. + type: string + type: object + type: array + enablePublicIp: + description: Enabling public ip for the instance. If a user wishes + to disable this, please also clear the list of the authorized + external networks set on the same instance. 
+ type: boolean + type: object readPoolConfig: description: Read pool specific config. If the instance type is READ_POOL, this configuration must be provided. @@ -242,6 +263,11 @@ spec: current reported status reflects the most recent desired state of the resource. type: integer + publicIpAddress: + description: The public IP addresses for the Instance. This is available + ONLY when networkConfig.enablePublicIp is set to true. This is the + connection endpoint for an end-user application. + type: string reconciling: description: Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating @@ -421,6 +447,27 @@ spec: description: The number of CPU's in the VM instance. type: integer type: object + networkConfig: + description: Instance level network configuration. + properties: + authorizedExternalNetworks: + description: A list of external networks authorized to access + this instance. This field is only allowed to be set when 'enable_public_ip' + is set to true. + items: + properties: + cidrRange: + description: CIDR range for one authorized network of the + instance. + type: string + type: object + type: array + enablePublicIp: + description: Enabling public ip for the instance. If a user wishes + to disable this, please also clear the list of the authorized + external networks set on the same instance. + type: boolean + type: object readPoolConfig: description: Read pool specific config. If the instance type is READ_POOL, this configuration must be provided. @@ -483,6 +530,11 @@ spec: current reported status reflects the most recent desired state of the resource. type: integer + publicIpAddress: + description: The public IP addresses for the Instance. This is available + ONLY when networkConfig.enablePublicIp is set to true. This is the + connection endpoint for an end-user application. + type: string reconciling: description: Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating diff --git a/pkg/clients/generated/apis/alloydb/v1beta1/alloydbinstance_types.go b/pkg/clients/generated/apis/alloydb/v1beta1/alloydbinstance_types.go index de47633680..bb8db06751 100644 --- a/pkg/clients/generated/apis/alloydb/v1beta1/alloydbinstance_types.go +++ b/pkg/clients/generated/apis/alloydb/v1beta1/alloydbinstance_types.go @@ -35,12 +35,28 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +type InstanceAuthorizedExternalNetworks struct { + /* CIDR range for one authorized network of the instance. */ + // +optional + CidrRange *string `json:"cidrRange,omitempty"` +} + type InstanceMachineConfig struct { /* The number of CPU's in the VM instance. */ // +optional CpuCount *int64 `json:"cpuCount,omitempty"` } +type InstanceNetworkConfig struct { + /* A list of external networks authorized to access this instance. This field is only allowed to be set when 'enable_public_ip' is set to true. */ + // +optional + AuthorizedExternalNetworks []InstanceAuthorizedExternalNetworks `json:"authorizedExternalNetworks,omitempty"` + + /* Enabling public ip for the instance. If a user wishes to disable this, please also clear the list of the authorized external networks set on the same instance. */ + // +optional + EnablePublicIp *bool `json:"enablePublicIp,omitempty"` +} + type InstanceReadPoolConfig struct { /* Read capacity, i.e. number of nodes in a read pool instance. 
*/ // +optional @@ -101,6 +117,10 @@ type AlloyDBInstanceSpec struct { // +optional MachineConfig *InstanceMachineConfig `json:"machineConfig,omitempty"` + /* Instance level network configuration. */ + // +optional + NetworkConfig *InstanceNetworkConfig `json:"networkConfig,omitempty"` + /* Read pool specific config. If the instance type is READ_POOL, this configuration must be provided. */ // +optional ReadPoolConfig *InstanceReadPoolConfig `json:"readPoolConfig,omitempty"` @@ -130,6 +150,10 @@ type AlloyDBInstanceStatus struct { // +optional ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + /* The public IP addresses for the Instance. This is available ONLY when networkConfig.enablePublicIp is set to true. This is the connection endpoint for an end-user application. */ + // +optional + PublicIpAddress *string `json:"publicIpAddress,omitempty"` + /* Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. */ // +optional Reconciling *bool `json:"reconciling,omitempty"` diff --git a/pkg/clients/generated/apis/alloydb/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/alloydb/v1beta1/zz_generated.deepcopy.go index a31a34b22c..ae05de7a55 100644 --- a/pkg/clients/generated/apis/alloydb/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/alloydb/v1beta1/zz_generated.deepcopy.go @@ -517,6 +517,11 @@ func (in *AlloyDBInstanceSpec) DeepCopyInto(out *AlloyDBInstanceSpec) { *out = new(InstanceMachineConfig) (*in).DeepCopyInto(*out) } + if in.NetworkConfig != nil { + in, out := &in.NetworkConfig, &out.NetworkConfig + *out = new(InstanceNetworkConfig) + (*in).DeepCopyInto(*out) + } if in.ReadPoolConfig != nil { in, out := &in.ReadPoolConfig, &out.ReadPoolConfig *out = new(InstanceReadPoolConfig) @@ -568,6 +573,11 @@ func (in *AlloyDBInstanceStatus) DeepCopyInto(out *AlloyDBInstanceStatus) { *out = new(int64) **out = **in } + if in.PublicIpAddress != nil { + in, out := &in.PublicIpAddress, &out.PublicIpAddress + *out = new(string) + **out = **in + } if in.Reconciling != nil { in, out := &in.Reconciling, &out.Reconciling *out = new(bool) @@ -1321,6 +1331,27 @@ func (in *ClusterWeeklySchedule) DeepCopy() *ClusterWeeklySchedule { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceAuthorizedExternalNetworks) DeepCopyInto(out *InstanceAuthorizedExternalNetworks) { + *out = *in + if in.CidrRange != nil { + in, out := &in.CidrRange, &out.CidrRange + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceAuthorizedExternalNetworks. +func (in *InstanceAuthorizedExternalNetworks) DeepCopy() *InstanceAuthorizedExternalNetworks { + if in == nil { + return nil + } + out := new(InstanceAuthorizedExternalNetworks) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InstanceMachineConfig) DeepCopyInto(out *InstanceMachineConfig) { *out = *in @@ -1342,6 +1373,34 @@ func (in *InstanceMachineConfig) DeepCopy() *InstanceMachineConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceNetworkConfig) DeepCopyInto(out *InstanceNetworkConfig) { + *out = *in + if in.AuthorizedExternalNetworks != nil { + in, out := &in.AuthorizedExternalNetworks, &out.AuthorizedExternalNetworks + *out = make([]InstanceAuthorizedExternalNetworks, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnablePublicIp != nil { + in, out := &in.EnablePublicIp, &out.EnablePublicIp + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceNetworkConfig. +func (in *InstanceNetworkConfig) DeepCopy() *InstanceNetworkConfig { + if in == nil { + return nil + } + out := new(InstanceNetworkConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InstanceReadPoolConfig) DeepCopyInto(out *InstanceReadPoolConfig) { *out = *in diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/alloydb/alloydbinstance.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/alloydb/alloydbinstance.md index 9de4420d74..2c969695cd 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/alloydb/alloydbinstance.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/alloydb/alloydbinstance.md @@ -99,6 +99,10 @@ instanceTypeRef: namespace: string machineConfig: cpuCount: integer +networkConfig: + authorizedExternalNetworks: + - cidrRange: string + enablePublicIp: boolean readPoolConfig: nodeCount: integer resourceID: string @@ -290,6 +294,56 @@ Use deletionPolicy = "FORCE" in the associated secondary cluster and delete the

{% verbatim %}The number of CPU's in the VM instance.{% endverbatim %}
+networkConfig
+Optional
+object
+{% verbatim %}Instance level network configuration.{% endverbatim %}
+networkConfig.authorizedExternalNetworks
+Optional
+list (object)
+{% verbatim %}A list of external networks authorized to access this instance. This field is only allowed to be set when 'enable_public_ip' is set to true.{% endverbatim %}
+networkConfig.authorizedExternalNetworks[]
+Optional
+object
+{% verbatim %}{% endverbatim %}
+networkConfig.authorizedExternalNetworks[].cidrRange
+Optional
+string
+{% verbatim %}CIDR range for one authorized network of the instance.{% endverbatim %}
+networkConfig.enablePublicIp
+Optional
+boolean
+{% verbatim %}Enabling public ip for the instance. If a user wishes to disable this, please also clear the list of the authorized external networks set on the same instance.{% endverbatim %}
readPoolConfig

@@ -338,6 +392,7 @@ createTime: string ipAddress: string name: string observedGeneration: integer +publicIpAddress: string reconciling: boolean state: string uid: string @@ -428,6 +483,13 @@ updateTime: string

{% verbatim %}ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. If this is equal to metadata.generation, then that means that the current reported status reflects the most recent desired state of the resource.{% endverbatim %}

+publicIpAddress
+string
+{% verbatim %}The public IP addresses for the Instance. This is available ONLY when networkConfig.enablePublicIp is set to true. This is the connection endpoint for an end-user application.{% endverbatim %}

+ + reconciling From f0f8e3bf380cacaa32185cd8a466152579d437cd Mon Sep 17 00:00:00 2001 From: Nancy Hong Date: Mon, 3 Jun 2024 20:57:03 +0000 Subject: [PATCH 043/101] Update testdata and samples for primary instance --- .../primary-instance/alloydb_v1beta1_alloydbinstance.yaml | 5 +++++ .../alloydbinstance/fullalloydbinstance/create.yaml | 7 ++++++- .../alloydbinstance/fullalloydbinstance/update.yaml | 5 ++++- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/config/samples/resources/alloydbinstance/primary-instance/alloydb_v1beta1_alloydbinstance.yaml b/config/samples/resources/alloydbinstance/primary-instance/alloydb_v1beta1_alloydbinstance.yaml index 87fa0a46cc..087dc992c2 100644 --- a/config/samples/resources/alloydbinstance/primary-instance/alloydb_v1beta1_alloydbinstance.yaml +++ b/config/samples/resources/alloydbinstance/primary-instance/alloydb_v1beta1_alloydbinstance.yaml @@ -25,3 +25,8 @@ spec: enable_google_adaptive_autovacuum: "off" machineConfig: cpuCount: 2 + networkConfig: + enablePublicIp: true + authorizedExternalNetworks: + - cidrRange: 8.8.8.8/30 + - cidrRange: 8.8.4.4/30 diff --git a/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/create.yaml b/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/create.yaml index df11e37457..330155d542 100644 --- a/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/create.yaml @@ -23,4 +23,9 @@ spec: name: alloydbcluster-${uniqueId} machineConfig: cpuCount: 2 - resourceID: alloydbinstance${uniqueId} \ No newline at end of file + resourceID: alloydbinstance${uniqueId} + networkConfig: + enablePublicIp: true + authorizedExternalNetworks: + - cidrRange: 8.8.8.8/30 + - cidrRange: 8.8.4.4/30 diff --git a/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/update.yaml b/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/update.yaml index 8f4b1021f4..ffededdfc0 100644 --- a/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/update.yaml +++ b/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/update.yaml @@ -25,4 +25,7 @@ spec: enable_google_adaptive_autovacuum: "off" machineConfig: cpuCount: 4 - resourceID: alloydbinstance${uniqueId} \ No newline at end of file + resourceID: alloydbinstance${uniqueId} + networkConfig: + enablePublicIp: false + authorizedExternalNetworks: [] From b2b4cfae66d032ddea22866ce79a95c43ad432e3 Mon Sep 17 00:00:00 2001 From: Nancy Hong Date: Mon, 3 Jun 2024 21:49:01 +0000 Subject: [PATCH 044/101] Update AlloyDB resources docs --- .../generated/resource-docs/alloydb/alloydbinstance.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/alloydb/alloydbinstance.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/alloydb/alloydbinstance.md index 2c969695cd..f31e5adf01 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/alloydb/alloydbinstance.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/alloydb/alloydbinstance.md @@ -552,6 +552,11 @@ spec: enable_google_adaptive_autovacuum: "off" machineConfig: cpuCount: 2 + networkConfig: + enablePublicIp: true + 
authorizedExternalNetworks: + - cidrRange: 8.8.8.8/30 + - cidrRange: 8.8.4.4/30 --- apiVersion: alloydb.cnrm.cloud.google.com/v1beta1 kind: AlloyDBCluster From 1e9c0110db933907a64e0200b2c9ef5f520478e0 Mon Sep 17 00:00:00 2001 From: Nancy Hong Date: Tue, 4 Jun 2024 22:06:10 +0000 Subject: [PATCH 045/101] Update yaml files to enable password complexity and have a complex password --- .../regular-cluster/alloydb_v1beta1_alloydbcluster.yaml | 2 +- .../primary-instance/alloydb_v1beta1_alloydbinstance.yaml | 1 + .../v1beta1/alloydbcluster/fullalloydbcluster/create.yaml | 4 ++-- .../v1beta1/alloydbinstance/fullalloydbinstance/create.yaml | 2 ++ 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/config/samples/resources/alloydbcluster/regular-cluster/alloydb_v1beta1_alloydbcluster.yaml b/config/samples/resources/alloydbcluster/regular-cluster/alloydb_v1beta1_alloydbcluster.yaml index ced290944f..3ab32f8afc 100644 --- a/config/samples/resources/alloydbcluster/regular-cluster/alloydb_v1beta1_alloydbcluster.yaml +++ b/config/samples/resources/alloydbcluster/regular-cluster/alloydb_v1beta1_alloydbcluster.yaml @@ -55,4 +55,4 @@ spec: initialUser: user: "postgres" password: - value: "postgres" \ No newline at end of file + value: "Postgres123" diff --git a/config/samples/resources/alloydbinstance/primary-instance/alloydb_v1beta1_alloydbinstance.yaml b/config/samples/resources/alloydbinstance/primary-instance/alloydb_v1beta1_alloydbinstance.yaml index 087dc992c2..61850ab15c 100644 --- a/config/samples/resources/alloydbinstance/primary-instance/alloydb_v1beta1_alloydbinstance.yaml +++ b/config/samples/resources/alloydbinstance/primary-instance/alloydb_v1beta1_alloydbinstance.yaml @@ -23,6 +23,7 @@ spec: name: alloydbinstance-dep-primary databaseFlags: enable_google_adaptive_autovacuum: "off" + password.enforce_complexity: "on" machineConfig: cpuCount: 2 networkConfig: diff --git a/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbcluster/fullalloydbcluster/create.yaml b/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbcluster/fullalloydbcluster/create.yaml index 8e8969e97b..6cd6858b39 100644 --- a/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbcluster/fullalloydbcluster/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbcluster/fullalloydbcluster/create.yaml @@ -27,7 +27,7 @@ spec: initialUser: user: postgres password: - value: postgres + value: Postgres123 automatedBackupPolicy: backupWindow: 7200s @@ -59,4 +59,4 @@ spec: minutes: 0 seconds: 0 nanos: 0 - \ No newline at end of file + diff --git a/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/create.yaml b/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/create.yaml index 330155d542..0c7bf951fe 100644 --- a/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/create.yaml @@ -24,6 +24,8 @@ spec: machineConfig: cpuCount: 2 resourceID: alloydbinstance${uniqueId} + databaseFlags: + password.enforce_complexity: "on" networkConfig: enablePublicIp: true authorizedExternalNetworks: From 1a67020d74cc4fa24703ea016b9cc193585a1bb3 Mon Sep 17 00:00:00 2001 From: Nancy Hong Date: Tue, 4 Jun 2024 23:15:36 +0000 Subject: [PATCH 046/101] Update mock instance to support updating the networkConfig field --- mockgcp/mockalloydb/instance.go | 2 ++ 1 file 
changed, 2 insertions(+) diff --git a/mockgcp/mockalloydb/instance.go b/mockgcp/mockalloydb/instance.go index 90dab65844..bd1bd63cc6 100644 --- a/mockgcp/mockalloydb/instance.go +++ b/mockgcp/mockalloydb/instance.go @@ -126,6 +126,8 @@ func (s *AlloyDBAdminV1) UpdateInstance(ctx context.Context, req *pb.UpdateInsta obj.MachineConfig = req.Instance.GetMachineConfig() case "pscInstanceConfig": obj.PscInstanceConfig = req.Instance.GetPscInstanceConfig() + case "networkConfig": + obj.NetworkConfig = req.Instance.GetNetworkConfig() default: return nil, status.Errorf(codes.InvalidArgument, "update_mask path %q not valid", path) } From ff1b81dfb6814dde61055cf9a4aa20e5e3e67d99 Mon Sep 17 00:00:00 2001 From: Nancy Hong Date: Tue, 4 Jun 2024 23:28:38 +0000 Subject: [PATCH 047/101] Updating AlloyDB docs with complex password --- .../generated/resource-docs/alloydb/alloydbcluster.md | 2 +- .../generated/resource-docs/alloydb/alloydbinstance.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/alloydb/alloydbcluster.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/alloydb/alloydbcluster.md index 15ac0d11a8..8b3ceeb586 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/alloydb/alloydbcluster.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/alloydb/alloydbcluster.md @@ -1427,7 +1427,7 @@ spec: initialUser: user: "postgres" password: - value: "postgres" + value: "Postgres123" --- apiVersion: compute.cnrm.cloud.google.com/v1beta1 kind: ComputeAddress diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/alloydb/alloydbinstance.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/alloydb/alloydbinstance.md index f31e5adf01..bd31a587fd 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/alloydb/alloydbinstance.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/alloydb/alloydbinstance.md @@ -550,6 +550,7 @@ spec: name: alloydbinstance-dep-primary databaseFlags: enable_google_adaptive_autovacuum: "off" + password.enforce_complexity: "on" machineConfig: cpuCount: 2 networkConfig: From b8b4eb51d78ce07e35b0f50fffbf4bd7d864a8ea Mon Sep 17 00:00:00 2001 From: Nancy Hong Date: Mon, 17 Jun 2024 15:25:21 -0700 Subject: [PATCH 048/101] Update google_alloydb_instance resource to remove custom code from Terraform --- .../alloydb/resource_alloydb_instance.go | 59 ------------------- 1 file changed, 59 deletions(-) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/services/alloydb/resource_alloydb_instance.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/services/alloydb/resource_alloydb_instance.go index e369c9df9a..7f03e7af7c 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/services/alloydb/resource_alloydb_instance.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/services/alloydb/resource_alloydb_instance.go @@ -307,20 +307,6 @@ func resourceAlloydbInstanceCreate(d *schema.ResourceData, meta interface{}) err billingProject = bp } - // Temporarily remove the enablePublicIp field if it is set to true since the - // API prohibits creating instances with public IP enabled. 
- var nc map[string]interface{} - if obj["networkConfig"] == nil { - nc = make(map[string]interface{}) - } else { - nc = obj["networkConfig"].(map[string]interface{}) - } - if nc["enablePublicIp"] == true { - delete(nc, "enablePublicIp") - delete(nc, "authorizedExternalNetworks") - } - obj["networkConfig"] = nc - // Read the config and call createsecondary api if instance_type is SECONDARY if instanceType := d.Get("instance_type"); instanceType == "SECONDARY" { @@ -356,51 +342,6 @@ func resourceAlloydbInstanceCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error waiting to create Instance: %s", err) } - // If enablePublicIp is set to true, then we must create the instance first with - // it disabled then update to enable it. - networkConfigProp, err = expandAlloydbInstanceNetworkConfig(d.Get("network_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkConfigProp)) && (ok || !reflect.DeepEqual(v, networkConfigProp)) { - nc := networkConfigProp.(map[string]interface{}) - if nc["enablePublicIp"] == true { - obj["networkConfig"] = networkConfigProp - - updateMask := []string{} - updateMask = append(updateMask, "networkConfig") - url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}{{cluster}}/instances/{{instance_id}}") - if err != nil { - return err - } - url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - updateRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "PATCH", - Project: billingProject, - RawURL: url, - UserAgent: userAgent, - Body: obj, - Timeout: d.Timeout(schema.TimeoutUpdate), - }) - if err != nil { - return fmt.Errorf("Error updating the Instance to enable public ip: %s", err) - } else { - log.Printf("[DEBUG] Finished updating Instance to enable public ip %q: %#v", d.Id(), updateRes) - } - err = AlloydbOperationWaitTime( - config, updateRes, project, "Updating Instance", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - } - } - log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) return resourceAlloydbInstanceRead(d, meta) From 20f153c62888f92786d2228f59f35302f8033d02 Mon Sep 17 00:00:00 2001 From: Nancy Hong Date: Thu, 20 Jun 2024 19:22:38 +0000 Subject: [PATCH 049/101] Add new public IP fields to the CRD acronyms unit test --- tests/apichecks/testdata/exceptions/acronyms.txt | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/apichecks/testdata/exceptions/acronyms.txt b/tests/apichecks/testdata/exceptions/acronyms.txt index 84649cd7a1..5035df97c3 100644 --- a/tests/apichecks/testdata/exceptions/acronyms.txt +++ b/tests/apichecks/testdata/exceptions/acronyms.txt @@ -3,6 +3,10 @@ [acronyms] crd=alloydbclusters.alloydb.cnrm.cloud.google.com version=v1alpha1: field ".status.migrationSource[].referenceId" should be ".status.migrationSource[].referenceID" [acronyms] crd=alloydbclusters.alloydb.cnrm.cloud.google.com version=v1beta1: field ".spec.networkConfig.allocatedIpRange" should be ".spec.networkConfig.allocatedIPRange" [acronyms] crd=alloydbclusters.alloydb.cnrm.cloud.google.com version=v1beta1: field ".status.migrationSource[].referenceId" should be ".status.migrationSource[].referenceID" +[acronyms] crd=alloydbinstances.alloydb.cnrm.cloud.google.com version=v1alpha1: field ".spec.networkConfig.enablePublicIp" should 
be ".spec.networkConfig.enablePublicIP" +[acronyms] crd=alloydbinstances.alloydb.cnrm.cloud.google.com version=v1alpha1: field ".status.publicIpAddress" should be ".status.publicIPAddress" +[acronyms] crd=alloydbinstances.alloydb.cnrm.cloud.google.com version=v1beta1: field ".spec.networkConfig.enablePublicIp" should be ".spec.networkConfig.enablePublicIP" +[acronyms] crd=alloydbinstances.alloydb.cnrm.cloud.google.com version=v1beta1: field ".status.publicIpAddress" should be ".status.publicIPAddress" [acronyms] crd=apigatewayapiconfigs.apigateway.cnrm.cloud.google.com version=v1alpha1: field ".spec.apiConfigIdPrefix" should be ".spec.apiConfigIDPrefix" [acronyms] crd=apigatewayapiconfigs.apigateway.cnrm.cloud.google.com version=v1alpha1: field ".status.serviceConfigId" should be ".status.serviceConfigID" [acronyms] crd=apigeeaddonsconfigs.apigee.cnrm.cloud.google.com version=v1alpha1: field ".spec.addonsConfig.advancedApiOpsConfig" should be ".spec.addonsConfig.advancedAPIOpsConfig" @@ -506,4 +510,4 @@ [acronyms] crd=storagetransferjobs.storagetransfer.cnrm.cloud.google.com version=v1beta1: field ".spec.transferSpec.awsS3DataSource.awsAccessKey.accessKeyId" should be ".spec.transferSpec.awsS3DataSource.awsAccessKey.accessKeyID" [acronyms] crd=storagetransferjobs.storagetransfer.cnrm.cloud.google.com version=v1beta1: field ".spec.transferSpec.httpDataSource.listUrl" should be ".spec.transferSpec.httpDataSource.listURL" [acronyms] crd=vpcaccessconnectors.vpcaccess.cnrm.cloud.google.com version=v1beta1: field ".spec.ipCidrRange" should be ".spec.ipCIDRRange" -[acronyms] crd=workflowsworkflows.workflows.cnrm.cloud.google.com version=v1alpha1: field ".status.revisionId" should be ".status.revisionID" \ No newline at end of file +[acronyms] crd=workflowsworkflows.workflows.cnrm.cloud.google.com version=v1alpha1: field ".status.revisionId" should be ".status.revisionID" From 9e0f87c0edcf763fc968a195f4e276f24ccb92d6 Mon Sep 17 00:00:00 2001 From: Nancy Hong Date: Thu, 20 Jun 2024 21:13:18 +0000 Subject: [PATCH 050/101] Update MockGCP golden logs --- ...ted_object_fullalloydbinstance.golden.yaml | 7 ++- .../fullalloydbinstance/_http.log | 46 ++++++++++++++++--- 2 files changed, 44 insertions(+), 9 deletions(-) diff --git a/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/_generated_object_fullalloydbinstance.golden.yaml b/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/_generated_object_fullalloydbinstance.golden.yaml index 5c755fc9c4..440d891b0f 100644 --- a/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/_generated_object_fullalloydbinstance.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/_generated_object_fullalloydbinstance.golden.yaml @@ -7,7 +7,7 @@ metadata: finalizers: - cnrm.cloud.google.com/finalizer - cnrm.cloud.google.com/deletion-defender - generation: 3 + generation: 4 labels: cnrm-test: "true" name: alloydbinstance-${uniqueId} @@ -18,10 +18,13 @@ spec: name: alloydbcluster-${uniqueId} databaseFlags: enable_google_adaptive_autovacuum: "off" + password.enforce_complexity: "on" instanceTypeRef: name: alloydbcluster-${uniqueId} machineConfig: cpuCount: 4 + networkConfig: + enablePublicIp: false resourceID: alloydbinstance${uniqueId} status: conditions: @@ -32,5 +35,5 @@ status: type: Ready createTime: "1970-01-01T00:00:00Z" name: 
projects/${projectId}/locations/europe-north1/clusters/alloydbcluster${uniqueId}/instances/alloydbinstance${uniqueId} - observedGeneration: 3 + observedGeneration: 4 state: STATE_UNSPECIFIED diff --git a/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/_http.log b/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/_http.log index 40f209d5a7..372ff832e5 100644 --- a/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/alloydb/v1beta1/alloydbinstance/fullalloydbinstance/_http.log @@ -551,6 +551,9 @@ Content-Type: application/json User-Agent: Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager { + "databaseFlags": { + "password.enforce_complexity": "on" + }, "instanceType": "PRIMARY", "labels": { "cnrm-test": "true", @@ -558,6 +561,17 @@ User-Agent: Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 t }, "machineConfig": { "cpuCount": 2 + }, + "networkConfig": { + "authorizedExternalNetworks": [ + { + "cidrRange": "8.8.8.8/30" + }, + { + "cidrRange": "8.8.4.4/30" + } + ], + "enablePublicIp": true } } @@ -586,7 +600,9 @@ Grpc-Metadata-Content-Type: application/grpc "availabilityType": "AVAILABILITY_TYPE_UNSPECIFIED", "clientConnectionConfig": null, "createTime": "2024-04-01T12:34:56.123456Z", - "databaseFlags": {}, + "databaseFlags": { + "password.enforce_complexity": "on" + }, "deleteTime": null, "displayName": "", "etag": "abcdef0123A=", @@ -601,7 +617,17 @@ Grpc-Metadata-Content-Type: application/grpc "cpuCount": 2 }, "name": "projects/${projectId}/locations/europe-north1/clusters/alloydbcluster${uniqueId}/instances/alloydbinstance${uniqueId}", - "networkConfig": null, + "networkConfig": { + "authorizedExternalNetworks": [ + { + "cidrRange": "8.8.8.8/30" + }, + { + "cidrRange": "8.8.4.4/30" + } + ], + "enablePublicIp": true + }, "nodes": [], "pscInstanceConfig": null, "publicIpAddress": "", @@ -618,14 +644,15 @@ Grpc-Metadata-Content-Type: application/grpc --- -PATCH https://alloydb.googleapis.com/v1beta/projects/${projectId}/locations/europe-north1/clusters/alloydbcluster${uniqueId}/instances/alloydbinstance${uniqueId}?alt=json&updateMask=databaseFlags%2CmachineConfig +PATCH https://alloydb.googleapis.com/v1beta/projects/${projectId}/locations/europe-north1/clusters/alloydbcluster${uniqueId}/instances/alloydbinstance${uniqueId}?alt=json&updateMask=databaseFlags%2CmachineConfig%2CnetworkConfig Content-Type: application/json User-Agent: Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager { "availabilityType": "AVAILABILITY_TYPE_UNSPECIFIED", "databaseFlags": { - "enable_google_adaptive_autovacuum": "off" + "enable_google_adaptive_autovacuum": "off", + "password.enforce_complexity": "on" }, "labels": { "cnrm-test": "true", @@ -633,7 +660,8 @@ User-Agent: Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 t }, "machineConfig": { "cpuCount": 4 - } + }, + "networkConfig": {} } 200 OK @@ -662,7 +690,8 @@ Grpc-Metadata-Content-Type: application/grpc "clientConnectionConfig": null, "createTime": "2024-04-01T12:34:56.123456Z", "databaseFlags": { - "enable_google_adaptive_autovacuum": "off" + "enable_google_adaptive_autovacuum": "off", + "password.enforce_complexity": "on" }, "deleteTime": null, "displayName": "", @@ -678,7 +707,10 @@ 
Grpc-Metadata-Content-Type: application/grpc "cpuCount": 4 }, "name": "projects/${projectId}/locations/europe-north1/clusters/alloydbcluster${uniqueId}/instances/alloydbinstance${uniqueId}", - "networkConfig": null, + "networkConfig": { + "authorizedExternalNetworks": [], + "enablePublicIp": false + }, "nodes": [], "pscInstanceConfig": null, "publicIpAddress": "", From b87bd7d5b38e0fe5e7d10a1579cf4b3144ff6a1c Mon Sep 17 00:00:00 2001 From: Nancy Hong Date: Thu, 20 Jun 2024 22:20:41 +0000 Subject: [PATCH 051/101] Add newline to unit test acronyms to fix unit tests --- tests/apichecks/testdata/exceptions/acronyms.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/apichecks/testdata/exceptions/acronyms.txt b/tests/apichecks/testdata/exceptions/acronyms.txt index 5035df97c3..2e8fe9f965 100644 --- a/tests/apichecks/testdata/exceptions/acronyms.txt +++ b/tests/apichecks/testdata/exceptions/acronyms.txt @@ -511,3 +511,4 @@ [acronyms] crd=storagetransferjobs.storagetransfer.cnrm.cloud.google.com version=v1beta1: field ".spec.transferSpec.httpDataSource.listUrl" should be ".spec.transferSpec.httpDataSource.listURL" [acronyms] crd=vpcaccessconnectors.vpcaccess.cnrm.cloud.google.com version=v1beta1: field ".spec.ipCidrRange" should be ".spec.ipCIDRRange" [acronyms] crd=workflowsworkflows.workflows.cnrm.cloud.google.com version=v1alpha1: field ".status.revisionId" should be ".status.revisionID" + From 9726162dd9743f606b3b3dbd5e5207a4de90a0a7 Mon Sep 17 00:00:00 2001 From: Nancy Hong Date: Thu, 20 Jun 2024 23:13:18 +0000 Subject: [PATCH 052/101] Revert "Add newline to unit test acronyms to fix unit tests" This reverts commit a20f61ac86b838591a895c26bd8ae74c97c6d3eb. --- tests/apichecks/testdata/exceptions/acronyms.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/apichecks/testdata/exceptions/acronyms.txt b/tests/apichecks/testdata/exceptions/acronyms.txt index 2e8fe9f965..5035df97c3 100644 --- a/tests/apichecks/testdata/exceptions/acronyms.txt +++ b/tests/apichecks/testdata/exceptions/acronyms.txt @@ -511,4 +511,3 @@ [acronyms] crd=storagetransferjobs.storagetransfer.cnrm.cloud.google.com version=v1beta1: field ".spec.transferSpec.httpDataSource.listUrl" should be ".spec.transferSpec.httpDataSource.listURL" [acronyms] crd=vpcaccessconnectors.vpcaccess.cnrm.cloud.google.com version=v1beta1: field ".spec.ipCidrRange" should be ".spec.ipCIDRRange" [acronyms] crd=workflowsworkflows.workflows.cnrm.cloud.google.com version=v1alpha1: field ".status.revisionId" should be ".status.revisionID" - From 32bfc458a4c96117b8854741d13c7b9eb46fbf9e Mon Sep 17 00:00:00 2001 From: Nancy Hong Date: Wed, 26 Jun 2024 09:40:36 -0700 Subject: [PATCH 053/101] remove extra newline character in acronyms to fix unit test --- tests/apichecks/testdata/exceptions/acronyms.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/apichecks/testdata/exceptions/acronyms.txt b/tests/apichecks/testdata/exceptions/acronyms.txt index 5035df97c3..e556a548fa 100644 --- a/tests/apichecks/testdata/exceptions/acronyms.txt +++ b/tests/apichecks/testdata/exceptions/acronyms.txt @@ -510,4 +510,4 @@ [acronyms] crd=storagetransferjobs.storagetransfer.cnrm.cloud.google.com version=v1beta1: field ".spec.transferSpec.awsS3DataSource.awsAccessKey.accessKeyId" should be ".spec.transferSpec.awsS3DataSource.awsAccessKey.accessKeyID" [acronyms] crd=storagetransferjobs.storagetransfer.cnrm.cloud.google.com version=v1beta1: field ".spec.transferSpec.httpDataSource.listUrl" should be 
".spec.transferSpec.httpDataSource.listURL" [acronyms] crd=vpcaccessconnectors.vpcaccess.cnrm.cloud.google.com version=v1beta1: field ".spec.ipCidrRange" should be ".spec.ipCIDRRange" -[acronyms] crd=workflowsworkflows.workflows.cnrm.cloud.google.com version=v1alpha1: field ".status.revisionId" should be ".status.revisionID" +[acronyms] crd=workflowsworkflows.workflows.cnrm.cloud.google.com version=v1alpha1: field ".status.revisionId" should be ".status.revisionID" \ No newline at end of file From 494d9376fd1b4ddc9c8921188d0e75973a15b8e8 Mon Sep 17 00:00:00 2001 From: Joyce Ma Date: Wed, 26 Jun 2024 17:32:01 +0000 Subject: [PATCH 054/101] Fix e2e fixtures storagebucket tests --- .../_generated_object_storagebucketbasic.golden.yaml | 4 ++-- .../_generated_object_storagebucketsoftdelete.golden.yaml | 1 - .../_generated_object_storagebucketzero.golden.yaml | 1 - 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketbasic/_generated_object_storagebucketbasic.golden.yaml b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketbasic/_generated_object_storagebucketbasic.golden.yaml index bf2b29e6ab..b1ee58c04c 100644 --- a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketbasic/_generated_object_storagebucketbasic.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketbasic/_generated_object_storagebucketbasic.golden.yaml @@ -8,7 +8,7 @@ metadata: finalizers: - cnrm.cloud.google.com/finalizer - cnrm.cloud.google.com/deletion-defender - generation: 5 + generation: 4 labels: cnrm-test: "true" label-one: value-one @@ -31,7 +31,7 @@ status: reason: UpToDate status: "True" type: Ready - observedGeneration: 5 + observedGeneration: 4 observedState: softDeletePolicy: retentionDurationSeconds: 0 diff --git a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/_generated_object_storagebucketsoftdelete.golden.yaml b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/_generated_object_storagebucketsoftdelete.golden.yaml index c52ed4bbb3..e284a25973 100644 --- a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/_generated_object_storagebucketsoftdelete.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketsoftdelete/_generated_object_storagebucketsoftdelete.golden.yaml @@ -26,7 +26,6 @@ spec: publicAccessPrevention: inherited resourceID: storagebucket-sample-${uniqueId} softDeletePolicy: - effectiveTime: "1970-01-01T00:00:00Z" retentionDurationSeconds: 604800 storageClass: STANDARD versioning: diff --git a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/_generated_object_storagebucketzero.golden.yaml b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/_generated_object_storagebucketzero.golden.yaml index 4109c7a187..d4b7e391a7 100644 --- a/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/_generated_object_storagebucketzero.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/storage/v1beta1/storagebucket/storagebucketzero/_generated_object_storagebucketzero.golden.yaml @@ -20,7 +20,6 @@ spec: publicAccessPrevention: inherited resourceID: storagebucket-sample-${uniqueId} softDeletePolicy: - effectiveTime: 
"1970-01-01T00:00:00Z" retentionDurationSeconds: 604800 storageClass: STANDARD versioning: From 750f565f690a393c1dcc71919c9879dcae171bba Mon Sep 17 00:00:00 2001 From: justinsb Date: Thu, 20 Jun 2024 20:12:49 -0400 Subject: [PATCH 055/101] monitoringdashboard: add collapsibleGroup widget --- .../v1beta1/monitoringdashboard_types.go | 2 +- .../v1beta1/zz_generated.deepcopy.go | 5 ++ ...ards.monitoring.cnrm.cloud.google.com.yaml | 40 +++++++++ .../v1beta1/monitoringdashboard_types.go | 14 +++ .../v1beta1/zz_generated.deepcopy.go | 31 +++++++ .../dashboard_generated.mappings.go | 6 +- .../direct/monitoring/roundtrip_test.go | 2 - ...ated_export_monitoringdashboardfull.golden | 3 + ...object_monitoringdashboardfull.golden.yaml | 3 + .../monitoringdashboardfull/_http.log | 18 ++++ .../monitoringdashboardfull/create.yaml | 3 + .../monitoring/monitoringdashboard.md | 88 +++++++++++++++++++ 12 files changed, 210 insertions(+), 5 deletions(-) diff --git a/apis/monitoring/v1beta1/monitoringdashboard_types.go b/apis/monitoring/v1beta1/monitoringdashboard_types.go index ffa9878362..4d038544b8 100644 --- a/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -459,11 +459,11 @@ type Widget struct { // A widget that displays time series data in a tabular format. TimeSeriesTable *TimeSeriesTable `json:"timeSeriesTable,omitempty"` + */ // A widget that groups the other widgets. All widgets that are within // the area spanned by the grouping widget are considered member widgets. CollapsibleGroup *CollapsibleGroup `json:"collapsibleGroup,omitempty"` - */ // A widget that shows a stream of logs. LogsPanel *LogsPanel `json:"logsPanel,omitempty"` diff --git a/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/apis/monitoring/v1beta1/zz_generated.deepcopy.go index 40632a6103..26542bc9bc 100644 --- a/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -1241,6 +1241,11 @@ func (in *Widget) DeepCopyInto(out *Widget) { *out = new(Empty) **out = **in } + if in.CollapsibleGroup != nil { + in, out := &in.CollapsibleGroup, &out.CollapsibleGroup + *out = new(CollapsibleGroup) + (*in).DeepCopyInto(*out) + } if in.LogsPanel != nil { in, out := &in.LogsPanel, &out.LogsPanel *out = new(LogsPanel) diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml index edfab4f948..21efa8c20b 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml @@ -84,6 +84,16 @@ spec: blank: description: A blank space. type: object + collapsibleGroup: + description: A widget that groups the other widgets. + All widgets that are within the area spanned by + the grouping widget are considered member widgets. + properties: + collapsed: + description: The collapsed state of the widget + on first page load. + type: boolean + type: object logsPanel: description: A widget that shows a stream of logs. properties: @@ -1527,6 +1537,16 @@ spec: blank: description: A blank space. type: object + collapsibleGroup: + description: A widget that groups the other widgets. 
All + widgets that are within the area spanned by the grouping + widget are considered member widgets. + properties: + collapsed: + description: The collapsed state of the widget on first + page load. + type: boolean + type: object logsPanel: description: A widget that shows a stream of logs. properties: @@ -2897,6 +2917,16 @@ spec: blank: description: A blank space. type: object + collapsibleGroup: + description: A widget that groups the other widgets. + All widgets that are within the area spanned by the + grouping widget are considered member widgets. + properties: + collapsed: + description: The collapsed state of the widget on + first page load. + type: boolean + type: object logsPanel: description: A widget that shows a stream of logs. properties: @@ -4370,6 +4400,16 @@ spec: blank: description: A blank space. type: object + collapsibleGroup: + description: A widget that groups the other widgets. + All widgets that are within the area spanned by + the grouping widget are considered member widgets. + properties: + collapsed: + description: The collapsed state of the widget + on first page load. + type: boolean + type: object logsPanel: description: A widget that shows a stream of logs. properties: diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go index 705e1671e7..a681ee8b0a 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -101,6 +101,12 @@ type DashboardChartOptions struct { Mode *string `json:"mode,omitempty"` } +type DashboardCollapsibleGroup struct { + /* The collapsed state of the widget on first page load. */ + // +optional + Collapsed *bool `json:"collapsed,omitempty"` +} + type DashboardColumnLayout struct { /* The columns of content to display. */ // +optional @@ -512,6 +518,10 @@ type DashboardWidget struct { // +optional Blank *DashboardBlank `json:"blank,omitempty"` + /* A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets. */ + // +optional + CollapsibleGroup *DashboardCollapsibleGroup `json:"collapsibleGroup,omitempty"` + /* A widget that shows a stream of logs. */ // +optional LogsPanel *DashboardLogsPanel `json:"logsPanel,omitempty"` @@ -542,6 +552,10 @@ type DashboardWidgets struct { // +optional Blank *DashboardBlank `json:"blank,omitempty"` + /* A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets. */ + // +optional + CollapsibleGroup *DashboardCollapsibleGroup `json:"collapsibleGroup,omitempty"` + /* A widget that shows a stream of logs. */ // +optional LogsPanel *DashboardLogsPanel `json:"logsPanel,omitempty"` diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go index a13c4bcc8b..cd92e19634 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -579,6 +579,27 @@ func (in *DashboardChartOptions) DeepCopy() *DashboardChartOptions { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DashboardCollapsibleGroup) DeepCopyInto(out *DashboardCollapsibleGroup) { + *out = *in + if in.Collapsed != nil { + in, out := &in.Collapsed, &out.Collapsed + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardCollapsibleGroup. +func (in *DashboardCollapsibleGroup) DeepCopy() *DashboardCollapsibleGroup { + if in == nil { + return nil + } + out := new(DashboardCollapsibleGroup) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DashboardColumnLayout) DeepCopyInto(out *DashboardColumnLayout) { *out = *in @@ -1319,6 +1340,11 @@ func (in *DashboardWidget) DeepCopyInto(out *DashboardWidget) { *out = new(DashboardBlank) **out = **in } + if in.CollapsibleGroup != nil { + in, out := &in.CollapsibleGroup, &out.CollapsibleGroup + *out = new(DashboardCollapsibleGroup) + (*in).DeepCopyInto(*out) + } if in.LogsPanel != nil { in, out := &in.LogsPanel, &out.LogsPanel *out = new(DashboardLogsPanel) @@ -1370,6 +1396,11 @@ func (in *DashboardWidgets) DeepCopyInto(out *DashboardWidgets) { *out = new(DashboardBlank) **out = **in } + if in.CollapsibleGroup != nil { + in, out := &in.CollapsibleGroup, &out.CollapsibleGroup + *out = new(DashboardCollapsibleGroup) + (*in).DeepCopyInto(*out) + } if in.LogsPanel != nil { in, out := &in.LogsPanel, &out.LogsPanel *out = new(DashboardLogsPanel) diff --git a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go index ac1cb5bcf9..1eb85cba7b 100644 --- a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go @@ -835,7 +835,7 @@ func Widget_FromProto(mapCtx *MapContext, in *pb.Widget) *krm.Widget { out.Blank = Empty_FromProto(mapCtx, in.GetBlank()) // MISSING: AlertChart // MISSING: TimeSeriesTable - // MISSING: CollapsibleGroup + out.CollapsibleGroup = CollapsibleGroup_FromProto(mapCtx, in.GetCollapsibleGroup()) out.LogsPanel = LogsPanel_FromProto(mapCtx, in.GetLogsPanel()) // MISSING: IncidentList // MISSING: PieChart @@ -865,7 +865,9 @@ func Widget_ToProto(mapCtx *MapContext, in *krm.Widget) *pb.Widget { } // MISSING: AlertChart // MISSING: TimeSeriesTable - // MISSING: CollapsibleGroup + if oneof := CollapsibleGroup_ToProto(mapCtx, in.CollapsibleGroup); oneof != nil { + out.Content = &pb.Widget_CollapsibleGroup{CollapsibleGroup: oneof} + } if oneof := LogsPanel_ToProto(mapCtx, in.LogsPanel); oneof != nil { out.Content = &pb.Widget_LogsPanel{LogsPanel: oneof} } diff --git a/pkg/controller/direct/monitoring/roundtrip_test.go b/pkg/controller/direct/monitoring/roundtrip_test.go index be28b1b93c..d1654546d8 100644 --- a/pkg/controller/direct/monitoring/roundtrip_test.go +++ b/pkg/controller/direct/monitoring/roundtrip_test.go @@ -76,8 +76,6 @@ func FuzzMonitoringDashboardSpec(f *testing.F) { unimplementedFields.Insert(widgetPath + ".time_series_table") - unimplementedFields.Insert(widgetPath + ".collapsible_group") - unimplementedFields.Insert(widgetPath + ".pie_chart") unimplementedFields.Insert(widgetPath + ".single_view_group") diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden 
b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden index e1bf15c75c..4e9b6f4a78 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden @@ -55,6 +55,9 @@ spec: dividerBelow: true subtitle: Example SectionHeader title: SectionHeader Widget + - collapsibleGroup: + collapsed: true + title: CollapsibleGroup Widget displayName: monitoringdashboard-full projectRef: external: ${projectId} \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml index 6ec527055a..195ec8a051 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml @@ -63,6 +63,9 @@ spec: dividerBelow: true subtitle: Example SectionHeader title: SectionHeader Widget + - collapsibleGroup: + collapsed: true + title: CollapsibleGroup Widget displayName: monitoringdashboard-full projectRef: external: ${projectId} diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log index 47673e9bb9..fb8c378c26 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log @@ -112,6 +112,12 @@ x-goog-request-params: parent=projects%2F${projectId} "subtitle": "Example SectionHeader" }, "title": "SectionHeader Widget" + }, + { + "collapsibleGroup": { + "collapsed": true + }, + "title": "CollapsibleGroup Widget" } ] } @@ -217,6 +223,12 @@ X-Xss-Protection: 0 "subtitle": "Example SectionHeader" }, "title": "SectionHeader Widget" + }, + { + "collapsibleGroup": { + "collapsed": true + }, + "title": "CollapsibleGroup Widget" } ] } @@ -330,6 +342,12 @@ X-Xss-Protection: 0 "subtitle": "Example SectionHeader" }, "title": "SectionHeader Widget" + }, + { + "collapsibleGroup": { + "collapsed": true + }, + "title": "CollapsibleGroup Widget" } ] } diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml index 851158470c..e8f560ec81 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml @@ -69,3 +69,6 @@ spec: sectionHeader: dividerBelow: true subtitle: "Example SectionHeader" + - title: "CollapsibleGroup 
Widget" + collapsibleGroup: + collapsed: true diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md index e10eb9c66d..1b277d1b03 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md @@ -82,6 +82,8 @@ columnLayout: - weight: integer widgets: - blank: {} + collapsibleGroup: + collapsed: boolean logsPanel: filter: string resourceNames: @@ -236,6 +238,8 @@ gridLayout: columns: integer widgets: - blank: {} + collapsibleGroup: + collapsed: boolean logsPanel: filter: string resourceNames: @@ -391,6 +395,8 @@ mosaicLayout: - height: integer widget: blank: {} + collapsibleGroup: + collapsed: boolean logsPanel: filter: string resourceNames: @@ -554,6 +560,8 @@ rowLayout: - weight: integer widgets: - blank: {} + collapsibleGroup: + collapsed: boolean logsPanel: filter: string resourceNames: @@ -782,6 +790,26 @@ rowLayout:

 {% verbatim %}A blank space.{% endverbatim %}
+ columnLayout.columns[].widgets[].collapsibleGroup | Optional | object | {% verbatim %}A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets.{% endverbatim %}
+ columnLayout.columns[].widgets[].collapsibleGroup.collapsed | Optional | boolean | {% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}
 columnLayout.columns[].widgets[].logsPanel
@@ -2799,6 +2827,26 @@ rowLayout:
 {% verbatim %}A blank space.{% endverbatim %}
+ gridLayout.widgets[].collapsibleGroup | Optional | object | {% verbatim %}A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets.{% endverbatim %}
+ gridLayout.widgets[].collapsibleGroup.collapsed | Optional | boolean | {% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}
 gridLayout.widgets[].logsPanel
@@ -4826,6 +4874,26 @@ rowLayout:
 {% verbatim %}A blank space.{% endverbatim %}
+ mosaicLayout.tiles[].widget.collapsibleGroup | Optional | object | {% verbatim %}A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets.{% endverbatim %}
+ mosaicLayout.tiles[].widget.collapsibleGroup.collapsed | Optional | boolean | {% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}
 mosaicLayout.tiles[].widget.logsPanel
@@ -6943,6 +7011,26 @@ rowLayout:
 {% verbatim %}A blank space.{% endverbatim %}
+ rowLayout.rows[].widgets[].collapsibleGroup | Optional | object | {% verbatim %}A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets.{% endverbatim %}
+ rowLayout.rows[].widgets[].collapsibleGroup.collapsed | Optional | boolean | {% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}
 rowLayout.rows[].widgets[].logsPanel

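For reference, a minimal MonitoringDashboard manifest exercising the new grouping widget might look like the sketch below (the resource name, display name, and choice of columnLayout are illustrative; only the collapsibleGroup field shape comes from the schema added in this patch):

    apiVersion: monitoring.cnrm.cloud.google.com/v1beta1
    kind: MonitoringDashboard
    metadata:
      name: monitoringdashboard-collapsiblegroup   # illustrative name
    spec:
      displayName: collapsible-group-example
      columnLayout:
        columns:
        - widgets:
          - title: "CollapsibleGroup Widget"
            collapsibleGroup:
              collapsed: true
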
From 807ef61b35cf9aad1f4686f5ef26bec5ff1a2d5f Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Sat, 22 Jun 2024 00:29:32 +0000 Subject: [PATCH 056/101] feat: promote ComputeManagedSSLCertificate to beta Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- ...ficates.compute.cnrm.cloud.google.com.yaml | 192 ++++++++++++++++-- config/servicemappings/compute.yaml | 23 +++ 2 files changed, 199 insertions(+), 16 deletions(-) diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computemanagedsslcertificates.compute.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computemanagedsslcertificates.compute.cnrm.cloud.google.com.yaml index a36c2e5666..cb980a5572 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computemanagedsslcertificates.compute.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computemanagedsslcertificates.compute.cnrm.cloud.google.com.yaml @@ -6,7 +6,7 @@ metadata: creationTimestamp: null labels: cnrm.cloud.google.com/managed-by-kcc: "true" - cnrm.cloud.google.com/stability-level: alpha + cnrm.cloud.google.com/stability-level: stable cnrm.cloud.google.com/system: "true" cnrm.cloud.google.com/tf2crd: "true" name: computemanagedsslcertificates.compute.cnrm.cloud.google.com @@ -40,7 +40,7 @@ spec: jsonPath: .status.conditions[?(@.type=='Ready')].lastTransitionTime name: Status Age type: date - name: v1alpha1 + name: v1beta1 schema: openAPIV3Schema: properties: @@ -118,9 +118,6 @@ spec: type: object status: properties: - certificateId: - description: The unique identifier for the resource. - type: integer conditions: description: Conditions represent the latest available observation of the resource's current state. @@ -147,12 +144,6 @@ spec: type: string type: object type: array - creationTimestamp: - description: Creation timestamp in RFC3339 text format. - type: string - expireTime: - description: Expire time of the certificate in RFC3339 text format. - type: string observedGeneration: description: ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. @@ -160,14 +151,183 @@ spec: current reported status reflects the most recent desired state of the resource. type: integer - selfLink: + observedState: + description: The observed state of the underlying GCP resource. + properties: + certificateId: + description: The unique identifier for the resource. + type: integer + creationTimestamp: + description: Creation timestamp in RFC3339 text format. + type: string + expireTime: + description: Expire time of the certificate in RFC3339 text format. + type: string + selfLink: + type: string + subjectAlternativeNames: + description: Domains associated with the certificate via Subject + Alternative Name. 
+ items: + type: string + type: array + type: object + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: When 'True', the most recent reconcile of the resource succeeded + jsonPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - description: The reason for the value in 'Ready' + jsonPath: .status.conditions[?(@.type=='Ready')].reason + name: Status + type: string + - description: The last transition time for the value in 'Status' + jsonPath: .status.conditions[?(@.type=='Ready')].lastTransitionTime + name: Status Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'apiVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + description: + description: Immutable. An optional description of this resource. type: string - subjectAlternativeNames: - description: Domains associated with the certificate via Subject Alternative - Name. + managed: + description: |- + Immutable. Properties relevant to a managed certificate. These will be used if the + certificate is managed (as indicated by a value of 'MANAGED' in 'type'). + properties: + domains: + description: |- + Immutable. Domains for which a managed SSL certificate will be valid. Currently, + there can be up to 100 domains in this list. + items: + type: string + type: array + required: + - domains + type: object + projectRef: + description: The project that this resource belongs to. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: 'Allowed value: The `name` field of a `Project` resource.' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + type: object + resourceID: + description: Immutable. Optional. The name of the resource. Used for + creation and acquisition. When unset, the value of `metadata.name` + is used as the default. + type: string + type: + description: |- + Immutable. Enum field whose value is always 'MANAGED' - used to signal to the API + which type this is. Default value: "MANAGED" Possible values: ["MANAGED"]. + type: string + required: + - projectRef + type: object + status: + properties: + conditions: + description: Conditions represent the latest available observation + of the resource's current state. items: - type: string + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. 
+ type: string + message: + description: Human-readable message indicating details about + last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's + last transition. + type: string + status: + description: Status is the status of the condition. Can be True, + False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object type: array + observedGeneration: + description: ObservedGeneration is the generation of the resource + that was most recently observed by the Config Connector controller. + If this is equal to metadata.generation, then that means that the + current reported status reflects the most recent desired state of + the resource. + type: integer + observedState: + description: The observed state of the underlying GCP resource. + properties: + certificateId: + description: The unique identifier for the resource. + type: integer + creationTimestamp: + description: Creation timestamp in RFC3339 text format. + type: string + expireTime: + description: Expire time of the certificate in RFC3339 text format. + type: string + selfLink: + type: string + subjectAlternativeNames: + description: Domains associated with the certificate via Subject + Alternative Name. + items: + type: string + type: array + type: object type: object required: - spec diff --git a/config/servicemappings/compute.yaml b/config/servicemappings/compute.yaml index 1ab1b4a22e..7e1aaa780a 100644 --- a/config/servicemappings/compute.yaml +++ b/config/servicemappings/compute.yaml @@ -1246,6 +1246,29 @@ spec: containers: - type: project tfField: project + - name: google_compute_managed_ssl_certificate + kind: ComputeManagedSSLCertificate + idTemplate: "projects/{{project}}/global/sslCertificates/{{name}}" + idTemplateCanBeUsedToMatchResourceName: false + resourceAvailableInAssetInventory: false + v1alpha1ToV1beta1: true + storageVersion: v1alpha1 + metadataMapping: + name: name + resourceID: + targetField: name + hierarchicalReferences: + - type: project + key: projectRef + resourceReferences: + - tfField: project + key: projectRef + description: |- + The project that this resource belongs to. + gvk: + kind: Project + version: v1beta1 + group: resourcemanager.cnrm.cloud.google.com - name: google_compute_network kind: ComputeNetwork metadataMapping: From 2b133110359d319f21e3fa94da4c95f0d17189cb Mon Sep 17 00:00:00 2001 From: justinsb Date: Wed, 26 Jun 2024 17:44:47 -0400 Subject: [PATCH 057/101] chore: don't try to install protoc if already available This avoids requiring sudo every invocation of make. --- dev/tools/proto-to-mapper/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev/tools/proto-to-mapper/Makefile b/dev/tools/proto-to-mapper/Makefile index 33eecff19a..aa2829341b 100644 --- a/dev/tools/proto-to-mapper/Makefile +++ b/dev/tools/proto-to-mapper/Makefile @@ -16,7 +16,7 @@ generate-pb: install-protoc-linux go run . 
-apis ../../../apis/ --api-packages github.com/GoogleCloudPlatform/apis install-protoc-linux: - sudo apt install -y protobuf-compiler + which protoc || sudo apt install -y protobuf-compiler install-protoc-mac: sudo brew install protobuf From bc0bb878952d53f23dceac9d42b1281d665645fa Mon Sep 17 00:00:00 2001 From: Yuwen Ma Date: Wed, 26 Jun 2024 22:25:58 +0000 Subject: [PATCH 058/101] chore: cbwp change status networkRef to external --- apis/cloudbuild/v1alpha1/workerpool_types.go | 27 ++++++++++-- .../v1alpha1/zz_generated.deepcopy.go | 42 ++++++++++++++++--- ...ools.cloudbuild.cnrm.cloud.google.com.yaml | 38 +++-------------- .../v1alpha1/cloudbuildworkerpool_types.go | 8 ++-- .../v1alpha1/zz_generated.deepcopy.go | 10 ++--- .../direct/cloudbuild/workerpool_mappings.go | 27 +++++++++--- ...ed_object_cloudbuildworkerpool.golden.yaml | 3 +- 7 files changed, 97 insertions(+), 58 deletions(-) diff --git a/apis/cloudbuild/v1alpha1/workerpool_types.go b/apis/cloudbuild/v1alpha1/workerpool_types.go index 82dc4ce3dc..5714706ca5 100644 --- a/apis/cloudbuild/v1alpha1/workerpool_types.go +++ b/apis/cloudbuild/v1alpha1/workerpool_types.go @@ -54,7 +54,7 @@ type PrivatePoolV1Config struct { WorkerConfig *PrivatePoolV1Config_WorkerConfig `json:"workerConfig,omitempty"` // Network configuration for the pool. - NetworkConfig *PrivatePoolV1Config_NetworkConfig `json:"networkConfig,omitempty"` + NetworkConfig *PrivatePoolV1Config_NetworkConfigSpec `json:"networkConfig,omitempty"` } // +kcc:proto=google.devtools.cloudbuild.v1.PrivatePoolV1Config.WorkerConfig @@ -74,7 +74,7 @@ type PrivatePoolV1Config_WorkerConfig struct { } // +kcc:proto=google.devtools.cloudbuild.v1.PrivatePoolV1Config.NetworkConfig -type PrivatePoolV1Config_NetworkConfig struct { +type PrivatePoolV1Config_NetworkConfigSpec struct { // Immutable. The network definition that the workers are peered // to. If this section is left empty, the workers will be peered to // `WorkerPool.project_id` on the service producer network. @@ -94,6 +94,27 @@ type PrivatePoolV1Config_NetworkConfig struct { PeeredNetworkIPRange *string `json:"peeredNetworkIPRange,omitempty"` } +// +kcc:proto=google.devtools.cloudbuild.v1.PrivatePoolV1Config.NetworkConfig +type PrivatePoolV1Config_NetworkConfigStatus struct { + // Immutable. The network definition that the workers are peered + // to. If this section is left empty, the workers will be peered to + // `WorkerPool.project_id` on the service producer network. + PeeredNetwork *string `json:"peeredNetwork,omitempty"` + + // Option to configure network egress for the workers. + EgressOption *string `json:"egressOption,omitempty"` + + // Immutable. Subnet IP range within the peered network. This is specified + // in CIDR notation with a slash and the subnet prefix size. You can + // optionally specify an IP address before the subnet prefix value. e.g. + // `192.168.0.0/29` would specify an IP range starting at 192.168.0.0 with a + // prefix size of 29 bits. + // `/16` would specify a prefix size of 16 bits, with an automatically + // determined IP within the peered VPC. + // If unspecified, a value of `/24` will be used. 
+ PeeredNetworkIPRange *string `json:"peeredNetworkIPRange,omitempty"` +} + // CloudBuildWorkerPoolStatus defines the observed state of Instance type CloudBuildWorkerPoolStatus struct { /* Conditions represent the latest available observations of the @@ -130,7 +151,7 @@ type CloudBuildWorkerPoolObservedState struct { WorkerConfig *PrivatePoolV1Config_WorkerConfig `json:"workerConfig,omitempty"` // Network configuration for the pool. - NetworkConfig *PrivatePoolV1Config_NetworkConfig `json:"networkConfig,omitempty"` + NetworkConfig *PrivatePoolV1Config_NetworkConfigStatus `json:"networkConfig,omitempty"` /* The Checksum computed by the server, using weak indicator.*/ // +optional diff --git a/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go b/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go index 34b685e81f..22e40cd60d 100644 --- a/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go +++ b/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go @@ -89,7 +89,7 @@ func (in *CloudBuildWorkerPoolObservedState) DeepCopyInto(out *CloudBuildWorkerP } if in.NetworkConfig != nil { in, out := &in.NetworkConfig, &out.NetworkConfig - *out = new(PrivatePoolV1Config_NetworkConfig) + *out = new(PrivatePoolV1Config_NetworkConfigStatus) (*in).DeepCopyInto(*out) } if in.ETag != nil { @@ -184,7 +184,7 @@ func (in *PrivatePoolV1Config) DeepCopyInto(out *PrivatePoolV1Config) { } if in.NetworkConfig != nil { in, out := &in.NetworkConfig, &out.NetworkConfig - *out = new(PrivatePoolV1Config_NetworkConfig) + *out = new(PrivatePoolV1Config_NetworkConfigSpec) (*in).DeepCopyInto(*out) } } @@ -200,7 +200,7 @@ func (in *PrivatePoolV1Config) DeepCopy() *PrivatePoolV1Config { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PrivatePoolV1Config_NetworkConfig) DeepCopyInto(out *PrivatePoolV1Config_NetworkConfig) { +func (in *PrivatePoolV1Config_NetworkConfigSpec) DeepCopyInto(out *PrivatePoolV1Config_NetworkConfigSpec) { *out = *in out.PeeredNetworkRef = in.PeeredNetworkRef if in.EgressOption != nil { @@ -215,12 +215,42 @@ func (in *PrivatePoolV1Config_NetworkConfig) DeepCopyInto(out *PrivatePoolV1Conf } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivatePoolV1Config_NetworkConfig. -func (in *PrivatePoolV1Config_NetworkConfig) DeepCopy() *PrivatePoolV1Config_NetworkConfig { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivatePoolV1Config_NetworkConfigSpec. +func (in *PrivatePoolV1Config_NetworkConfigSpec) DeepCopy() *PrivatePoolV1Config_NetworkConfigSpec { if in == nil { return nil } - out := new(PrivatePoolV1Config_NetworkConfig) + out := new(PrivatePoolV1Config_NetworkConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivatePoolV1Config_NetworkConfigStatus) DeepCopyInto(out *PrivatePoolV1Config_NetworkConfigStatus) { + *out = *in + if in.PeeredNetwork != nil { + in, out := &in.PeeredNetwork, &out.PeeredNetwork + *out = new(string) + **out = **in + } + if in.EgressOption != nil { + in, out := &in.EgressOption, &out.EgressOption + *out = new(string) + **out = **in + } + if in.PeeredNetworkIPRange != nil { + in, out := &in.PeeredNetworkIPRange, &out.PeeredNetworkIPRange + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivatePoolV1Config_NetworkConfigStatus. +func (in *PrivatePoolV1Config_NetworkConfigStatus) DeepCopy() *PrivatePoolV1Config_NetworkConfigStatus { + if in == nil { + return nil + } + out := new(PrivatePoolV1Config_NetworkConfigStatus) in.DeepCopyInto(out) return out } diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudbuildworkerpools.cloudbuild.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudbuildworkerpools.cloudbuild.cnrm.cloud.google.com.yaml index f7f83b9bd6..8b4b5614b2 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudbuildworkerpools.cloudbuild.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudbuildworkerpools.cloudbuild.cnrm.cloud.google.com.yaml @@ -216,6 +216,12 @@ spec: egressOption: description: Option to configure network egress for the workers. type: string + peeredNetwork: + description: Immutable. The network definition that the workers + are peered to. If this section is left empty, the workers + will be peered to `WorkerPool.project_id` on the service + producer network. + type: string peeredNetworkIPRange: description: Immutable. Subnet IP range within the peered network. This is specified in CIDR notation with a slash @@ -226,38 +232,6 @@ spec: of 16 bits, with an automatically determined IP within the peered VPC. If unspecified, a value of `/24` will be used. type: string - peeredNetworkRef: - description: Immutable. The network definition that the workers - are peered to. If this section is left empty, the workers - will be peered to `WorkerPool.project_id` on the service - producer network. - oneOf: - - not: - required: - - external - required: - - name - - not: - anyOf: - - required: - - name - - required: - - namespace - required: - - external - properties: - external: - description: The compute network selflink of form "projects//global/networks/", - when not managed by KCC. - type: string - name: - description: The `name` field of a `ComputeNetwork` resource. - type: string - namespace: - description: The `namespace` field of a `ComputeNetwork` - resource. - type: string - type: object type: object updateTime: description: The last update timestamp of the workerpool. diff --git a/pkg/clients/generated/apis/cloudbuild/v1alpha1/cloudbuildworkerpool_types.go b/pkg/clients/generated/apis/cloudbuild/v1alpha1/cloudbuildworkerpool_types.go index 34c5a7af91..12d4f8cbd4 100644 --- a/pkg/clients/generated/apis/cloudbuild/v1alpha1/cloudbuildworkerpool_types.go +++ b/pkg/clients/generated/apis/cloudbuild/v1alpha1/cloudbuildworkerpool_types.go @@ -91,13 +91,13 @@ type WorkerpoolNetworkConfigStatus struct { // +optional EgressOption *string `json:"egressOption,omitempty"` - /* Immutable. Subnet IP range within the peered network. 
This is specified in CIDR notation with a slash and the subnet prefix size. You can optionally specify an IP address before the subnet prefix value. e.g. `192.168.0.0/29` would specify an IP range starting at 192.168.0.0 with a prefix size of 29 bits. `/16` would specify a prefix size of 16 bits, with an automatically determined IP within the peered VPC. If unspecified, a value of `/24` will be used. */ + /* Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. */ // +optional - PeeredNetworkIPRange *string `json:"peeredNetworkIPRange,omitempty"` + PeeredNetwork *string `json:"peeredNetwork,omitempty"` - /* Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. */ + /* Immutable. Subnet IP range within the peered network. This is specified in CIDR notation with a slash and the subnet prefix size. You can optionally specify an IP address before the subnet prefix value. e.g. `192.168.0.0/29` would specify an IP range starting at 192.168.0.0 with a prefix size of 29 bits. `/16` would specify a prefix size of 16 bits, with an automatically determined IP within the peered VPC. If unspecified, a value of `/24` will be used. */ // +optional - PeeredNetworkRef *v1alpha1.ResourceRef `json:"peeredNetworkRef,omitempty"` + PeeredNetworkIPRange *string `json:"peeredNetworkIPRange,omitempty"` } type WorkerpoolObservedStateStatus struct { diff --git a/pkg/clients/generated/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go index 8969c597aa..69d2a97473 100644 --- a/pkg/clients/generated/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go @@ -193,14 +193,14 @@ func (in *WorkerpoolNetworkConfigStatus) DeepCopyInto(out *WorkerpoolNetworkConf *out = new(string) **out = **in } - if in.PeeredNetworkIPRange != nil { - in, out := &in.PeeredNetworkIPRange, &out.PeeredNetworkIPRange + if in.PeeredNetwork != nil { + in, out := &in.PeeredNetwork, &out.PeeredNetwork *out = new(string) **out = **in } - if in.PeeredNetworkRef != nil { - in, out := &in.PeeredNetworkRef, &out.PeeredNetworkRef - *out = new(k8sv1alpha1.ResourceRef) + if in.PeeredNetworkIPRange != nil { + in, out := &in.PeeredNetworkIPRange, &out.PeeredNetworkIPRange + *out = new(string) **out = **in } return diff --git a/pkg/controller/direct/cloudbuild/workerpool_mappings.go b/pkg/controller/direct/cloudbuild/workerpool_mappings.go index 9fb1bb4b51..eedf53f854 100644 --- a/pkg/controller/direct/cloudbuild/workerpool_mappings.go +++ b/pkg/controller/direct/cloudbuild/workerpool_mappings.go @@ -29,11 +29,15 @@ func CloudBuildWorkerPoolObservedState_FromProto(mapCtx *MapContext, in *pb.Work } out := &krm.CloudBuildWorkerPoolObservedState{} out.ETag = LazyPtr(in.Etag) - privateConfig := PrivatePoolV1Config_FromProto(mapCtx, in.GetPrivatePoolV1Config()) - out.NetworkConfig = privateConfig.NetworkConfig - out.WorkerConfig = privateConfig.WorkerConfig out.CreateTime = ToOpenAPIDateTime(in.GetCreateTime()) out.UpdateTime = ToOpenAPIDateTime(in.GetUpdateTime()) + + privateConfig := in.GetPrivatePoolV1Config() + if privateConfig != nil { + // privateConfig := PrivatePoolV1ConfigStatus_FromProto(mapCtx, in.GetPrivatePoolV1Config()) + out.WorkerConfig = 
PrivatePoolV1Config_WorkerConfig_FromProto(mapCtx, privateConfig.GetWorkerConfig()) + out.NetworkConfig = PrivatePoolV1Config_NetworkConfigStatus_FromProto(mapCtx, privateConfig.GetNetworkConfig()) + } return out } @@ -49,6 +53,17 @@ func CloudBuildWorkerPoolSpec_ToProto(mapCtx *MapContext, in *krm.CloudBuildWork return out } +func PrivatePoolV1Config_NetworkConfigStatus_FromProto(mapCtx *MapContext, in *pb.PrivatePoolV1Config_NetworkConfig) *krm.PrivatePoolV1Config_NetworkConfigStatus { + if in == nil { + return nil + } + out := &krm.PrivatePoolV1Config_NetworkConfigStatus{} + out.PeeredNetwork = LazyPtr(in.GetPeeredNetwork()) + out.EgressOption = Enum_FromProto(mapCtx, in.EgressOption) + out.PeeredNetworkIPRange = LazyPtr(in.GetPeeredNetworkIpRange()) + return out +} + func PrivatePoolV1Config_FromProto(mapCtx *MapContext, in *pb.PrivatePoolV1Config) *krm.PrivatePoolV1Config { if in == nil { return nil @@ -67,11 +82,11 @@ func PrivatePoolV1Config_ToProto(mapCtx *MapContext, in *krm.PrivatePoolV1Config out.NetworkConfig = PrivatePoolV1Config_NetworkConfig_ToProto(mapCtx, in.NetworkConfig) return out } -func PrivatePoolV1Config_NetworkConfig_FromProto(mapCtx *MapContext, in *pb.PrivatePoolV1Config_NetworkConfig) *krm.PrivatePoolV1Config_NetworkConfig { +func PrivatePoolV1Config_NetworkConfig_FromProto(mapCtx *MapContext, in *pb.PrivatePoolV1Config_NetworkConfig) *krm.PrivatePoolV1Config_NetworkConfigSpec { if in == nil { return nil } - out := &krm.PrivatePoolV1Config_NetworkConfig{} + out := &krm.PrivatePoolV1Config_NetworkConfigSpec{} out.PeeredNetworkRef = refv1beta1.ComputeNetworkRef{ External: in.GetPeeredNetwork(), } @@ -79,7 +94,7 @@ func PrivatePoolV1Config_NetworkConfig_FromProto(mapCtx *MapContext, in *pb.Priv out.PeeredNetworkIPRange = LazyPtr(in.GetPeeredNetworkIpRange()) return out } -func PrivatePoolV1Config_NetworkConfig_ToProto(mapCtx *MapContext, in *krm.PrivatePoolV1Config_NetworkConfig) *pb.PrivatePoolV1Config_NetworkConfig { +func PrivatePoolV1Config_NetworkConfig_ToProto(mapCtx *MapContext, in *krm.PrivatePoolV1Config_NetworkConfigSpec) *pb.PrivatePoolV1Config_NetworkConfig { if in == nil { return nil } diff --git a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml index dbe2301dc7..94c00dde23 100644 --- a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml @@ -37,9 +37,8 @@ status: etag: abcdef123456 networkConfig: egressOption: NO_PUBLIC_EGRESS + peeredNetwork: projects/${projectId}/global/networks/computenetwork-${uniqueId} peeredNetworkIPRange: /29 - peeredNetworkRef: - external: projects/${projectId}/global/networks/computenetwork-${uniqueId} updateTime: "1970-01-01T00:00:00Z" workerConfig: diskSizeGb: 100 From 39d8b0e39efd5129e91e1b63e77d999815a631ea Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Mon, 24 Jun 2024 21:41:17 +0000 Subject: [PATCH 059/101] docs: ComputeManagedSSLCertificate chore: make ready-pr Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- .../apis/compute/v1alpha1/register.go | 6 - .../compute/v1alpha1/zz_generated.deepcopy.go | 170 -------- 
.../computemanagedsslcertificate_types.go | 26 +- .../apis/compute/v1beta1/register.go | 6 + .../compute/v1beta1/zz_generated.deepcopy.go | 191 +++++++++ .../typed/compute/v1alpha1/compute_client.go | 5 - .../v1alpha1/fake/fake_compute_client.go | 4 - .../compute/v1alpha1/generated_expansion.go | 2 - .../typed/compute/v1beta1/compute_client.go | 5 + .../computemanagedsslcertificate.go | 42 +- .../v1beta1/fake/fake_compute_client.go | 4 + .../fake/fake_computemanagedsslcertificate.go | 50 +-- .../compute/v1beta1/generated_expansion.go | 2 + .../resource-reference/_toc.yaml | 2 + .../compute/computemanagedsslcertificate.md | 376 ++++++++++++++++++ .../resource-reference/overview.md | 4 + .../compute_computemanagedsslcertificate.tmpl | 54 +++ 17 files changed, 706 insertions(+), 243 deletions(-) rename pkg/clients/generated/apis/compute/{v1alpha1 => v1beta1}/computemanagedsslcertificate_types.go (93%) rename pkg/clients/generated/client/clientset/versioned/typed/compute/{v1alpha1 => v1beta1}/computemanagedsslcertificate.go (76%) rename pkg/clients/generated/client/clientset/versioned/typed/compute/{v1alpha1 => v1beta1}/fake/fake_computemanagedsslcertificate.go (66%) create mode 100644 scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computemanagedsslcertificate.md create mode 100644 scripts/generate-google3-docs/resource-reference/templates/compute_computemanagedsslcertificate.tmpl diff --git a/pkg/clients/generated/apis/compute/v1alpha1/register.go b/pkg/clients/generated/apis/compute/v1alpha1/register.go index 537b62c954..f2eb74b21c 100644 --- a/pkg/clients/generated/apis/compute/v1alpha1/register.go +++ b/pkg/clients/generated/apis/compute/v1alpha1/register.go @@ -101,12 +101,6 @@ var ( Kind: reflect.TypeOf(ComputeMachineImage{}).Name(), } - ComputeManagedSSLCertificateGVK = schema.GroupVersionKind{ - Group: SchemeGroupVersion.Group, - Version: SchemeGroupVersion.Version, - Kind: reflect.TypeOf(ComputeManagedSSLCertificate{}).Name(), - } - ComputeNetworkEndpointGVK = schema.GroupVersionKind{ Group: SchemeGroupVersion.Group, Version: SchemeGroupVersion.Version, diff --git a/pkg/clients/generated/apis/compute/v1alpha1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/compute/v1alpha1/zz_generated.deepcopy.go index 3dc92d949d..aa93744130 100644 --- a/pkg/clients/generated/apis/compute/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/compute/v1alpha1/zz_generated.deepcopy.go @@ -1337,155 +1337,6 @@ func (in *ComputeMachineImageStatus) DeepCopy() *ComputeMachineImageStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComputeManagedSSLCertificate) DeepCopyInto(out *ComputeManagedSSLCertificate) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeManagedSSLCertificate. -func (in *ComputeManagedSSLCertificate) DeepCopy() *ComputeManagedSSLCertificate { - if in == nil { - return nil - } - out := new(ComputeManagedSSLCertificate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ComputeManagedSSLCertificate) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComputeManagedSSLCertificateList) DeepCopyInto(out *ComputeManagedSSLCertificateList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ComputeManagedSSLCertificate, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeManagedSSLCertificateList. -func (in *ComputeManagedSSLCertificateList) DeepCopy() *ComputeManagedSSLCertificateList { - if in == nil { - return nil - } - out := new(ComputeManagedSSLCertificateList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ComputeManagedSSLCertificateList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComputeManagedSSLCertificateSpec) DeepCopyInto(out *ComputeManagedSSLCertificateSpec) { - *out = *in - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(string) - **out = **in - } - if in.Managed != nil { - in, out := &in.Managed, &out.Managed - *out = new(ManagedsslcertificateManaged) - (*in).DeepCopyInto(*out) - } - out.ProjectRef = in.ProjectRef - if in.ResourceID != nil { - in, out := &in.ResourceID, &out.ResourceID - *out = new(string) - **out = **in - } - if in.Type != nil { - in, out := &in.Type, &out.Type - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeManagedSSLCertificateSpec. -func (in *ComputeManagedSSLCertificateSpec) DeepCopy() *ComputeManagedSSLCertificateSpec { - if in == nil { - return nil - } - out := new(ComputeManagedSSLCertificateSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ComputeManagedSSLCertificateStatus) DeepCopyInto(out *ComputeManagedSSLCertificateStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]k8sv1alpha1.Condition, len(*in)) - copy(*out, *in) - } - if in.CertificateId != nil { - in, out := &in.CertificateId, &out.CertificateId - *out = new(int64) - **out = **in - } - if in.CreationTimestamp != nil { - in, out := &in.CreationTimestamp, &out.CreationTimestamp - *out = new(string) - **out = **in - } - if in.ExpireTime != nil { - in, out := &in.ExpireTime, &out.ExpireTime - *out = new(string) - **out = **in - } - if in.ObservedGeneration != nil { - in, out := &in.ObservedGeneration, &out.ObservedGeneration - *out = new(int64) - **out = **in - } - if in.SelfLink != nil { - in, out := &in.SelfLink, &out.SelfLink - *out = new(string) - **out = **in - } - if in.SubjectAlternativeNames != nil { - in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeManagedSSLCertificateStatus. -func (in *ComputeManagedSSLCertificateStatus) DeepCopy() *ComputeManagedSSLCertificateStatus { - if in == nil { - return nil - } - out := new(ComputeManagedSSLCertificateStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ComputeNetworkEndpoint) DeepCopyInto(out *ComputeNetworkEndpoint) { *out = *in @@ -2924,27 +2775,6 @@ func (in *MachineimageMachineImageEncryptionKey) DeepCopy() *MachineimageMachine return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedsslcertificateManaged) DeepCopyInto(out *ManagedsslcertificateManaged) { - *out = *in - if in.Domains != nil { - in, out := &in.Domains, &out.Domains - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedsslcertificateManaged. -func (in *ManagedsslcertificateManaged) DeepCopy() *ManagedsslcertificateManaged { - if in == nil { - return nil - } - out := new(ManagedsslcertificateManaged) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NetworkfirewallpolicyruleLayer4Configs) DeepCopyInto(out *NetworkfirewallpolicyruleLayer4Configs) { *out = *in diff --git a/pkg/clients/generated/apis/compute/v1alpha1/computemanagedsslcertificate_types.go b/pkg/clients/generated/apis/compute/v1beta1/computemanagedsslcertificate_types.go similarity index 93% rename from pkg/clients/generated/apis/compute/v1alpha1/computemanagedsslcertificate_types.go rename to pkg/clients/generated/apis/compute/v1beta1/computemanagedsslcertificate_types.go index 0befa241e7..4b672876a4 100644 --- a/pkg/clients/generated/apis/compute/v1alpha1/computemanagedsslcertificate_types.go +++ b/pkg/clients/generated/apis/compute/v1beta1/computemanagedsslcertificate_types.go @@ -28,7 +28,7 @@ // that future versions of the go-client may include breaking changes. // Please try it out and give us feedback! 
-package v1alpha1 +package v1beta1 import ( "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" @@ -64,10 +64,7 @@ type ComputeManagedSSLCertificateSpec struct { Type *string `json:"type,omitempty"` } -type ComputeManagedSSLCertificateStatus struct { - /* Conditions represent the latest available observations of the - ComputeManagedSSLCertificate's current state. */ - Conditions []v1alpha1.Condition `json:"conditions,omitempty"` +type ManagedsslcertificateObservedStateStatus struct { /* The unique identifier for the resource. */ // +optional CertificateId *int64 `json:"certificateId,omitempty"` @@ -80,10 +77,6 @@ type ComputeManagedSSLCertificateStatus struct { // +optional ExpireTime *string `json:"expireTime,omitempty"` - /* ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. If this is equal to metadata.generation, then that means that the current reported status reflects the most recent desired state of the resource. */ - // +optional - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` - // +optional SelfLink *string `json:"selfLink,omitempty"` @@ -92,11 +85,24 @@ type ComputeManagedSSLCertificateStatus struct { SubjectAlternativeNames []string `json:"subjectAlternativeNames,omitempty"` } +type ComputeManagedSSLCertificateStatus struct { + /* Conditions represent the latest available observations of the + ComputeManagedSSLCertificate's current state. */ + Conditions []v1alpha1.Condition `json:"conditions,omitempty"` + /* ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. If this is equal to metadata.generation, then that means that the current reported status reflects the most recent desired state of the resource. */ + // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + + /* The observed state of the underlying GCP resource. 
*/ + // +optional + ObservedState *ManagedsslcertificateObservedStateStatus `json:"observedState,omitempty"` +} + // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:resource:categories=gcp,shortName=gcpcomputemanagedsslcertificate;gcpcomputemanagedsslcertificates // +kubebuilder:subresource:status -// +kubebuilder:metadata:labels="cnrm.cloud.google.com/managed-by-kcc=true";"cnrm.cloud.google.com/stability-level=alpha";"cnrm.cloud.google.com/system=true";"cnrm.cloud.google.com/tf2crd=true" +// +kubebuilder:metadata:labels="cnrm.cloud.google.com/managed-by-kcc=true";"cnrm.cloud.google.com/stability-level=stable";"cnrm.cloud.google.com/system=true";"cnrm.cloud.google.com/tf2crd=true" // +kubebuilder:printcolumn:name="Age",JSONPath=".metadata.creationTimestamp",type="date" // +kubebuilder:printcolumn:name="Ready",JSONPath=".status.conditions[?(@.type=='Ready')].status",type="string",description="When 'True', the most recent reconcile of the resource succeeded" // +kubebuilder:printcolumn:name="Status",JSONPath=".status.conditions[?(@.type=='Ready')].reason",type="string",description="The reason for the value in 'Ready'" diff --git a/pkg/clients/generated/apis/compute/v1beta1/register.go b/pkg/clients/generated/apis/compute/v1beta1/register.go index 0f159186b6..64f6892c32 100644 --- a/pkg/clients/generated/apis/compute/v1beta1/register.go +++ b/pkg/clients/generated/apis/compute/v1beta1/register.go @@ -167,6 +167,12 @@ var ( Kind: reflect.TypeOf(ComputeInterconnectAttachment{}).Name(), } + ComputeManagedSSLCertificateGVK = schema.GroupVersionKind{ + Group: SchemeGroupVersion.Group, + Version: SchemeGroupVersion.Version, + Kind: reflect.TypeOf(ComputeManagedSSLCertificate{}).Name(), + } + ComputeNetworkGVK = schema.GroupVersionKind{ Group: SchemeGroupVersion.Group, Version: SchemeGroupVersion.Version, diff --git a/pkg/clients/generated/apis/compute/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/compute/v1beta1/zz_generated.deepcopy.go index 4199659dae..cd8378fb9e 100644 --- a/pkg/clients/generated/apis/compute/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/compute/v1beta1/zz_generated.deepcopy.go @@ -4666,6 +4666,135 @@ func (in *ComputeInterconnectAttachmentStatus) DeepCopy() *ComputeInterconnectAt return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeManagedSSLCertificate) DeepCopyInto(out *ComputeManagedSSLCertificate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeManagedSSLCertificate. +func (in *ComputeManagedSSLCertificate) DeepCopy() *ComputeManagedSSLCertificate { + if in == nil { + return nil + } + out := new(ComputeManagedSSLCertificate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ComputeManagedSSLCertificate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComputeManagedSSLCertificateList) DeepCopyInto(out *ComputeManagedSSLCertificateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ComputeManagedSSLCertificate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeManagedSSLCertificateList. +func (in *ComputeManagedSSLCertificateList) DeepCopy() *ComputeManagedSSLCertificateList { + if in == nil { + return nil + } + out := new(ComputeManagedSSLCertificateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ComputeManagedSSLCertificateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeManagedSSLCertificateSpec) DeepCopyInto(out *ComputeManagedSSLCertificateSpec) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Managed != nil { + in, out := &in.Managed, &out.Managed + *out = new(ManagedsslcertificateManaged) + (*in).DeepCopyInto(*out) + } + out.ProjectRef = in.ProjectRef + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeManagedSSLCertificateSpec. +func (in *ComputeManagedSSLCertificateSpec) DeepCopy() *ComputeManagedSSLCertificateSpec { + if in == nil { + return nil + } + out := new(ComputeManagedSSLCertificateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeManagedSSLCertificateStatus) DeepCopyInto(out *ComputeManagedSSLCertificateStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1alpha1.Condition, len(*in)) + copy(*out, *in) + } + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.ObservedState != nil { + in, out := &in.ObservedState, &out.ObservedState + *out = new(ManagedsslcertificateObservedStateStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeManagedSSLCertificateStatus. +func (in *ComputeManagedSSLCertificateStatus) DeepCopy() *ComputeManagedSSLCertificateStatus { + if in == nil { + return nil + } + out := new(ComputeManagedSSLCertificateStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ComputeNetwork) DeepCopyInto(out *ComputeNetwork) { *out = *in @@ -13044,6 +13173,68 @@ func (in *InterconnectattachmentPrivateInterconnectInfoStatus) DeepCopy() *Inter return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedsslcertificateManaged) DeepCopyInto(out *ManagedsslcertificateManaged) { + *out = *in + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedsslcertificateManaged. +func (in *ManagedsslcertificateManaged) DeepCopy() *ManagedsslcertificateManaged { + if in == nil { + return nil + } + out := new(ManagedsslcertificateManaged) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedsslcertificateObservedStateStatus) DeepCopyInto(out *ManagedsslcertificateObservedStateStatus) { + *out = *in + if in.CertificateId != nil { + in, out := &in.CertificateId, &out.CertificateId + *out = new(int64) + **out = **in + } + if in.CreationTimestamp != nil { + in, out := &in.CreationTimestamp, &out.CreationTimestamp + *out = new(string) + **out = **in + } + if in.ExpireTime != nil { + in, out := &in.ExpireTime, &out.ExpireTime + *out = new(string) + **out = **in + } + if in.SelfLink != nil { + in, out := &in.SelfLink, &out.SelfLink + *out = new(string) + **out = **in + } + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedsslcertificateObservedStateStatus. +func (in *ManagedsslcertificateObservedStateStatus) DeepCopy() *ManagedsslcertificateObservedStateStatus { + if in == nil { + return nil + } + out := new(ManagedsslcertificateObservedStateStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NodegroupAutoscalingPolicy) DeepCopyInto(out *NodegroupAutoscalingPolicy) { *out = *in diff --git a/pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/compute_client.go b/pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/compute_client.go index 7fae32cd48..23931da476 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/compute_client.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/compute_client.go @@ -39,7 +39,6 @@ type ComputeV1alpha1Interface interface { ComputeGlobalNetworkEndpointGroupsGetter ComputeInstanceGroupNamedPortsGetter ComputeMachineImagesGetter - ComputeManagedSSLCertificatesGetter ComputeNetworkEndpointsGetter ComputeNetworkFirewallPolicyRulesGetter ComputeNetworkPeeringRoutesConfigsGetter @@ -90,10 +89,6 @@ func (c *ComputeV1alpha1Client) ComputeMachineImages(namespace string) ComputeMa return newComputeMachineImages(c, namespace) } -func (c *ComputeV1alpha1Client) ComputeManagedSSLCertificates(namespace string) ComputeManagedSSLCertificateInterface { - return newComputeManagedSSLCertificates(c, namespace) -} - func (c *ComputeV1alpha1Client) ComputeNetworkEndpoints(namespace string) ComputeNetworkEndpointInterface { return newComputeNetworkEndpoints(c, namespace) } diff --git a/pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/fake/fake_compute_client.go b/pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/fake/fake_compute_client.go index 075f6110a5..b5a7066e74 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/fake/fake_compute_client.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/fake/fake_compute_client.go @@ -63,10 +63,6 @@ func (c *FakeComputeV1alpha1) ComputeMachineImages(namespace string) v1alpha1.Co return &FakeComputeMachineImages{c, namespace} } -func (c *FakeComputeV1alpha1) ComputeManagedSSLCertificates(namespace string) v1alpha1.ComputeManagedSSLCertificateInterface { - return &FakeComputeManagedSSLCertificates{c, namespace} -} - func (c *FakeComputeV1alpha1) ComputeNetworkEndpoints(namespace string) v1alpha1.ComputeNetworkEndpointInterface { return &FakeComputeNetworkEndpoints{c, namespace} } diff --git a/pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/generated_expansion.go b/pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/generated_expansion.go index 7e20ee7a63..ba0888f7bc 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/generated_expansion.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/generated_expansion.go @@ -37,8 +37,6 @@ type ComputeInstanceGroupNamedPortExpansion interface{} type ComputeMachineImageExpansion interface{} -type ComputeManagedSSLCertificateExpansion interface{} - type ComputeNetworkEndpointExpansion interface{} type ComputeNetworkFirewallPolicyRuleExpansion interface{} diff --git a/pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/compute_client.go b/pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/compute_client.go index cb0bc28c3e..237a11dfe0 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/compute_client.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/compute_client.go @@ -50,6 +50,7 @@ type ComputeV1beta1Interface interface { ComputeInstanceGroupManagersGetter 
ComputeInstanceTemplatesGetter ComputeInterconnectAttachmentsGetter + ComputeManagedSSLCertificatesGetter ComputeNetworksGetter ComputeNetworkEndpointGroupsGetter ComputeNetworkFirewallPoliciesGetter @@ -169,6 +170,10 @@ func (c *ComputeV1beta1Client) ComputeInterconnectAttachments(namespace string) return newComputeInterconnectAttachments(c, namespace) } +func (c *ComputeV1beta1Client) ComputeManagedSSLCertificates(namespace string) ComputeManagedSSLCertificateInterface { + return newComputeManagedSSLCertificates(c, namespace) +} + func (c *ComputeV1beta1Client) ComputeNetworks(namespace string) ComputeNetworkInterface { return newComputeNetworks(c, namespace) } diff --git a/pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/computemanagedsslcertificate.go b/pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/computemanagedsslcertificate.go similarity index 76% rename from pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/computemanagedsslcertificate.go rename to pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/computemanagedsslcertificate.go index cec24a5b2f..274a566a89 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/computemanagedsslcertificate.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/computemanagedsslcertificate.go @@ -19,13 +19,13 @@ // Code generated by main. DO NOT EDIT. -package v1alpha1 +package v1beta1 import ( "context" "time" - v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/compute/v1alpha1" + v1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/compute/v1beta1" scheme "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -41,15 +41,15 @@ type ComputeManagedSSLCertificatesGetter interface { // ComputeManagedSSLCertificateInterface has methods to work with ComputeManagedSSLCertificate resources. 
type ComputeManagedSSLCertificateInterface interface { - Create(ctx context.Context, computeManagedSSLCertificate *v1alpha1.ComputeManagedSSLCertificate, opts v1.CreateOptions) (*v1alpha1.ComputeManagedSSLCertificate, error) - Update(ctx context.Context, computeManagedSSLCertificate *v1alpha1.ComputeManagedSSLCertificate, opts v1.UpdateOptions) (*v1alpha1.ComputeManagedSSLCertificate, error) - UpdateStatus(ctx context.Context, computeManagedSSLCertificate *v1alpha1.ComputeManagedSSLCertificate, opts v1.UpdateOptions) (*v1alpha1.ComputeManagedSSLCertificate, error) + Create(ctx context.Context, computeManagedSSLCertificate *v1beta1.ComputeManagedSSLCertificate, opts v1.CreateOptions) (*v1beta1.ComputeManagedSSLCertificate, error) + Update(ctx context.Context, computeManagedSSLCertificate *v1beta1.ComputeManagedSSLCertificate, opts v1.UpdateOptions) (*v1beta1.ComputeManagedSSLCertificate, error) + UpdateStatus(ctx context.Context, computeManagedSSLCertificate *v1beta1.ComputeManagedSSLCertificate, opts v1.UpdateOptions) (*v1beta1.ComputeManagedSSLCertificate, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ComputeManagedSSLCertificate, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ComputeManagedSSLCertificateList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ComputeManagedSSLCertificate, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ComputeManagedSSLCertificateList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ComputeManagedSSLCertificate, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ComputeManagedSSLCertificate, err error) ComputeManagedSSLCertificateExpansion } @@ -60,7 +60,7 @@ type computeManagedSSLCertificates struct { } // newComputeManagedSSLCertificates returns a ComputeManagedSSLCertificates -func newComputeManagedSSLCertificates(c *ComputeV1alpha1Client, namespace string) *computeManagedSSLCertificates { +func newComputeManagedSSLCertificates(c *ComputeV1beta1Client, namespace string) *computeManagedSSLCertificates { return &computeManagedSSLCertificates{ client: c.RESTClient(), ns: namespace, @@ -68,8 +68,8 @@ func newComputeManagedSSLCertificates(c *ComputeV1alpha1Client, namespace string } // Get takes name of the computeManagedSSLCertificate, and returns the corresponding computeManagedSSLCertificate object, and an error if there is any. -func (c *computeManagedSSLCertificates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ComputeManagedSSLCertificate, err error) { - result = &v1alpha1.ComputeManagedSSLCertificate{} +func (c *computeManagedSSLCertificates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ComputeManagedSSLCertificate, err error) { + result = &v1beta1.ComputeManagedSSLCertificate{} err = c.client.Get(). Namespace(c.ns). Resource("computemanagedsslcertificates"). 
@@ -81,12 +81,12 @@ func (c *computeManagedSSLCertificates) Get(ctx context.Context, name string, op } // List takes label and field selectors, and returns the list of ComputeManagedSSLCertificates that match those selectors. -func (c *computeManagedSSLCertificates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ComputeManagedSSLCertificateList, err error) { +func (c *computeManagedSSLCertificates) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ComputeManagedSSLCertificateList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.ComputeManagedSSLCertificateList{} + result = &v1beta1.ComputeManagedSSLCertificateList{} err = c.client.Get(). Namespace(c.ns). Resource("computemanagedsslcertificates"). @@ -113,8 +113,8 @@ func (c *computeManagedSSLCertificates) Watch(ctx context.Context, opts v1.ListO } // Create takes the representation of a computeManagedSSLCertificate and creates it. Returns the server's representation of the computeManagedSSLCertificate, and an error, if there is any. -func (c *computeManagedSSLCertificates) Create(ctx context.Context, computeManagedSSLCertificate *v1alpha1.ComputeManagedSSLCertificate, opts v1.CreateOptions) (result *v1alpha1.ComputeManagedSSLCertificate, err error) { - result = &v1alpha1.ComputeManagedSSLCertificate{} +func (c *computeManagedSSLCertificates) Create(ctx context.Context, computeManagedSSLCertificate *v1beta1.ComputeManagedSSLCertificate, opts v1.CreateOptions) (result *v1beta1.ComputeManagedSSLCertificate, err error) { + result = &v1beta1.ComputeManagedSSLCertificate{} err = c.client.Post(). Namespace(c.ns). Resource("computemanagedsslcertificates"). @@ -126,8 +126,8 @@ func (c *computeManagedSSLCertificates) Create(ctx context.Context, computeManag } // Update takes the representation of a computeManagedSSLCertificate and updates it. Returns the server's representation of the computeManagedSSLCertificate, and an error, if there is any. -func (c *computeManagedSSLCertificates) Update(ctx context.Context, computeManagedSSLCertificate *v1alpha1.ComputeManagedSSLCertificate, opts v1.UpdateOptions) (result *v1alpha1.ComputeManagedSSLCertificate, err error) { - result = &v1alpha1.ComputeManagedSSLCertificate{} +func (c *computeManagedSSLCertificates) Update(ctx context.Context, computeManagedSSLCertificate *v1beta1.ComputeManagedSSLCertificate, opts v1.UpdateOptions) (result *v1beta1.ComputeManagedSSLCertificate, err error) { + result = &v1beta1.ComputeManagedSSLCertificate{} err = c.client.Put(). Namespace(c.ns). Resource("computemanagedsslcertificates"). @@ -141,8 +141,8 @@ func (c *computeManagedSSLCertificates) Update(ctx context.Context, computeManag // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *computeManagedSSLCertificates) UpdateStatus(ctx context.Context, computeManagedSSLCertificate *v1alpha1.ComputeManagedSSLCertificate, opts v1.UpdateOptions) (result *v1alpha1.ComputeManagedSSLCertificate, err error) { - result = &v1alpha1.ComputeManagedSSLCertificate{} +func (c *computeManagedSSLCertificates) UpdateStatus(ctx context.Context, computeManagedSSLCertificate *v1beta1.ComputeManagedSSLCertificate, opts v1.UpdateOptions) (result *v1beta1.ComputeManagedSSLCertificate, err error) { + result = &v1beta1.ComputeManagedSSLCertificate{} err = c.client.Put(). Namespace(c.ns). 
Resource("computemanagedsslcertificates"). @@ -183,8 +183,8 @@ func (c *computeManagedSSLCertificates) DeleteCollection(ctx context.Context, op } // Patch applies the patch and returns the patched computeManagedSSLCertificate. -func (c *computeManagedSSLCertificates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ComputeManagedSSLCertificate, err error) { - result = &v1alpha1.ComputeManagedSSLCertificate{} +func (c *computeManagedSSLCertificates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ComputeManagedSSLCertificate, err error) { + result = &v1beta1.ComputeManagedSSLCertificate{} err = c.client.Patch(pt). Namespace(c.ns). Resource("computemanagedsslcertificates"). diff --git a/pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/fake/fake_compute_client.go b/pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/fake/fake_compute_client.go index a9d4902357..6506806c2d 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/fake/fake_compute_client.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/fake/fake_compute_client.go @@ -107,6 +107,10 @@ func (c *FakeComputeV1beta1) ComputeInterconnectAttachments(namespace string) v1 return &FakeComputeInterconnectAttachments{c, namespace} } +func (c *FakeComputeV1beta1) ComputeManagedSSLCertificates(namespace string) v1beta1.ComputeManagedSSLCertificateInterface { + return &FakeComputeManagedSSLCertificates{c, namespace} +} + func (c *FakeComputeV1beta1) ComputeNetworks(namespace string) v1beta1.ComputeNetworkInterface { return &FakeComputeNetworks{c, namespace} } diff --git a/pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/fake/fake_computemanagedsslcertificate.go b/pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/fake/fake_computemanagedsslcertificate.go similarity index 66% rename from pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/fake/fake_computemanagedsslcertificate.go rename to pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/fake/fake_computemanagedsslcertificate.go index 1f3e894dc7..a118160699 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/compute/v1alpha1/fake/fake_computemanagedsslcertificate.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/fake/fake_computemanagedsslcertificate.go @@ -24,7 +24,7 @@ package fake import ( "context" - v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/compute/v1alpha1" + v1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/compute/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" @@ -34,29 +34,29 @@ import ( // FakeComputeManagedSSLCertificates implements ComputeManagedSSLCertificateInterface type FakeComputeManagedSSLCertificates struct { - Fake *FakeComputeV1alpha1 + Fake *FakeComputeV1beta1 ns string } -var computemanagedsslcertificatesResource = v1alpha1.SchemeGroupVersion.WithResource("computemanagedsslcertificates") +var computemanagedsslcertificatesResource = v1beta1.SchemeGroupVersion.WithResource("computemanagedsslcertificates") -var computemanagedsslcertificatesKind = 
v1alpha1.SchemeGroupVersion.WithKind("ComputeManagedSSLCertificate") +var computemanagedsslcertificatesKind = v1beta1.SchemeGroupVersion.WithKind("ComputeManagedSSLCertificate") // Get takes name of the computeManagedSSLCertificate, and returns the corresponding computeManagedSSLCertificate object, and an error if there is any. -func (c *FakeComputeManagedSSLCertificates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ComputeManagedSSLCertificate, err error) { +func (c *FakeComputeManagedSSLCertificates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ComputeManagedSSLCertificate, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(computemanagedsslcertificatesResource, c.ns, name), &v1alpha1.ComputeManagedSSLCertificate{}) + Invokes(testing.NewGetAction(computemanagedsslcertificatesResource, c.ns, name), &v1beta1.ComputeManagedSSLCertificate{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ComputeManagedSSLCertificate), err + return obj.(*v1beta1.ComputeManagedSSLCertificate), err } // List takes label and field selectors, and returns the list of ComputeManagedSSLCertificates that match those selectors. -func (c *FakeComputeManagedSSLCertificates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ComputeManagedSSLCertificateList, err error) { +func (c *FakeComputeManagedSSLCertificates) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ComputeManagedSSLCertificateList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(computemanagedsslcertificatesResource, computemanagedsslcertificatesKind, c.ns, opts), &v1alpha1.ComputeManagedSSLCertificateList{}) + Invokes(testing.NewListAction(computemanagedsslcertificatesResource, computemanagedsslcertificatesKind, c.ns, opts), &v1beta1.ComputeManagedSSLCertificateList{}) if obj == nil { return nil, err @@ -66,8 +66,8 @@ func (c *FakeComputeManagedSSLCertificates) List(ctx context.Context, opts v1.Li if label == nil { label = labels.Everything() } - list := &v1alpha1.ComputeManagedSSLCertificateList{ListMeta: obj.(*v1alpha1.ComputeManagedSSLCertificateList).ListMeta} - for _, item := range obj.(*v1alpha1.ComputeManagedSSLCertificateList).Items { + list := &v1beta1.ComputeManagedSSLCertificateList{ListMeta: obj.(*v1beta1.ComputeManagedSSLCertificateList).ListMeta} + for _, item := range obj.(*v1beta1.ComputeManagedSSLCertificateList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -83,43 +83,43 @@ func (c *FakeComputeManagedSSLCertificates) Watch(ctx context.Context, opts v1.L } // Create takes the representation of a computeManagedSSLCertificate and creates it. Returns the server's representation of the computeManagedSSLCertificate, and an error, if there is any. -func (c *FakeComputeManagedSSLCertificates) Create(ctx context.Context, computeManagedSSLCertificate *v1alpha1.ComputeManagedSSLCertificate, opts v1.CreateOptions) (result *v1alpha1.ComputeManagedSSLCertificate, err error) { +func (c *FakeComputeManagedSSLCertificates) Create(ctx context.Context, computeManagedSSLCertificate *v1beta1.ComputeManagedSSLCertificate, opts v1.CreateOptions) (result *v1beta1.ComputeManagedSSLCertificate, err error) { obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(computemanagedsslcertificatesResource, c.ns, computeManagedSSLCertificate), &v1alpha1.ComputeManagedSSLCertificate{}) + Invokes(testing.NewCreateAction(computemanagedsslcertificatesResource, c.ns, computeManagedSSLCertificate), &v1beta1.ComputeManagedSSLCertificate{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ComputeManagedSSLCertificate), err + return obj.(*v1beta1.ComputeManagedSSLCertificate), err } // Update takes the representation of a computeManagedSSLCertificate and updates it. Returns the server's representation of the computeManagedSSLCertificate, and an error, if there is any. -func (c *FakeComputeManagedSSLCertificates) Update(ctx context.Context, computeManagedSSLCertificate *v1alpha1.ComputeManagedSSLCertificate, opts v1.UpdateOptions) (result *v1alpha1.ComputeManagedSSLCertificate, err error) { +func (c *FakeComputeManagedSSLCertificates) Update(ctx context.Context, computeManagedSSLCertificate *v1beta1.ComputeManagedSSLCertificate, opts v1.UpdateOptions) (result *v1beta1.ComputeManagedSSLCertificate, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(computemanagedsslcertificatesResource, c.ns, computeManagedSSLCertificate), &v1alpha1.ComputeManagedSSLCertificate{}) + Invokes(testing.NewUpdateAction(computemanagedsslcertificatesResource, c.ns, computeManagedSSLCertificate), &v1beta1.ComputeManagedSSLCertificate{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ComputeManagedSSLCertificate), err + return obj.(*v1beta1.ComputeManagedSSLCertificate), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeComputeManagedSSLCertificates) UpdateStatus(ctx context.Context, computeManagedSSLCertificate *v1alpha1.ComputeManagedSSLCertificate, opts v1.UpdateOptions) (*v1alpha1.ComputeManagedSSLCertificate, error) { +func (c *FakeComputeManagedSSLCertificates) UpdateStatus(ctx context.Context, computeManagedSSLCertificate *v1beta1.ComputeManagedSSLCertificate, opts v1.UpdateOptions) (*v1beta1.ComputeManagedSSLCertificate, error) { obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(computemanagedsslcertificatesResource, "status", c.ns, computeManagedSSLCertificate), &v1alpha1.ComputeManagedSSLCertificate{}) + Invokes(testing.NewUpdateSubresourceAction(computemanagedsslcertificatesResource, "status", c.ns, computeManagedSSLCertificate), &v1beta1.ComputeManagedSSLCertificate{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ComputeManagedSSLCertificate), err + return obj.(*v1beta1.ComputeManagedSSLCertificate), err } // Delete takes name of the computeManagedSSLCertificate and deletes it. Returns an error if one occurs. func (c *FakeComputeManagedSSLCertificates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewDeleteActionWithOptions(computemanagedsslcertificatesResource, c.ns, name, opts), &v1alpha1.ComputeManagedSSLCertificate{}) + Invokes(testing.NewDeleteActionWithOptions(computemanagedsslcertificatesResource, c.ns, name, opts), &v1beta1.ComputeManagedSSLCertificate{}) return err } @@ -128,17 +128,17 @@ func (c *FakeComputeManagedSSLCertificates) Delete(ctx context.Context, name str func (c *FakeComputeManagedSSLCertificates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { action := testing.NewDeleteCollectionAction(computemanagedsslcertificatesResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha1.ComputeManagedSSLCertificateList{}) + _, err := c.Fake.Invokes(action, &v1beta1.ComputeManagedSSLCertificateList{}) return err } // Patch applies the patch and returns the patched computeManagedSSLCertificate. -func (c *FakeComputeManagedSSLCertificates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ComputeManagedSSLCertificate, err error) { +func (c *FakeComputeManagedSSLCertificates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ComputeManagedSSLCertificate, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(computemanagedsslcertificatesResource, c.ns, name, pt, data, subresources...), &v1alpha1.ComputeManagedSSLCertificate{}) + Invokes(testing.NewPatchSubresourceAction(computemanagedsslcertificatesResource, c.ns, name, pt, data, subresources...), &v1beta1.ComputeManagedSSLCertificate{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ComputeManagedSSLCertificate), err + return obj.(*v1beta1.ComputeManagedSSLCertificate), err } diff --git a/pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/generated_expansion.go b/pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/generated_expansion.go index 75c28c8425..468b92914c 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/generated_expansion.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/compute/v1beta1/generated_expansion.go @@ -59,6 +59,8 @@ type ComputeInstanceTemplateExpansion interface{} type ComputeInterconnectAttachmentExpansion interface{} +type ComputeManagedSSLCertificateExpansion interface{} + type ComputeNetworkExpansion interface{} type ComputeNetworkEndpointGroupExpansion interface{} diff --git a/scripts/generate-google3-docs/resource-reference/_toc.yaml b/scripts/generate-google3-docs/resource-reference/_toc.yaml index 37649fbe17..b901863903 100644 --- a/scripts/generate-google3-docs/resource-reference/_toc.yaml +++ b/scripts/generate-google3-docs/resource-reference/_toc.yaml @@ -287,6 +287,8 @@ toc: path: /config-connector/docs/reference/resource-docs/compute/computeinstancetemplate.md - title: "ComputeInterconnectAttachment" path: /config-connector/docs/reference/resource-docs/compute/computeinterconnectattachment.md + - title: "ComputeManagedSSLCertificate" + path: /config-connector/docs/reference/resource-docs/compute/computemanagedsslcertificate.md - title: "ComputeNetwork" path: /config-connector/docs/reference/resource-docs/compute/computenetwork.md - title: "ComputeNetworkEndpointGroup" diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computemanagedsslcertificate.md 
b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computemanagedsslcertificate.md new file mode 100644 index 0000000000..bd73364645 --- /dev/null +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computemanagedsslcertificate.md @@ -0,0 +1,376 @@ +{# AUTOGENERATED. DO NOT EDIT. #} + +{% extends "config-connector/_base.html" %} + +{% block page_title %}ComputeManagedSSLCertificate{% endblock %} +{% block body %} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Property | Value |
| --- | --- |
| {{gcp_name_short}} Service Name | Compute Engine |
| {{gcp_name_short}} Service Documentation | /compute/docs/ |
| {{gcp_name_short}} REST Resource Name | v1.sslCertificates |
| {{gcp_name_short}} REST Resource Documentation | /compute/docs/reference/rest/v1/sslCertificates |
| {{product_name_short}} Resource Short Names | gcpcomputemanagedsslcertificate<br>gcpcomputemanagedsslcertificates<br>computemanagedsslcertificate |
| {{product_name_short}} Service Name | compute.googleapis.com |
| {{product_name_short}} Resource Fully Qualified Name | computemanagedsslcertificates.compute.cnrm.cloud.google.com |
| Can Be Referenced by IAMPolicy/IAMPolicyMember | No |
| {{product_name_short}} Default Average Reconcile Interval In Seconds | 600 |
+ +## Custom Resource Definition Properties + + +### Annotations + + + + + + + + + + + +
| Fields |
| --- |
| `cnrm.cloud.google.com/state-into-spec` |
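For illustration, a minimal sketch of where this annotation is set on the resource; the value shown is an example (`merge` and `absent` are the commonly used values):

```yaml
# Illustrative sketch: controls whether observed state is merged back into spec.
apiVersion: compute.cnrm.cloud.google.com/v1beta1
kind: ComputeManagedSSLCertificate
metadata:
  name: computemanagedsslcertificate-sample
  annotations:
    cnrm.cloud.google.com/state-into-spec: absent
```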
+ + +### Spec +#### Schema +```yaml +description: string +managed: + domains: + - string +projectRef: + external: string + name: string + namespace: string +resourceID: string +type: string +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Field | Required | Type | Description |
| --- | --- | --- | --- |
| `description` | Optional | string | Immutable. An optional description of this resource. |
| `managed` | Optional | object | Immutable. Properties relevant to a managed certificate. These will be used if the certificate is managed (as indicated by a value of 'MANAGED' in 'type'). |
| `managed.domains` | Required* | list (string) | Immutable. Domains for which a managed SSL certificate will be valid. Currently, there can be up to 100 domains in this list. |
| `managed.domains[]` | Required* | string | |
| `projectRef` | Required | object | The project that this resource belongs to. |
| `projectRef.external` | Optional | string | Allowed value: The `name` field of a `Project` resource. |
| `projectRef.name` | Optional | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names |
| `projectRef.namespace` | Optional | string | Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ |
| `resourceID` | Optional | string | Immutable. Optional. The name of the resource. Used for creation and acquisition. When unset, the value of `metadata.name` is used as the default. |
| `type` | Optional | string | Immutable. Enum field whose value is always 'MANAGED' - used to signal to the API which type this is. Default value: "MANAGED". Possible values: ["MANAGED"]. |

* Field is required when parent field is specified
+ + +### Status +#### Schema +```yaml +conditions: +- lastTransitionTime: string + message: string + reason: string + status: string + type: string +observedGeneration: integer +observedState: + certificateId: integer + creationTimestamp: string + expireTime: string + selfLink: string + subjectAlternativeNames: + - string +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Field | Type | Description |
| --- | --- | --- |
| `conditions` | list (object) | Conditions represent the latest available observation of the resource's current state. |
| `conditions[]` | object | |
| `conditions[].lastTransitionTime` | string | Last time the condition transitioned from one status to another. |
| `conditions[].message` | string | Human-readable message indicating details about last transition. |
| `conditions[].reason` | string | Unique, one-word, CamelCase reason for the condition's last transition. |
| `conditions[].status` | string | Status is the status of the condition. Can be True, False, Unknown. |
| `conditions[].type` | string | Type is the type of the condition. |
| `observedGeneration` | integer | ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. If this is equal to metadata.generation, then that means that the current reported status reflects the most recent desired state of the resource. |
| `observedState` | object | The observed state of the underlying GCP resource. |
| `observedState.certificateId` | integer | The unique identifier for the resource. |
| `observedState.creationTimestamp` | string | Creation timestamp in RFC3339 text format. |
| `observedState.expireTime` | string | Expire time of the certificate in RFC3339 text format. |
| `observedState.selfLink` | string | |
| `observedState.subjectAlternativeNames` | list (string) | Domains associated with the certificate via Subject Alternative Name. |
| `observedState.subjectAlternativeNames[]` | string | |
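For orientation, a hypothetical status excerpt (all values invented) showing how the Ready condition and `observedState` typically surface once the certificate has been reconciled; the table above is the authoritative schema:

```yaml
# Illustrative only; values are placeholders.
status:
  conditions:
  - type: Ready
    status: "True"
    reason: UpToDate
    message: The resource is up to date
    lastTransitionTime: "2024-06-24T00:00:00Z"
  observedGeneration: 1
  observedState:
    certificateId: 1234567890123456789
    creationTimestamp: "2024-06-24T00:00:00Z"
    selfLink: https://compute.googleapis.com/compute/v1/projects/example-project/global/sslCertificates/computemanagedsslcertificate-sample
```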
+ +## Sample YAML(s) + +### Typical Use Case +```yaml +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeManagedSSLCertificate +metadata: + name: computemanagedsslcertificate-sample +spec: + managed: + domains: + - sslcert.tf-test.club. + projectRef: + external: ${PROJECT_ID?} + resourceID: computemanagedsslcertificate-sample +``` + + +Note: If you have any trouble with instantiating the resource, refer to Troubleshoot Config Connector. + +{% endblock %} diff --git a/scripts/generate-google3-docs/resource-reference/overview.md b/scripts/generate-google3-docs/resource-reference/overview.md index a2ad258ddd..e0b20365ad 100644 --- a/scripts/generate-google3-docs/resource-reference/overview.md +++ b/scripts/generate-google3-docs/resource-reference/overview.md @@ -226,6 +226,10 @@ issues for {{product_name_short}}. {{compute_name}} ComputeInterconnectAttachment + + {{compute_name}} + ComputeManagedSSLCertificate + {{compute_name}} ComputeNetwork diff --git a/scripts/generate-google3-docs/resource-reference/templates/compute_computemanagedsslcertificate.tmpl b/scripts/generate-google3-docs/resource-reference/templates/compute_computemanagedsslcertificate.tmpl new file mode 100644 index 0000000000..be229fce97 --- /dev/null +++ b/scripts/generate-google3-docs/resource-reference/templates/compute_computemanagedsslcertificate.tmpl @@ -0,0 +1,54 @@ +{{template "headercomment.tmpl" .}} + +{% extends "config-connector/_base.html" %} + +{% block page_title %}{{ .Kind}}{% endblock %} +{% block body %} +{{template "alphadisclaimer.tmpl" .}} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +{{template "iamsupport.tmpl" .}} + + + + + +
| Property | Value |
| --- | --- |
| {{"{{gcp_name_short}}"}} Service Name | Compute Engine |
| {{"{{gcp_name_short}}"}} Service Documentation | /compute/docs/ |
| {{"{{gcp_name_short}}"}} REST Resource Name | v1.sslCertificates |
| {{"{{gcp_name_short}}"}} REST Resource Documentation | /compute/docs/reference/rest/v1/sslCertificates |
| {{"{{product_name_short}}"}} Resource Short Names | {{ .ShortNames}} |
| {{"{{product_name_short}}"}} Service Name | compute.googleapis.com |
| {{"{{product_name_short}}"}} Resource Fully Qualified Name | {{ .FullyQualifiedName}} |
{{template "iamsupport.tmpl" .}}
| {{"{{product_name_short}}"}} Default Average Reconcile Interval In Seconds | {{ .DefaultReconcileInterval}} |
+ +{{template "resource.tmpl" .}} +{{template "endnote.tmpl" .}} +{% endblock %} From 6414f9cfc1dbc0e419ad63be5f483ef22f50a995 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Mon, 24 Jun 2024 21:42:52 +0000 Subject: [PATCH 060/101] tests: add samples Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- ..._v1beta1_computemanagedsslcertificate.yaml | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 config/samples/resources/computemanagedsslcertificate/compute_v1beta1_computemanagedsslcertificate.yaml diff --git a/config/samples/resources/computemanagedsslcertificate/compute_v1beta1_computemanagedsslcertificate.yaml b/config/samples/resources/computemanagedsslcertificate/compute_v1beta1_computemanagedsslcertificate.yaml new file mode 100644 index 0000000000..8a3d47e747 --- /dev/null +++ b/config/samples/resources/computemanagedsslcertificate/compute_v1beta1_computemanagedsslcertificate.yaml @@ -0,0 +1,24 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeManagedSSLCertificate +metadata: + name: computemanagedsslcertificate-sample +spec: + managed: + domains: + - sslcert.tf-test.club. 
+ projectRef: + external: ${PROJECT_ID?} + resourceID: computemanagedsslcertificate-sample \ No newline at end of file From 798d146f7ecf7d2977cc520b9fed854fd21f2840 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Mon, 24 Jun 2024 22:36:50 +0000 Subject: [PATCH 061/101] tests: temporarily add exception Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- tests/apichecks/testdata/exceptions/acronyms.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/apichecks/testdata/exceptions/acronyms.txt b/tests/apichecks/testdata/exceptions/acronyms.txt index 84649cd7a1..e8bc9c623c 100644 --- a/tests/apichecks/testdata/exceptions/acronyms.txt +++ b/tests/apichecks/testdata/exceptions/acronyms.txt @@ -156,7 +156,8 @@ [acronyms] crd=computeinterconnectattachments.compute.cnrm.cloud.google.com version=v1beta1: field ".status.cloudRouterIpAddress" should be ".status.cloudRouterIPAddress" [acronyms] crd=computeinterconnectattachments.compute.cnrm.cloud.google.com version=v1beta1: field ".status.customerRouterIpAddress" should be ".status.customerRouterIPAddress" [acronyms] crd=computeinterconnectattachments.compute.cnrm.cloud.google.com version=v1beta1: field ".status.googleReferenceId" should be ".status.googleReferenceID" -[acronyms] crd=computemanagedsslcertificates.compute.cnrm.cloud.google.com version=v1alpha1: field ".status.certificateId" should be ".status.certificateID" +[acronyms] crd=computemanagedsslcertificates.compute.cnrm.cloud.google.com version=v1alpha1: field ".status.observedState.certificateId" should be ".status.observedState.certificateID" +[acronyms] crd=computemanagedsslcertificates.compute.cnrm.cloud.google.com version=v1beta1: field ".status.observedState.certificateId" should be ".status.observedState.certificateID" [acronyms] crd=computenetworkfirewallpolicies.compute.cnrm.cloud.google.com version=v1beta1: field ".status.networkFirewallPolicyId" should be ".status.networkFirewallPolicyID" [acronyms] crd=computenetworkfirewallpolicies.compute.cnrm.cloud.google.com version=v1beta1: field ".status.selfLinkWithId" should be ".status.selfLinkWithID" [acronyms] crd=computenetworkfirewallpolicyrules.compute.cnrm.cloud.google.com version=v1alpha1: field ".spec.match.destIpRanges" should be ".spec.match.destIPRanges" From 524d13f793fdcedf886cfa53053388ea05f46fe2 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Mon, 24 Jun 2024 23:59:34 +0000 Subject: [PATCH 062/101] tests: resourceskeleton Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- pkg/resourceskeleton/testdata/asset-skeleton.yaml | 14 ++++++++++++++ pkg/resourceskeleton/testdata/uri-skeleton.yaml | 15 ++++++++++++++- 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/pkg/resourceskeleton/testdata/asset-skeleton.yaml b/pkg/resourceskeleton/testdata/asset-skeleton.yaml index f6160f2dcc..6e95428c9a 100644 --- a/pkg/resourceskeleton/testdata/asset-skeleton.yaml +++ b/pkg/resourceskeleton/testdata/asset-skeleton.yaml @@ -1219,3 +1219,17 @@ projectRef: external: kcc-test resourceConfigId: CloudIDSEndpoint +- resourceConfigId: ComputeManagedSSLCertificate + asset: + ancestors: + - projects/1234567890 + name: //compute.googleapis.com/projects/kcc-test/global/sslCertificates/computemanagedsslcertificate-wj6kvvmztb67xscbzd5a + asset_type: compute.googleapis.com/ManagedSslCertificate + expectedSkeleton: + apiVersion: compute.cnrm.cloud.google.com/v1beta1 + kind: 
ComputeManagedSSLCertificate + metadata: + name: computemanagedsslcertificate-wj6kvvmztb67xscbzd5a + spec: + projectRef: + external: kcc-test diff --git a/pkg/resourceskeleton/testdata/uri-skeleton.yaml b/pkg/resourceskeleton/testdata/uri-skeleton.yaml index e85eac0ad6..3e8019b425 100644 --- a/pkg/resourceskeleton/testdata/uri-skeleton.yaml +++ b/pkg/resourceskeleton/testdata/uri-skeleton.yaml @@ -1152,4 +1152,17 @@ external: kcc-test region: us-west2-a ResourceConfigId: CloudIDSEndpoint - URI: "https://ids.googleapis.com/projects/kcc-test/locations/us-west2-a/endpoints/cloudidsendpoint-ppdtifqxy4vqi4ezx7la" \ No newline at end of file + URI: "https://ids.googleapis.com/projects/kcc-test/locations/us-west2-a/endpoints/cloudidsendpoint-ppdtifqxy4vqi4ezx7la" +- ExpectedSkeleton: + apiVersion: compute.cnrm.cloud.google.com/v1beta1 + kind: ComputeManagedSSLCertificate + metadata: + name: computemanagedsslcertificate-wj6kvvmztb67xscbzd5a + spec: + managed: + domains: + - sslcert.kcc-test.club. + projectRef: + external: kcc-test + ResourceConfigId: ComputeManagedSSLCertificate + URI: "https://www.googleapis.com/compute/v1/projects/kcc-test/global/sslCertificates/computemanagedsslcertificate-wj6kvvmztb67xscbzd5a" From ab3317d077db42720ef7de43a87c3665abbabec5 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Tue, 25 Jun 2024 17:53:22 +0000 Subject: [PATCH 063/101] chore: update fixtures test Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- pkg/test/resourcefixture/contexts/compute_context.go | 6 +++++- ...d_object_computemanagedsslcertificate.golden.yaml} | 11 ++++++----- .../computemanagedsslcertificate}/_http.log | 4 ++-- .../computemanagedsslcertificate}/create.yaml | 6 +++--- tests/e2e/normalize.go | 7 ++++--- 5 files changed, 20 insertions(+), 14 deletions(-) rename pkg/test/resourcefixture/testdata/basic/compute/{v1alpha1/computemanagedsslcertificate/managedsslcertificate/_generated_object_managedsslcertificate.golden.yaml => v1beta1/computemanagedsslcertificate/_generated_object_computemanagedsslcertificate.golden.yaml} (68%) rename pkg/test/resourcefixture/testdata/basic/compute/{v1alpha1/computemanagedsslcertificate/managedsslcertificate => v1beta1/computemanagedsslcertificate}/_http.log (98%) rename pkg/test/resourcefixture/testdata/basic/compute/{v1alpha1/computemanagedsslcertificate/managedsslcertificate => v1beta1/computemanagedsslcertificate}/create.yaml (87%) diff --git a/pkg/test/resourcefixture/contexts/compute_context.go b/pkg/test/resourcefixture/contexts/compute_context.go index ba5d4540af..6706858b89 100644 --- a/pkg/test/resourcefixture/contexts/compute_context.go +++ b/pkg/test/resourcefixture/contexts/compute_context.go @@ -34,7 +34,11 @@ func init() { ResourceKind: "ComputeExternalVPNGateway", SkipUpdate: true, } - + resourceContextMap["computemanagedsslcertificate"] = ResourceContext{ + ResourceKind: "ComputeManagedSSLCertificate", + // This resource doesn't support update. 
+ SkipUpdate: true, + } resourceContextMap["cloudfunctioncomputeregionnetworkendpointgroup"] = ResourceContext{ ResourceKind: "ComputeRegionNetworkEndpointGroup", // The GCP resource for ComputeRegionNetworkEndpointGroup doesn't diff --git a/pkg/test/resourcefixture/testdata/basic/compute/v1alpha1/computemanagedsslcertificate/managedsslcertificate/_generated_object_managedsslcertificate.golden.yaml b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computemanagedsslcertificate/_generated_object_computemanagedsslcertificate.golden.yaml similarity index 68% rename from pkg/test/resourcefixture/testdata/basic/compute/v1alpha1/computemanagedsslcertificate/managedsslcertificate/_generated_object_managedsslcertificate.golden.yaml rename to pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computemanagedsslcertificate/_generated_object_computemanagedsslcertificate.golden.yaml index 8abb0be870..424fbbb6e2 100644 --- a/pkg/test/resourcefixture/testdata/basic/compute/v1alpha1/computemanagedsslcertificate/managedsslcertificate/_generated_object_managedsslcertificate.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computemanagedsslcertificate/_generated_object_computemanagedsslcertificate.golden.yaml @@ -1,4 +1,4 @@ -apiVersion: compute.cnrm.cloud.google.com/v1alpha1 +apiVersion: compute.cnrm.cloud.google.com/v1beta1 kind: ComputeManagedSSLCertificate metadata: annotations: @@ -15,18 +15,19 @@ metadata: spec: managed: domains: - - sslcert.kcc-test.club. + - sslcert.fixture.club. projectRef: external: ${projectId} resourceID: computemanagedsslcertificate-${uniqueId} status: - certificateId: 1111011111111110000 conditions: - lastTransitionTime: "1970-01-01T00:00:00Z" message: The resource is up to date reason: UpToDate status: "True" type: Ready - creationTimestamp: "1970-01-01T00:00:00Z" observedGeneration: 1 - selfLink: https://compute.googleapis.com/compute/v1/projects/${projectId}/global/sslCertificates/computemanagedsslcertificate-${uniqueId} + observedState: + certificateId: "1.719337333063698e+18" + creationTimestamp: "1970-01-01T00:00:00Z" + selfLink: https://compute.googleapis.com/compute/v1/projects/${projectId}/global/sslCertificates/computemanagedsslcertificate-${uniqueId} diff --git a/pkg/test/resourcefixture/testdata/basic/compute/v1alpha1/computemanagedsslcertificate/managedsslcertificate/_http.log b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computemanagedsslcertificate/_http.log similarity index 98% rename from pkg/test/resourcefixture/testdata/basic/compute/v1alpha1/computemanagedsslcertificate/managedsslcertificate/_http.log rename to pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computemanagedsslcertificate/_http.log index 0d2a6aec96..b898192f60 100644 --- a/pkg/test/resourcefixture/testdata/basic/compute/v1alpha1/computemanagedsslcertificate/managedsslcertificate/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computemanagedsslcertificate/_http.log @@ -36,7 +36,7 @@ User-Agent: Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 t { "managed": { "domains": [ - "sslcert.kcc-test.club." + "sslcert.fixture.club." ] }, "name": "computemanagedsslcertificate-${uniqueId}", @@ -92,7 +92,7 @@ X-Xss-Protection: 0 "kind": "compute#sslCertificate", "managed": { "domains": [ - "sslcert.kcc-test.club." + "sslcert.fixture.club." 
] }, "name": "computemanagedsslcertificate-${uniqueId}", diff --git a/pkg/test/resourcefixture/testdata/basic/compute/v1alpha1/computemanagedsslcertificate/managedsslcertificate/create.yaml b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computemanagedsslcertificate/create.yaml similarity index 87% rename from pkg/test/resourcefixture/testdata/basic/compute/v1alpha1/computemanagedsslcertificate/managedsslcertificate/create.yaml rename to pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computemanagedsslcertificate/create.yaml index dc2ab1a279..571b3aaa94 100644 --- a/pkg/test/resourcefixture/testdata/basic/compute/v1alpha1/computemanagedsslcertificate/managedsslcertificate/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computemanagedsslcertificate/create.yaml @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,14 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: compute.cnrm.cloud.google.com/v1alpha1 +apiVersion: compute.cnrm.cloud.google.com/v1beta1 kind: ComputeManagedSSLCertificate metadata: name: computemanagedsslcertificate-${uniqueId} spec: managed: domains: - - sslcert.kcc-test.club. + - sslcert.fixture.club. projectRef: external: ${projectId} resourceID: computemanagedsslcertificate-${uniqueId} \ No newline at end of file diff --git a/tests/e2e/normalize.go b/tests/e2e/normalize.go index 0f787b943b..fcedb680c7 100644 --- a/tests/e2e/normalize.go +++ b/tests/e2e/normalize.go @@ -61,13 +61,11 @@ func normalizeKRMObject(u *unstructured.Unstructured, project testgcp.GCPProject visitor.replacePaths[".status.lastModifiedTime"] = "1970-01-01T00:00:00Z" visitor.replacePaths[".status.etag"] = "abcdef123456" visitor.replacePaths[".status.observedState.etag"] = "abcdef123456" + visitor.replacePaths[".status.observedState.creationTimestamp"] = "1970-01-01T00:00:00Z" // Specific to Sql visitor.replacePaths[".items[].etag"] = "abcdef0123A=" - // Specific to global SSL certificate. This is a server generated id. 
- visitor.replacePaths[".status.certificateId"] = 1111011111111110000 - // Specific to AlloyDB visitor.replacePaths[".status.continuousBackupInfo[].enabledTime"] = "1970-01-01T00:00:00Z" visitor.replacePaths[".status.ipAddress"] = "10.1.2.3" @@ -105,6 +103,9 @@ func normalizeKRMObject(u *unstructured.Unstructured, project testgcp.GCPProject visitor.replacePaths[".spec.softDeletePolicy.effectiveTime"] = "1970-01-01T00:00:00Z" visitor.replacePaths[".status.observedState.softDeletePolicy.effectiveTime"] = "1970-01-01T00:00:00Z" + // Specific to Compute SSL Certs + visitor.replacePaths[".status.observedState.certificateId"] = "1.719337333063698e+18" + visitor.sortSlices = sets.New[string]() // TODO: This should not be needed, we want to avoid churning the kube objects visitor.sortSlices.Insert(".spec.access") From 8028bb1eb832b180eaa3cd3aacf8b80b3400d107 Mon Sep 17 00:00:00 2001 From: Gemma Hou Date: Thu, 27 Jun 2024 01:04:50 +0000 Subject: [PATCH 064/101] Update regional VPC PSC forwarding rule --- .../compute_v1beta1_computeaddress.yaml | 0 ...compute_v1beta1_computebackendservice.yaml | 0 ...compute_v1beta1_computeforwardingrule.yaml | 1 + .../compute_v1beta1_computenetwork.yaml | 0 ...pute_v1beta1_computeserviceattachment.yaml | 0 .../compute_v1beta1_computesubnetwork.yaml | 0 pkg/test/constants/constants.go | 2 +- .../create.yaml | 1 + .../dependencies.yaml | 2 +- .../update.yaml | 1 + .../compute/computeforwardingrule.md | 271 +++++++++--------- 11 files changed, 141 insertions(+), 137 deletions(-) rename config/samples/resources/computeforwardingrule/{forwarding-rule-vpc-psc => regional-forwarding-rule-vpc-psc}/compute_v1beta1_computeaddress.yaml (100%) rename config/samples/resources/computeforwardingrule/{forwarding-rule-vpc-psc => regional-forwarding-rule-vpc-psc}/compute_v1beta1_computebackendservice.yaml (100%) rename config/samples/resources/computeforwardingrule/{forwarding-rule-vpc-psc => regional-forwarding-rule-vpc-psc}/compute_v1beta1_computeforwardingrule.yaml (94%) rename config/samples/resources/computeforwardingrule/{forwarding-rule-vpc-psc => regional-forwarding-rule-vpc-psc}/compute_v1beta1_computenetwork.yaml (100%) rename config/samples/resources/computeforwardingrule/{forwarding-rule-vpc-psc => regional-forwarding-rule-vpc-psc}/compute_v1beta1_computeserviceattachment.yaml (100%) rename config/samples/resources/computeforwardingrule/{forwarding-rule-vpc-psc => regional-forwarding-rule-vpc-psc}/compute_v1beta1_computesubnetwork.yaml (100%) rename pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/{privateserviceconnectforwardingrule => regionalforwardingrulepsc}/create.yaml (92%) rename pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/{privateserviceconnectforwardingrule => regionalforwardingrulepsc}/dependencies.yaml (98%) rename pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/{privateserviceconnectforwardingrule => regionalforwardingrulepsc}/update.yaml (92%) diff --git a/config/samples/resources/computeforwardingrule/forwarding-rule-vpc-psc/compute_v1beta1_computeaddress.yaml b/config/samples/resources/computeforwardingrule/regional-forwarding-rule-vpc-psc/compute_v1beta1_computeaddress.yaml similarity index 100% rename from config/samples/resources/computeforwardingrule/forwarding-rule-vpc-psc/compute_v1beta1_computeaddress.yaml rename to config/samples/resources/computeforwardingrule/regional-forwarding-rule-vpc-psc/compute_v1beta1_computeaddress.yaml diff --git 
a/config/samples/resources/computeforwardingrule/forwarding-rule-vpc-psc/compute_v1beta1_computebackendservice.yaml b/config/samples/resources/computeforwardingrule/regional-forwarding-rule-vpc-psc/compute_v1beta1_computebackendservice.yaml similarity index 100% rename from config/samples/resources/computeforwardingrule/forwarding-rule-vpc-psc/compute_v1beta1_computebackendservice.yaml rename to config/samples/resources/computeforwardingrule/regional-forwarding-rule-vpc-psc/compute_v1beta1_computebackendservice.yaml diff --git a/config/samples/resources/computeforwardingrule/forwarding-rule-vpc-psc/compute_v1beta1_computeforwardingrule.yaml b/config/samples/resources/computeforwardingrule/regional-forwarding-rule-vpc-psc/compute_v1beta1_computeforwardingrule.yaml similarity index 94% rename from config/samples/resources/computeforwardingrule/forwarding-rule-vpc-psc/compute_v1beta1_computeforwardingrule.yaml rename to config/samples/resources/computeforwardingrule/regional-forwarding-rule-vpc-psc/compute_v1beta1_computeforwardingrule.yaml index 8cba914e92..c4643d3766 100644 --- a/config/samples/resources/computeforwardingrule/forwarding-rule-vpc-psc/compute_v1beta1_computeforwardingrule.yaml +++ b/config/samples/resources/computeforwardingrule/regional-forwarding-rule-vpc-psc/compute_v1beta1_computeforwardingrule.yaml @@ -37,6 +37,7 @@ spec: target: serviceAttachmentRef: name: computeforwardingrule-dep-psc + # the Forwarding Rule should be regional and should be in the same region with the Service Attachment location: us-central1 networkRef: name: computeforwardingrule-dep-psc-consumer diff --git a/config/samples/resources/computeforwardingrule/forwarding-rule-vpc-psc/compute_v1beta1_computenetwork.yaml b/config/samples/resources/computeforwardingrule/regional-forwarding-rule-vpc-psc/compute_v1beta1_computenetwork.yaml similarity index 100% rename from config/samples/resources/computeforwardingrule/forwarding-rule-vpc-psc/compute_v1beta1_computenetwork.yaml rename to config/samples/resources/computeforwardingrule/regional-forwarding-rule-vpc-psc/compute_v1beta1_computenetwork.yaml diff --git a/config/samples/resources/computeforwardingrule/forwarding-rule-vpc-psc/compute_v1beta1_computeserviceattachment.yaml b/config/samples/resources/computeforwardingrule/regional-forwarding-rule-vpc-psc/compute_v1beta1_computeserviceattachment.yaml similarity index 100% rename from config/samples/resources/computeforwardingrule/forwarding-rule-vpc-psc/compute_v1beta1_computeserviceattachment.yaml rename to config/samples/resources/computeforwardingrule/regional-forwarding-rule-vpc-psc/compute_v1beta1_computeserviceattachment.yaml diff --git a/config/samples/resources/computeforwardingrule/forwarding-rule-vpc-psc/compute_v1beta1_computesubnetwork.yaml b/config/samples/resources/computeforwardingrule/regional-forwarding-rule-vpc-psc/compute_v1beta1_computesubnetwork.yaml similarity index 100% rename from config/samples/resources/computeforwardingrule/forwarding-rule-vpc-psc/compute_v1beta1_computesubnetwork.yaml rename to config/samples/resources/computeforwardingrule/regional-forwarding-rule-vpc-psc/compute_v1beta1_computesubnetwork.yaml diff --git a/pkg/test/constants/constants.go b/pkg/test/constants/constants.go index dfbb8a19d5..a4a06f3034 100644 --- a/pkg/test/constants/constants.go +++ b/pkg/test/constants/constants.go @@ -45,7 +45,7 @@ var TestNameRegexesToSkip = []string{ ".*(containerattachedcluster).*", // Disable due to TF bug https://github.com/hashicorp/terraform-provider-google/issues/16255. 
// We can't specify labels in the create operation, that causes AssertLabelsMatchAndHaveManagedLabel check to fail. - ".*(privateserviceconnectforwardingrule).*", + ".*(regionalforwardingrulepsc).*", } // TestNameRegexToSkipForTestCRUD is similar to diff --git a/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/privateserviceconnectforwardingrule/create.yaml b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/regionalforwardingrulepsc/create.yaml similarity index 92% rename from pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/privateserviceconnectforwardingrule/create.yaml rename to pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/regionalforwardingrulepsc/create.yaml index 1ddbf89e38..ae5e3adc8a 100644 --- a/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/privateserviceconnectforwardingrule/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/regionalforwardingrulepsc/create.yaml @@ -23,6 +23,7 @@ spec: target: serviceAttachmentRef: name: computeserviceattachment-${uniqueId} + # the Forwarding Rule should be regional and should be in the same region with the Service Attachment location: us-central1 networkRef: name: computenetwork-1-${uniqueId} diff --git a/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/privateserviceconnectforwardingrule/dependencies.yaml b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/regionalforwardingrulepsc/dependencies.yaml similarity index 98% rename from pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/privateserviceconnectforwardingrule/dependencies.yaml rename to pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/regionalforwardingrulepsc/dependencies.yaml index 1f1719d338..8767e919de 100644 --- a/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/privateserviceconnectforwardingrule/dependencies.yaml +++ b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/regionalforwardingrulepsc/dependencies.yaml @@ -42,7 +42,7 @@ spec: region: us-central1 ipCidrRange: "10.0.0.0/16" networkRef: - name: computenetwork-${uniqueId} + name: computenetwork-1-${uniqueId} --- apiVersion: compute.cnrm.cloud.google.com/v1beta1 kind: ComputeAddress diff --git a/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/privateserviceconnectforwardingrule/update.yaml b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/regionalforwardingrulepsc/update.yaml similarity index 92% rename from pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/privateserviceconnectforwardingrule/update.yaml rename to pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/regionalforwardingrulepsc/update.yaml index dd69d49ca6..1cc33e341b 100644 --- a/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/privateserviceconnectforwardingrule/update.yaml +++ b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeforwardingrule/regionalforwardingrulepsc/update.yaml @@ -23,6 +23,7 @@ spec: target: serviceAttachmentRef: name: computeserviceattachment-${uniqueId} + # the Forwarding Rule should be regional and should be in the same region with the Service Attachment location: us-central1 networkRef: name: computenetwork-1-${uniqueId} 
diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeforwardingrule.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeforwardingrule.md index 2fd51e9dce..9b08948e63 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeforwardingrule.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeforwardingrule.md @@ -1248,141 +1248,6 @@ This field is only used for INTERNAL load balancing.{% endverbatim %}

## Sample YAML(s) -### Forwarding Rule Vpc Psc -```yaml -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeForwardingRule -metadata: - name: computeforwardingrule-dep-psc -spec: - location: "us-central1" - networkRef: - name: computeforwardingrule-dep-psc-producer - subnetworkRef: - name: computeforwardingrule-dep1-psc-producer - description: "A test forwarding rule with internal load balancing scheme" - loadBalancingScheme: "INTERNAL" - backendServiceRef: - name: computeforwardingrule-dep-psc - allPorts: true ---- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeForwardingRule -metadata: - name: computeforwardingrule-sample-psc -spec: - description: "A VPC private service connect forwarding rule" - target: - serviceAttachmentRef: - name: computeforwardingrule-dep-psc - location: us-central1 - networkRef: - name: computeforwardingrule-dep-psc-consumer - # PSC forwarding rule requires loadBalancingScheme to be set to empty - loadBalancingScheme: "" - allowPscGlobalAccess: true - ipAddress: - addressRef: - # Replace ${PROJECT_ID?} with your project ID - # PSC forwarding rule requires address's self_link instead of address - external: "https://www.googleapis.com/compute/v1/projects/${PROJECT_ID?}/regions/us-central1/addresses/computeforwardingrule-dep-psc" ---- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeAddress -metadata: - name: computeforwardingrule-dep-psc -spec: - location: us-central1 - subnetworkRef: - name: computeforwardingrule-dep-psc-consumer - addressType: "INTERNAL" ---- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeBackendService -metadata: - name: computeforwardingrule-dep-psc -spec: - location: us-central1 ---- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeNetwork -metadata: - name: computeforwardingrule-dep-psc-consumer -spec: - description: Consumer network - autoCreateSubnetworks: false ---- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeNetwork -metadata: - name: computeforwardingrule-dep-psc-producer -spec: - description: Producer network - autoCreateSubnetworks: false ---- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeServiceAttachment -metadata: - name: computeforwardingrule-dep-psc -spec: - projectRef: - # Replace ${PROJECT_ID?} with your project ID - external: "projects/${PROJECT_ID?}" - location: us-central1 - description: "A dep service attachment" - targetServiceRef: - name: computeforwardingrule-dep-psc - connectionPreference: "ACCEPT_AUTOMATIC" - natSubnets: - - name: "computeforwardingrule-dep2-psc-producer" - enableProxyProtocol: false ---- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeSubnetwork -metadata: - name: computeforwardingrule-dep-psc-consumer -spec: - region: us-central1 - ipCidrRange: "10.0.0.0/16" - networkRef: - name: computeforwardingrule-dep-psc-consumer ---- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: 
ComputeSubnetwork -metadata: - name: computeforwardingrule-dep1-psc-producer -spec: - region: us-central1 - ipCidrRange: "10.0.0.0/16" - networkRef: - name: computeforwardingrule-dep-psc-producer ---- -apiVersion: compute.cnrm.cloud.google.com/v1beta1 -kind: ComputeSubnetwork -metadata: - name: computeforwardingrule-dep2-psc-producer -spec: - region: us-central1 - ipCidrRange: "10.1.0.0/16" - networkRef: - name: computeforwardingrule-dep-psc-producer - purpose: "PRIVATE_SERVICE_CONNECT" -``` - ### Global Forwarding Rule With Target Http Proxy ```yaml # Copyright 2020 Google LLC @@ -1912,6 +1777,142 @@ spec: name: computeforwardingrule-dep-regional ``` +### Regional Forwarding Rule Vpc Psc +```yaml +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeForwardingRule +metadata: + name: computeforwardingrule-dep-psc +spec: + location: "us-central1" + networkRef: + name: computeforwardingrule-dep-psc-producer + subnetworkRef: + name: computeforwardingrule-dep1-psc-producer + description: "A test forwarding rule with internal load balancing scheme" + loadBalancingScheme: "INTERNAL" + backendServiceRef: + name: computeforwardingrule-dep-psc + allPorts: true +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeForwardingRule +metadata: + name: computeforwardingrule-sample-psc +spec: + description: "A VPC private service connect forwarding rule" + target: + serviceAttachmentRef: + name: computeforwardingrule-dep-psc + # the Forwarding Rule should be regional and should be in the same region with the Service Attachment + location: us-central1 + networkRef: + name: computeforwardingrule-dep-psc-consumer + # PSC forwarding rule requires loadBalancingScheme to be set to empty + loadBalancingScheme: "" + allowPscGlobalAccess: true + ipAddress: + addressRef: + # Replace ${PROJECT_ID?} with your project ID + # PSC forwarding rule requires address's self_link instead of address + external: "https://www.googleapis.com/compute/v1/projects/${PROJECT_ID?}/regions/us-central1/addresses/computeforwardingrule-dep-psc" +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeAddress +metadata: + name: computeforwardingrule-dep-psc +spec: + location: us-central1 + subnetworkRef: + name: computeforwardingrule-dep-psc-consumer + addressType: "INTERNAL" +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeBackendService +metadata: + name: computeforwardingrule-dep-psc +spec: + location: us-central1 +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeNetwork +metadata: + name: computeforwardingrule-dep-psc-consumer +spec: + description: Consumer network + autoCreateSubnetworks: false +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeNetwork +metadata: + name: computeforwardingrule-dep-psc-producer +spec: + description: Producer network + autoCreateSubnetworks: false +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: 
ComputeServiceAttachment +metadata: + name: computeforwardingrule-dep-psc +spec: + projectRef: + # Replace ${PROJECT_ID?} with your project ID + external: "projects/${PROJECT_ID?}" + location: us-central1 + description: "A dep service attachment" + targetServiceRef: + name: computeforwardingrule-dep-psc + connectionPreference: "ACCEPT_AUTOMATIC" + natSubnets: + - name: "computeforwardingrule-dep2-psc-producer" + enableProxyProtocol: false +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeSubnetwork +metadata: + name: computeforwardingrule-dep-psc-consumer +spec: + region: us-central1 + ipCidrRange: "10.0.0.0/16" + networkRef: + name: computeforwardingrule-dep-psc-consumer +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeSubnetwork +metadata: + name: computeforwardingrule-dep1-psc-producer +spec: + region: us-central1 + ipCidrRange: "10.0.0.0/16" + networkRef: + name: computeforwardingrule-dep-psc-producer +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeSubnetwork +metadata: + name: computeforwardingrule-dep2-psc-producer +spec: + region: us-central1 + ipCidrRange: "10.1.0.0/16" + networkRef: + name: computeforwardingrule-dep-psc-producer + purpose: "PRIVATE_SERVICE_CONNECT" +``` + Note: If you have any trouble with instantiating the resource, refer to Troubleshoot Config Connector. From c9301d6eaeb9e809fcde0a972aa38686080716fa Mon Sep 17 00:00:00 2001 From: justinsb Date: Wed, 22 May 2024 16:15:46 -0400 Subject: [PATCH 065/101] Support setting IAM on PrivateCACAPool This uses our direct actuation framework. --- .../controllerbuilder/template/controller.go | 5 + docs/releasenotes/release-1.120.md | 2 + go.mod | 3 +- go.sum | 2 + .../direct/alloydb/cluster_controller.go | 4 + .../direct/apikeys/apikeyskey_controller.go | 4 + .../cloudbuild/workerpool_controller.go | 4 + .../directbase/directbase_controller.go | 3 +- .../direct/directbase/interfaces.go | 3 + pkg/controller/direct/export.go | 86 ++----- .../gkehub/featuremembership_controller.go | 4 + pkg/controller/direct/iam.go | 189 ++++++++++++++++ .../direct/logging/logmetric_controller.go | 29 +++ .../monitoringdashboard_controller.go | 30 +++ pkg/controller/direct/privateca/client.go | 70 ++++++ .../privateca/privatecapool_controller.go | 212 ++++++++++++++++++ pkg/controller/direct/privateca/utils.go | 64 ++++++ .../direct/references/projectref.go | 14 ++ pkg/controller/direct/register/register.go | 1 + pkg/controller/direct/registry/references.go | 98 ++++++++ pkg/controller/direct/registry/registry.go | 17 ++ .../resourcemanager/tagkey_controller.go | 4 + pkg/controller/iam/iamclient/iamclient.go | 33 ++- pkg/k8s/errors.go | 2 + pkg/webhook/iam_validator.go | 6 + tests/e2e/unified_test.go | 6 +- 26 files changed, 820 insertions(+), 75 deletions(-) create mode 100644 pkg/controller/direct/iam.go create mode 100644 pkg/controller/direct/privateca/client.go create mode 100644 pkg/controller/direct/privateca/privatecapool_controller.go create mode 100644 pkg/controller/direct/privateca/utils.go create mode 100644 pkg/controller/direct/registry/references.go diff --git a/dev/tools/controllerbuilder/template/controller.go b/dev/tools/controllerbuilder/template/controller.go index cbb4054017..94fb7e16cd 100644 --- a/dev/tools/controllerbuilder/template/controller.go +++ b/dev/tools/controllerbuilder/template/controller.go @@ -126,6 +126,11 @@ func (m *model) AdapterForObject(ctx context.Context, reader client.Reader, u *u }, nil } +func (m *model) AdapterForURL(ctx 
context.Context, url string) (directbase.Adapter, error) { + // TODO: Support URLs + return nil, nil +} + type Adapter struct { resourceID string projectID string diff --git a/docs/releasenotes/release-1.120.md b/docs/releasenotes/release-1.120.md index 39c575ff12..2bda5f1566 100644 --- a/docs/releasenotes/release-1.120.md +++ b/docs/releasenotes/release-1.120.md @@ -4,6 +4,8 @@ * ... +* IAM configuration can now be applied to `PrivateCACAPool`, using our direct-actuation approach. + * Special shout-outs to ... for their contributions to this release. TODO: list contributors with `git log v1.120.0... | grep Merge | grep from | awk '{print $6}' | cut -d '/' -f 1 | sort | uniq` diff --git a/go.mod b/go.mod index b6ddc0e8eb..c656ecae7b 100644 --- a/go.mod +++ b/go.mod @@ -10,10 +10,12 @@ require ( cloud.google.com/go/apikeys v1.1.7 cloud.google.com/go/cloudbuild v1.16.1 cloud.google.com/go/compute v1.27.0 + cloud.google.com/go/iam v1.1.8 cloud.google.com/go/monitoring v1.19.0 cloud.google.com/go/profiler v0.4.0 cloud.google.com/go/resourcemanager v1.9.7 cloud.google.com/go/securesourcemanager v0.1.5 + cloud.google.com/go/security v1.17.0 contrib.go.opencensus.io/exporter/prometheus v0.1.0 github.com/GoogleCloudPlatform/declarative-resource-client-library v1.62.0 github.com/GoogleCloudPlatform/k8s-config-connector/mockgcp v0.0.0-20240614222432-4bde5b345380 @@ -77,7 +79,6 @@ require ( cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/bigtable v1.25.0 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect - cloud.google.com/go/iam v1.1.8 // indirect cloud.google.com/go/longrunning v0.5.7 // indirect dario.cat/mergo v1.0.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect diff --git a/go.sum b/go.sum index 5810256fd0..7cb24da921 100644 --- a/go.sum +++ b/go.sum @@ -61,6 +61,8 @@ cloud.google.com/go/resourcemanager v1.9.7 h1:SdvD0PaPX60+yeKoSe16mawFpM0EPuiPPi cloud.google.com/go/resourcemanager v1.9.7/go.mod h1:cQH6lJwESufxEu6KepsoNAsjrUtYYNXRwxm4QFE5g8A= cloud.google.com/go/securesourcemanager v0.1.5 h1:+6x067eHPHyDU8ed+ybNEWudtngaz/bAoehhyy5bO5M= cloud.google.com/go/securesourcemanager v0.1.5/go.mod h1:RTBWXAILmlm91TsDBmKUzUevfjB1HSXo85nsF8JEWjc= +cloud.google.com/go/security v1.17.0 h1:u4RCnEQPvlrrnFRFinU0T3WsjtrsQErkWBfqTM5oUQI= +cloud.google.com/go/security v1.17.0/go.mod h1:eSuFs0SlBv1gWg7gHIoF0hYOvcSwJCek/GFXtgO6aA0= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= diff --git a/pkg/controller/direct/alloydb/cluster_controller.go b/pkg/controller/direct/alloydb/cluster_controller.go index bd3b06cccc..7af87444f2 100644 --- a/pkg/controller/direct/alloydb/cluster_controller.go +++ b/pkg/controller/direct/alloydb/cluster_controller.go @@ -110,6 +110,10 @@ func (m *clusterModel) AdapterForObject(ctx context.Context, reader client.Reade }, nil } +func (m *clusterModel) AdapterForURL(ctx context.Context, url string) (directbase.Adapter, error) { + return nil, nil +} + // adapter implements the Adapter interface. 
var _ directbase.Adapter = &clusterAdapter{} diff --git a/pkg/controller/direct/apikeys/apikeyskey_controller.go b/pkg/controller/direct/apikeys/apikeyskey_controller.go index 5d39a7a422..c2993ee16f 100644 --- a/pkg/controller/direct/apikeys/apikeyskey_controller.go +++ b/pkg/controller/direct/apikeys/apikeyskey_controller.go @@ -147,6 +147,10 @@ func (m *model) AdapterForObject(ctx context.Context, reader client.Reader, u *u }, nil } +func (m *model) AdapterForURL(ctx context.Context, url string) (directbase.Adapter, error) { + return nil, nil +} + // Find implements the Adapter interface. func (a *adapter) Find(ctx context.Context) (bool, error) { if a.keyID == "" { diff --git a/pkg/controller/direct/cloudbuild/workerpool_controller.go b/pkg/controller/direct/cloudbuild/workerpool_controller.go index e471c3de6f..60123b67e7 100644 --- a/pkg/controller/direct/cloudbuild/workerpool_controller.go +++ b/pkg/controller/direct/cloudbuild/workerpool_controller.go @@ -127,6 +127,10 @@ func (m *model) AdapterForObject(ctx context.Context, reader client.Reader, u *u }, nil } +func (m *model) AdapterForURL(ctx context.Context, url string) (directbase.Adapter, error) { + return nil, nil +} + type Adapter struct { resourceID string projectID string diff --git a/pkg/controller/direct/directbase/directbase_controller.go b/pkg/controller/direct/directbase/directbase_controller.go index c9daa53447..cc85739fea 100644 --- a/pkg/controller/direct/directbase/directbase_controller.go +++ b/pkg/controller/direct/directbase/directbase_controller.go @@ -23,7 +23,6 @@ import ( "github.com/GoogleCloudPlatform/k8s-config-connector/operator/pkg/apis/core/v1beta1" "github.com/GoogleCloudPlatform/k8s-config-connector/operator/pkg/kccstate" - kcciamclient "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/iam/iamclient" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/jitter" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/lifecyclehandler" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/metrics" @@ -254,7 +253,7 @@ func (r *reconcileContext) doReconcile(ctx context.Context, u *unstructured.Unst } if !k8s.HasAbandonAnnotation(u) { if _, err := adapter.Delete(ctx); err != nil { - if !errors.Is(err, kcciamclient.ErrNotFound) && !k8s.IsReferenceNotFoundError(err) { + if !errors.Is(err, k8s.ErrIAMNotFound) && !k8s.IsReferenceNotFoundError(err) { if unwrappedErr, ok := lifecyclehandler.CausedByUnresolvableDeps(err); ok { logger.Info(unwrappedErr.Error(), "resource", k8s.GetNamespacedName(u)) resource, err := toK8sResource(u) diff --git a/pkg/controller/direct/directbase/interfaces.go b/pkg/controller/direct/directbase/interfaces.go index 38d7fb4d8f..7d070b2285 100644 --- a/pkg/controller/direct/directbase/interfaces.go +++ b/pkg/controller/direct/directbase/interfaces.go @@ -26,6 +26,9 @@ type Model interface { // AdapterForObject builds an operation object for reconciling the object u. // If there are references, AdapterForObject should dereference them before returning (using reader) AdapterForObject(ctx context.Context, reader client.Reader, u *unstructured.Unstructured) (Adapter, error) + + // AdapterForURL builds an operation object for exporting the object u. + AdapterForURL(ctx context.Context, url string) (Adapter, error) } // Adapter performs a single reconciliation on a single object. 
diff --git a/pkg/controller/direct/export.go b/pkg/controller/direct/export.go index 6139c8438c..c17e43e826 100644 --- a/pkg/controller/direct/export.go +++ b/pkg/controller/direct/export.go @@ -17,88 +17,36 @@ package direct import ( "context" "fmt" - "strings" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/config" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/registry" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/client" ) // Export attempts to export the resource specified by url. // The url format should match the Cloud-Asset-Inventory format: https://cloud.google.com/asset-inventory/docs/resource-name-format // If url is not recognized or not implemented by a direct controller, this returns (nil, nil) func Export(ctx context.Context, url string, config *config.ControllerConfig) (*unstructured.Unstructured, error) { - if strings.HasPrefix(url, "//logging.googleapis.com/") { - tokens := strings.Split(strings.TrimPrefix(url, "//logging.googleapis.com/"), "/") - if len(tokens) == 4 && tokens[0] == "projects" && tokens[2] == "metrics" { - model, err := registry.GetModel(schema.GroupKind{Group: "logging.cnrm.cloud.google.com", Kind: "LoggingLogMetric"}) - if err != nil { - return nil, err - } - in := &unstructured.Unstructured{} - in.SetName(tokens[3]) - if err := unstructured.SetNestedField(in.Object, tokens[1], "spec", "projectRef", "external"); err != nil { - return nil, err - } - - var reader client.Reader // TODO: Create erroring reader? - a, err := model.AdapterForObject(ctx, reader, in) - if err != nil { - return nil, err - } - found, err := a.Find(ctx) - if err != nil { - return nil, err - } - if !found { - return nil, fmt.Errorf("resource %q is not found", url) - } - - u, err := a.Export(ctx) - if err != nil { - return nil, err - } - - return u, nil - } + adapter, err := registry.AdapterForURL(ctx, url) + if err != nil { + return nil, err } + if adapter != nil { + found, err := adapter.Find(ctx) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("resource %q is not found", url) + } - //monitoring.googleapis.com/projects/PROJECT_NUMBER/dashboards/DASHBOARD_ID - if strings.HasPrefix(url, "//monitoring.googleapis.com/") { - tokens := strings.Split(strings.TrimPrefix(url, "//monitoring.googleapis.com/"), "/") - if len(tokens) == 4 && tokens[0] == "projects" && tokens[2] == "dashboards" { - model, err := registry.GetModel(schema.GroupKind{Group: "monitoring.cnrm.cloud.google.com", Kind: "MonitoringDashboard"}) - if err != nil { - return nil, err - } - in := &unstructured.Unstructured{} - in.SetName(tokens[3]) - if err := unstructured.SetNestedField(in.Object, tokens[1], "spec", "projectRef", "external"); err != nil { - return nil, err - } - - var reader client.Reader // TODO: Create erroring reader? 
- a, err := model.AdapterForObject(ctx, reader, in) - if err != nil { - return nil, err - } - found, err := a.Find(ctx) - if err != nil { - return nil, err - } - if !found { - return nil, fmt.Errorf("resource %q is not found", url) - } - - u, err := a.Export(ctx) - if err != nil { - return nil, err - } - - return u, nil + u, err := adapter.Export(ctx) + if err != nil { + return nil, err } + + return u, nil } + return nil, nil } diff --git a/pkg/controller/direct/gkehub/featuremembership_controller.go b/pkg/controller/direct/gkehub/featuremembership_controller.go index 7063abd897..1fce73f15a 100644 --- a/pkg/controller/direct/gkehub/featuremembership_controller.go +++ b/pkg/controller/direct/gkehub/featuremembership_controller.go @@ -117,6 +117,10 @@ func (m *gkeHubModel) AdapterForObject(ctx context.Context, reader client.Reader }, nil } +func (m *gkeHubModel) AdapterForURL(ctx context.Context, url string) (directbase.Adapter, error) { + return nil, nil +} + func resolveIAMReferences(ctx context.Context, reader client.Reader, obj *krm.GKEHubFeatureMembership) error { spec := obj.Spec if spec.Configmanagement != nil && spec.Configmanagement.ConfigSync != nil { diff --git a/pkg/controller/direct/iam.go b/pkg/controller/direct/iam.go new file mode 100644 index 0000000000..31bb65b360 --- /dev/null +++ b/pkg/controller/direct/iam.go @@ -0,0 +1,189 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package direct + +import ( + "context" + "fmt" + + "cloud.google.com/go/iam/apiv1/iampb" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/apis/iam/v1beta1" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/registry" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type IAMAdapter interface { + GetIAMPolicy(ctx context.Context) (*iampb.Policy, error) + SetIAMPolicy(ctx context.Context, policy *iampb.Policy) (*iampb.Policy, error) +} + +// GetIAMPolicyMember returns the actual IAMPolicyMember for the specified member and referenced resource. 
+func GetIAMPolicyMember(ctx context.Context, reader client.Reader, want *v1beta1.IAMPolicyMember, memberID v1beta1.Member) (*v1beta1.IAMPolicyMember, error) { + adapter, err := registry.AdapterForReference(ctx, reader, want.GetNamespace(), want.Spec.ResourceReference) + if err != nil { + return nil, fmt.Errorf("building adapter: %w", err) + } + iamAdapter, ok := adapter.(IAMAdapter) + if !ok { + return nil, fmt.Errorf("adapter does not implement IAMAdapter") + } + + policy, err := iamAdapter.GetIAMPolicy(ctx) + if err != nil { + return nil, fmt.Errorf("getting IAM policy: %w", err) + } + + actual := &v1beta1.IAMPolicyMember{} + actual.ObjectMeta = want.ObjectMeta + actual.Spec = v1beta1.IAMPolicyMemberSpec{ + ResourceReference: want.Spec.ResourceReference, + } + + actual.Spec.Member = memberID + + for _, binding := range policy.Bindings { + if binding.Role != want.Spec.Role { + continue + } + for _, member := range binding.Members { + if member == string(memberID) { + actual.Spec.Role = want.Spec.Role + } + } + } + return actual, nil +} + +// SetIAMPolicyMember will update the IAM policy for the specified member +func SetIAMPolicyMember(ctx context.Context, reader client.Reader, want *v1beta1.IAMPolicyMember, memberID v1beta1.Member) (*v1beta1.IAMPolicyMember, error) { + adapter, err := registry.AdapterForReference(ctx, reader, want.GetNamespace(), want.Spec.ResourceReference) + if err != nil { + return nil, fmt.Errorf("building adapter: %w", err) + } + iamAdapter, ok := adapter.(IAMAdapter) + if !ok { + return nil, fmt.Errorf("adapter does not implement IAMAdapter") + } + + policy, err := iamAdapter.GetIAMPolicy(ctx) + if err != nil { + return nil, fmt.Errorf("getting IAM policy: %w", err) + } + + var binding *iampb.Binding + for _, b := range policy.Bindings { + if b.Role != want.Spec.Role { + continue + } + binding = b + } + + if binding == nil { + binding = &iampb.Binding{ + Role: want.Spec.Role, + } + policy.Bindings = append(policy.Bindings, binding) + } + + hasMember := false + for _, member := range binding.Members { + if member == string(memberID) { + hasMember = true + } + } + latest := policy + if !hasMember { + binding.Members = append(binding.Members, string(memberID)) + newPolicy, err := iamAdapter.SetIAMPolicy(ctx, policy) + if err != nil { + return nil, fmt.Errorf("setting IAM policy: %w", err) + } + latest = newPolicy + } + + actual := &v1beta1.IAMPolicyMember{} + actual.ObjectMeta = want.ObjectMeta + actual.Spec = v1beta1.IAMPolicyMemberSpec{ + ResourceReference: want.Spec.ResourceReference, + } + + actual.Spec.Member = memberID + + for _, binding := range latest.Bindings { + if binding.Role != want.Spec.Role { + continue + } + for _, member := range binding.Members { + if member == string(memberID) { + actual.Spec.Role = want.Spec.Role + } + } + } + return actual, nil +} + +// DeleteIAMPolicyMember will remove the specified member for the IAM policy for a resource +func DeleteIAMPolicyMember(ctx context.Context, reader client.Reader, want *v1beta1.IAMPolicyMember, removeMember v1beta1.Member) error { + log := klog.FromContext(ctx) + + adapter, err := registry.AdapterForReference(ctx, reader, want.GetNamespace(), want.Spec.ResourceReference) + if err != nil { + return fmt.Errorf("building adapter: %w", err) + } + iamAdapter, ok := adapter.(IAMAdapter) + if !ok { + return fmt.Errorf("adapter does not implement IAMAdapter") + } + + policy, err := iamAdapter.GetIAMPolicy(ctx) + if err != nil { + return fmt.Errorf("getting IAM policy: %w", err) + } + + var binding 
*iampb.Binding + for _, b := range policy.Bindings { + if b.Role != want.Spec.Role { + continue + } + binding = b + } + + if binding == nil { + return nil + } + + var newMembers []string + removedMember := false + for _, member := range binding.Members { + if member == string(removeMember) { + removedMember = true + continue + } + newMembers = append(newMembers, member) + } + binding.Members = newMembers + + if !removedMember { + return nil + } + newPolicy, err := iamAdapter.SetIAMPolicy(ctx, policy) + if err != nil { + return fmt.Errorf("setting IAM policy: %w", err) + } + + log.Info("updated iam policy to remove member", "updatedPolicy", newPolicy, "member", removeMember) + return nil +} diff --git a/pkg/controller/direct/logging/logmetric_controller.go b/pkg/controller/direct/logging/logmetric_controller.go index 4d8fa584e5..e01c96ea7e 100644 --- a/pkg/controller/direct/logging/logmetric_controller.go +++ b/pkg/controller/direct/logging/logmetric_controller.go @@ -18,6 +18,7 @@ import ( "context" "fmt" "reflect" + "strings" api "google.golang.org/api/logging/v2" corev1 "k8s.io/api/core/v1" @@ -108,6 +109,34 @@ func (m *logMetricModel) AdapterForObject(ctx context.Context, reader client.Rea }, nil } +func (m *logMetricModel) AdapterForURL(ctx context.Context, url string) (directbase.Adapter, error) { + // Format: //logging.googleapis.com/projects//metrics/ + if !strings.HasPrefix(url, "//logging.googleapis.com/") { + return nil, nil + } + + tokens := strings.Split(strings.TrimPrefix(url, "//logging.googleapis.com/"), "/") + if len(tokens) == 4 && tokens[0] == "projects" && tokens[2] == "metrics" { + gcpClient, err := newGCPClient(ctx, m.config) + if err != nil { + return nil, err + } + + projectMetricsService, err := gcpClient.newProjectMetricsService(ctx) + if err != nil { + return nil, err + } + + return &logMetricAdapter{ + projectID: tokens[1], + resourceID: tokens[3], + logMetricClient: projectMetricsService, + }, nil + } + + return nil, nil +} + func (a *logMetricAdapter) Find(ctx context.Context) (bool, error) { if a.resourceID == "" { return false, nil diff --git a/pkg/controller/direct/monitoring/monitoringdashboard_controller.go b/pkg/controller/direct/monitoring/monitoringdashboard_controller.go index c04c129a01..a9a165bc77 100644 --- a/pkg/controller/direct/monitoring/monitoringdashboard_controller.go +++ b/pkg/controller/direct/monitoring/monitoringdashboard_controller.go @@ -17,6 +17,7 @@ package monitoring import ( "context" "fmt" + "strings" api "cloud.google.com/go/monitoring/dashboard/apiv1" pb "cloud.google.com/go/monitoring/dashboard/apiv1/dashboardpb" @@ -113,6 +114,35 @@ func (m *dashboardModel) AdapterForObject(ctx context.Context, kube client.Reade }, nil } +func (m *dashboardModel) AdapterForURL(ctx context.Context, url string) (directbase.Adapter, error) { + // Format: //monitoring.googleapis.com/projects/PROJECT_NUMBER/dashboards/DASHBOARD_ID + if !strings.HasPrefix(url, "//monitoring.googleapis.com/") { + return nil, nil + } + + tokens := strings.Split(strings.TrimPrefix(url, "//monitoring.googleapis.com/"), "/") + if len(tokens) == 4 && tokens[0] == "projects" && tokens[2] == "dashboards" { + gcpClient, err := newGCPClient(ctx, m.config) + if err != nil { + return nil, fmt.Errorf("building gcp client: %w", err) + } + + dashboardsClient, err := gcpClient.newDashboardsClient(ctx) + if err != nil { + return nil, err + } + + return &dashboardAdapter{ + projectID: tokens[1], + resourceID: tokens[3], + dashboardsClient: dashboardsClient, + }, nil + } + + return 
nil, nil + +} + // Find implements the Adapter interface. func (a *dashboardAdapter) Find(ctx context.Context) (bool, error) { if a.resourceID == "" { diff --git a/pkg/controller/direct/privateca/client.go b/pkg/controller/direct/privateca/client.go new file mode 100644 index 0000000000..ae228ffd7a --- /dev/null +++ b/pkg/controller/direct/privateca/client.go @@ -0,0 +1,70 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package privateca + +import ( + "context" + "fmt" + + api "cloud.google.com/go/security/privateca/apiv1" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/config" + "google.golang.org/api/option" +) + +type gcpClient struct { + config config.ControllerConfig +} + +func newGCPClient(ctx context.Context, config *config.ControllerConfig) (*gcpClient, error) { + gcpClient := &gcpClient{ + config: *config, + } + return gcpClient, nil +} + +func (m *gcpClient) options() ([]option.ClientOption, error) { + var opts []option.ClientOption + // TODO: Support for useragent + if m.config.UserAgent != "" { + opts = append(opts, option.WithUserAgent(m.config.UserAgent)) + } + if m.config.HTTPClient != nil { + // TODO: Set UserAgent in this scenario (error is: WithHTTPClient is incompatible with gRPC dial options) + opts = append(opts, option.WithHTTPClient(m.config.HTTPClient)) + } + if m.config.UserProjectOverride && m.config.BillingProject != "" { + opts = append(opts, option.WithQuotaProject(m.config.BillingProject)) + } + + // TODO: support endpoints? + // if m.config.Endpoint != "" { + // opts = append(opts, option.WithEndpoint(m.config.Endpoint)) + // } + + return opts, nil +} + +func (m *gcpClient) newCertificateAuthorityClient(ctx context.Context) (*api.CertificateAuthorityClient, error) { + opts, err := m.options() + if err != nil { + return nil, err + } + service, err := api.NewCertificateAuthorityRESTClient(ctx, opts...) + if err != nil { + return nil, fmt.Errorf("building service for certificate authority: %w", err) + } + + return service, nil +} diff --git a/pkg/controller/direct/privateca/privatecapool_controller.go b/pkg/controller/direct/privateca/privatecapool_controller.go new file mode 100644 index 0000000000..5b8e4befae --- /dev/null +++ b/pkg/controller/direct/privateca/privatecapool_controller.go @@ -0,0 +1,212 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package privateca + +import ( + "context" + "fmt" + "strings" + + iampb "cloud.google.com/go/iam/apiv1/iampb" + api "cloud.google.com/go/security/privateca/apiv1" + pb "cloud.google.com/go/security/privateca/apiv1/privatecapb" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + krm "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/privateca/v1beta1" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/config" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/directbase" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/references" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/registry" +) + +func init() { + registry.RegisterModel(krm.PrivateCACAPoolGVK, newCAPoolModel) +} + +func newCAPoolModel(ctx context.Context, config *config.ControllerConfig) (directbase.Model, error) { + gcpClient, err := newGCPClient(ctx, config) + if err != nil { + return nil, fmt.Errorf("building GCP client: %w", err) + } + return &caPoolModel{gcpClient: gcpClient}, nil +} + +type caPoolModel struct { + *gcpClient +} + +// model implements the Model interface. +var _ directbase.Model = &caPoolModel{} + +type caPoolAdapter struct { + projectID string + location string + caPoolID string + + desired *krm.PrivateCACAPool + actual *pb.CaPool + caClient *api.CertificateAuthorityClient +} + +var _ directbase.Adapter = &caPoolAdapter{} + +// AdapterForObject implements the Model interface. +func (m *caPoolModel) AdapterForObject(ctx context.Context, reader client.Reader, u *unstructured.Unstructured) (directbase.Adapter, error) { + caClient, err := m.newCertificateAuthorityClient(ctx) + if err != nil { + return nil, err + } + + obj := &krm.PrivateCACAPool{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, &obj); err != nil { + return nil, fmt.Errorf("error converting to %T: %w", obj, err) + } + + resourceID := ValueOf(obj.Spec.ResourceID) + if resourceID == "" { + resourceID = obj.GetName() + } + if resourceID == "" { + return nil, fmt.Errorf("cannot resolve resource ID") + } + + location := obj.Spec.Location + if location == "" { + return nil, fmt.Errorf("cannot resolve location") + } + + projectRef, err := references.ResolveProject(ctx, reader, obj, references.AsProjectRef(&obj.Spec.ProjectRef)) + if err != nil { + return nil, err + } + projectID := projectRef.ProjectID + if projectID == "" { + return nil, fmt.Errorf("cannot resolve project") + } + + return &caPoolAdapter{ + caPoolID: resourceID, + location: location, + projectID: projectID, + desired: obj, + caClient: caClient, + }, nil +} + +func (m *caPoolModel) AdapterForURL(ctx context.Context, url string) (directbase.Adapter, error) { + // Format is //privateca.googleapis.com/projects/PROJECT_ID/locations/LOCATION/caPools/CA_POOL_ID + + if !strings.HasPrefix(url, "//privateca.googleapis.com/") { + return nil, nil + } + + tokens := strings.Split(strings.TrimPrefix(url, "//privateca.googleapis.com/"), "/") + if len(tokens) == 6 && tokens[0] == "projects" && tokens[2] == "locations" && tokens[4] == "caPools" { + caClient, err := m.newCertificateAuthorityClient(ctx) + if err != nil { + return nil, err + } + + return &caPoolAdapter{ + projectID: tokens[1], + location: tokens[3], + caPoolID: tokens[5], + caClient: caClient, + }, nil + } + + return nil, nil +} + +// Delete implements the Adapter interface. 
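+// NOTE: Create, Update, Delete, and Export are not implemented yet; for now this adapter
+// exists mainly to serve Find and the IAM policy methods below.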
+
+func (a *caPoolAdapter) Delete(ctx context.Context) (bool, error) {
+	return false, fmt.Errorf("not implemented")
+}
+
+// Create implements the Adapter interface.
+func (a *caPoolAdapter) Create(ctx context.Context, u *unstructured.Unstructured) error {
+	return fmt.Errorf("not implemented")
+}
+
+// Update implements the Adapter interface.
+func (a *caPoolAdapter) Update(ctx context.Context, u *unstructured.Unstructured) error {
+	return fmt.Errorf("not implemented")
+}
+
+// Export implements the Adapter interface.
+func (a *caPoolAdapter) Export(ctx context.Context) (*unstructured.Unstructured, error) {
+	return nil, fmt.Errorf("not implemented")
+}
+
+// Find implements the Adapter interface.
+func (a *caPoolAdapter) Find(ctx context.Context) (bool, error) {
+	if a.caPoolID == "" {
+		return false, nil
+	}
+
+	req := &pb.GetCaPoolRequest{
+		Name: a.fullyQualifiedName(),
+	}
+	caPool, err := a.caClient.GetCaPool(ctx, req)
+	if err != nil {
+		if IsNotFound(err) {
+			return false, nil
+		}
+		return false, fmt.Errorf("getting caPool %q: %w", a.fullyQualifiedName(), err)
+	}
+
+	a.actual = caPool
+
+	return true, nil
+}
+
+// GetIAMPolicy returns the IAM policy currently set on the CA pool.
+func (a *caPoolAdapter) GetIAMPolicy(ctx context.Context) (*iampb.Policy, error) {
+	if a.caPoolID == "" {
+		return nil, fmt.Errorf("cannot get iam policy for missing resource")
+	}
+
+	req := &iampb.GetIamPolicyRequest{
+		Resource: a.fullyQualifiedName(),
+	}
+	policy, err := a.caClient.GetIamPolicy(ctx, req)
+	if err != nil {
+		return nil, fmt.Errorf("getting iam policy for %q: %w", a.fullyQualifiedName(), err)
+	}
+
+	return policy, nil
+}
+
+// SetIAMPolicy replaces the IAM policy on the CA pool.
+func (a *caPoolAdapter) SetIAMPolicy(ctx context.Context, policy *iampb.Policy) (*iampb.Policy, error) {
+	if a.caPoolID == "" {
+		return nil, fmt.Errorf("cannot set iam policy for missing resource")
+	}
+
+	req := &iampb.SetIamPolicyRequest{
+		Resource: a.fullyQualifiedName(),
+		Policy:   policy,
+	}
+	newPolicy, err := a.caClient.SetIamPolicy(ctx, req)
+	if err != nil {
+		return nil, fmt.Errorf("setting iam policy for %q: %w", a.fullyQualifiedName(), err)
+	}
+
+	return newPolicy, nil
+}
+
+func (a *caPoolAdapter) fullyQualifiedName() string {
+	return fmt.Sprintf("projects/%s/locations/%s/caPools/%s", a.projectID, a.location, a.caPoolID)
+}
diff --git a/pkg/controller/direct/privateca/utils.go b/pkg/controller/direct/privateca/utils.go
new file mode 100644
index 0000000000..e01a625ce2
--- /dev/null
+++ b/pkg/controller/direct/privateca/utils.go
@@ -0,0 +1,64 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package privateca
+
+import (
+	"errors"
+
+	"github.com/googleapis/gax-go/v2/apierror"
+	"k8s.io/klog/v2"
+)
+
+// todo acpana: add to factor out to top level package
+// todo acpana: begin
+func ValueOf[T any](p *T) T {
+	var v T
+	if p != nil {
+		v = *p
+	}
+	return v
+}
+
+// LazyPtr returns a pointer to v, unless it is the empty value, in which case it returns nil.
+// It is essentially the inverse of ValueOf, though it is lossy +// because we can't tell nil and empty apart without a pointer. +func LazyPtr[T comparable](v T) *T { + var defaultValue T + if v == defaultValue { + return nil + } + return &v +} + +// IsNotFound returns true if the given error is an HTTP 404. +func IsNotFound(err error) bool { + return HasHTTPCode(err, 404) +} + +// HasHTTPCode returns true if the given error is an HTTP response with the given code. +func HasHTTPCode(err error, code int) bool { + if err == nil { + return false + } + apiError := &apierror.APIError{} + if errors.As(err, &apiError) { + if apiError.HTTPCode() == code { + return true + } + } else { + klog.Warningf("unexpected error type %T", err) + } + return false +} diff --git a/pkg/controller/direct/references/projectref.go b/pkg/controller/direct/references/projectref.go index 435b46d263..8295d47761 100644 --- a/pkg/controller/direct/references/projectref.go +++ b/pkg/controller/direct/references/projectref.go @@ -20,6 +20,7 @@ import ( "strings" refs "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -92,3 +93,16 @@ func ResolveProject(ctx context.Context, reader client.Reader, src client.Object ProjectID: projectID, }, nil } + +// AsProjectRef converts a generic ResourceRef into a ProjectRef +func AsProjectRef(in *v1alpha1.ResourceRef) *refs.ProjectRef { + if in == nil { + return nil + } + return &refs.ProjectRef{ + Namespace: in.Namespace, + Name: in.Name, + External: in.External, + Kind: in.Kind, + } +} diff --git a/pkg/controller/direct/register/register.go b/pkg/controller/direct/register/register.go index 23a9957548..fd38591091 100644 --- a/pkg/controller/direct/register/register.go +++ b/pkg/controller/direct/register/register.go @@ -21,5 +21,6 @@ import ( _ "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/gkehub" _ "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/logging" _ "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/monitoring" + _ "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/privateca" _ "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/resourcemanager" ) diff --git a/pkg/controller/direct/registry/references.go b/pkg/controller/direct/registry/references.go new file mode 100644 index 0000000000..53b2a878c8 --- /dev/null +++ b/pkg/controller/direct/registry/references.go @@ -0,0 +1,98 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package registry
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/GoogleCloudPlatform/k8s-config-connector/pkg/apis/iam/v1beta1"
+	"github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/directbase"
+	"github.com/GoogleCloudPlatform/k8s-config-connector/pkg/k8s"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// AdapterForReference resolves an IAM resource reference (external or namespaced) to a
+// direct-actuation adapter.
+func AdapterForReference(ctx context.Context, reader client.Reader, sourceNamespace string, resourceRef v1beta1.ResourceReference) (directbase.Adapter, error) {
+	obj := &unstructured.Unstructured{}
+
+	var gk schema.GroupKind
+	switch resourceRef.Kind {
+	default:
+		gk = resourceRef.GroupVersionKind().GroupKind()
+	}
+
+	if gk.Group == "" {
+		return nil, fmt.Errorf("cannot find group for reference %v (must set apiVersion)", resourceRef)
+	}
+
+	if resourceRef.External != "" {
+		uri := resourceRef.External
+		if !strings.HasPrefix(uri, "//") {
+			switch gk.Group {
+			case "privateca.cnrm.cloud.google.com":
+				uri = "//privateca.googleapis.com/" + resourceRef.External
+			default:
+				return nil, fmt.Errorf("unknown format for external reference for %v: %q", gk, resourceRef.External)
+			}
+		}
+
+		adapter, err := AdapterForURL(ctx, uri)
+		if err != nil {
+			return nil, fmt.Errorf("resolving %q: %w", uri, err)
+		}
+		if adapter == nil {
+			return nil, fmt.Errorf("unknown format for external reference for %v: %q", gk, resourceRef.External)
+		}
+		return adapter, nil
+	}
+
+	model, err := GetModel(gk)
+	if err != nil {
+		return nil, fmt.Errorf("cannot handle references to %v (in direct controller)", gk)
+	}
+
+	gvk, ok := PreferredGVK(gk)
+	if !ok {
+		return nil, fmt.Errorf("preferred GVK is not known for %v", gk)
+	}
+
+	obj.SetGroupVersionKind(gvk)
+	nn := types.NamespacedName{
+		Namespace: resourceRef.Namespace,
+		Name:      resourceRef.Name,
+	}
+	if nn.Namespace == "" {
+		nn.Namespace = sourceNamespace
+	}
+
+	if err := reader.Get(ctx, nn, obj); err != nil {
+		if apierrors.IsNotFound(err) {
+			return nil, k8s.NewReferenceNotFoundError(gvk, nn)
+		}
+		return nil, fmt.Errorf("error retrieving resource '%v' with GroupVersionKind '%v': %w", nn, gvk, err)
+	}
+
+	adapter, err := model.AdapterForObject(ctx, reader, obj)
+	if err != nil {
+		return nil, fmt.Errorf("building adapter: %w", err)
+	}
+
+	return adapter, nil
+}
diff --git a/pkg/controller/direct/registry/registry.go b/pkg/controller/direct/registry/registry.go
index 3a12265fc5..8b05546ef6 100644
--- a/pkg/controller/direct/registry/registry.go
+++ b/pkg/controller/direct/registry/registry.go
@@ -53,6 +53,23 @@ func PreferredGVK(gk schema.GroupKind) (schema.GroupVersionKind, bool) {
 	return registration.gvk, true
 }
 
+// AdapterForURL will return a directbase.Adapter bound to the resource specified by the URL,
+// or (nil, nil) if it is not recognized.
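+// Each registered model is consulted; the first one to return a non-nil adapter is used.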
+func AdapterForURL(ctx context.Context, url string) (directbase.Adapter, error) { + for _, registration := range singleton.registrations { + if registration.model == nil { + return nil, fmt.Errorf("registry was not initialized") + } + adapter, err := registration.model.AdapterForURL(ctx, url) + if err != nil { + return nil, err + } + if adapter != nil { + return adapter, nil + } + } + return nil, nil +} func Init(ctx context.Context, config *config.ControllerConfig) error { for _, registration := range singleton.registrations { model, err := registration.factory(ctx, config) diff --git a/pkg/controller/direct/resourcemanager/tagkey_controller.go b/pkg/controller/direct/resourcemanager/tagkey_controller.go index 39bd057340..cbe3484552 100644 --- a/pkg/controller/direct/resourcemanager/tagkey_controller.go +++ b/pkg/controller/direct/resourcemanager/tagkey_controller.go @@ -94,6 +94,10 @@ func (m *tagKeyModel) AdapterForObject(ctx context.Context, reader client.Reader }, nil } +func (m *tagKeyModel) AdapterForURL(ctx context.Context, url string) (directbase.Adapter, error) { + return nil, nil +} + // Find implements the Adapter interface. func (a *tagKeyAdapter) Find(ctx context.Context) (bool, error) { if a.resourceID == "" { diff --git a/pkg/controller/iam/iamclient/iamclient.go b/pkg/controller/iam/iamclient/iamclient.go index 7573e16f7e..0b7fbb4fdc 100644 --- a/pkg/controller/iam/iamclient/iamclient.go +++ b/pkg/controller/iam/iamclient/iamclient.go @@ -20,11 +20,14 @@ import ( "regexp" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/apis/iam/v1beta1" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/registry" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/dcl/conversion" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/servicemapping/servicemappingloader" mmdcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" dcliam "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/k8s" tfschema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" @@ -54,7 +57,7 @@ const ( ) var ( - ErrNotFound = fmt.Errorf("IAM resource does not exist") + ErrNotFound = k8s.ErrIAMNotFound logger = klog.Log.WithName("iamclient") ProjectGVK = schema.GroupVersionKind{ @@ -91,6 +94,7 @@ var idTemplateVarsRegex = regexp.MustCompile(`{{[a-z]([a-zA-Z0-9\-_.]*[a-zA-Z0-9 type IAMClient struct { TFIAMClient *TFIAMClient DCLIAMClient *DCLIAMClient + kubeClient client.Client } func New(tfProvider *tfschema.Provider, @@ -114,11 +118,21 @@ func New(tfProvider *tfschema.Provider, iamClient := IAMClient{ TFIAMClient: &tfIAMClient, DCLIAMClient: &dclIAMClient, + kubeClient: kubeClient, } return &iamClient } func (c *IAMClient) SetPolicyMember(ctx context.Context, policyMember *v1beta1.IAMPolicyMember) (*v1beta1.IAMPolicyMember, error) { + if registry.IsIAMDirect(policyMember.Spec.ResourceReference.GroupVersionKind().GroupKind()) { + id, err := ResolveMemberIdentity(ctx, policyMember.Spec.Member, policyMember.Spec.MemberFrom, policyMember.GetNamespace(), c.TFIAMClient) + if err != nil { + return nil, err + } + + return direct.SetIAMPolicyMember(ctx, c.kubeClient, policyMember, v1beta1.Member(id)) + } + if c.isDCLBasedIAMResource(policyMember) { return c.DCLIAMClient.SetPolicyMember(ctx, 
c.TFIAMClient, policyMember) } @@ -126,6 +140,14 @@ func (c *IAMClient) SetPolicyMember(ctx context.Context, policyMember *v1beta1.I } func (c *IAMClient) GetPolicyMember(ctx context.Context, policyMember *v1beta1.IAMPolicyMember) (*v1beta1.IAMPolicyMember, error) { + if registry.IsIAMDirect(policyMember.Spec.ResourceReference.GroupVersionKind().GroupKind()) { + id, err := ResolveMemberIdentity(ctx, policyMember.Spec.Member, policyMember.Spec.MemberFrom, policyMember.GetNamespace(), c.TFIAMClient) + if err != nil { + return nil, err + } + + return direct.GetIAMPolicyMember(ctx, c.kubeClient, policyMember, v1beta1.Member(id)) + } if c.isDCLBasedIAMResource(policyMember) { return c.DCLIAMClient.GetPolicyMember(ctx, c.TFIAMClient, policyMember) } @@ -133,6 +155,15 @@ func (c *IAMClient) GetPolicyMember(ctx context.Context, policyMember *v1beta1.I } func (c *IAMClient) DeletePolicyMember(ctx context.Context, policyMember *v1beta1.IAMPolicyMember) error { + if registry.IsIAMDirect(policyMember.Spec.ResourceReference.GroupVersionKind().GroupKind()) { + id, err := ResolveMemberIdentity(ctx, policyMember.Spec.Member, policyMember.Spec.MemberFrom, policyMember.GetNamespace(), c.TFIAMClient) + if err != nil { + return err + } + + return direct.DeleteIAMPolicyMember(ctx, c.kubeClient, policyMember, v1beta1.Member(id)) + } + if c.isDCLBasedIAMResource(policyMember) { return c.DCLIAMClient.DeletePolicyMember(ctx, c.TFIAMClient, policyMember) diff --git a/pkg/k8s/errors.go b/pkg/k8s/errors.go index 1763ee9bbc..f095c15392 100644 --- a/pkg/k8s/errors.go +++ b/pkg/k8s/errors.go @@ -87,6 +87,8 @@ func IsReferenceNotFoundError(err error) bool { return ok } +var ErrIAMNotFound = fmt.Errorf("IAM resource does not exist") + type SecretNotFoundError struct { Secret types.NamespacedName } diff --git a/pkg/webhook/iam_validator.go b/pkg/webhook/iam_validator.go index d815e3c16a..adf4dc850c 100644 --- a/pkg/webhook/iam_validator.go +++ b/pkg/webhook/iam_validator.go @@ -21,6 +21,7 @@ import ( "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/apis/core/v1alpha1" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/apis/iam/v1beta1" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/registry" kcciamclient "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/iam/iamclient" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/dcl/extension" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/dcl/metadata" @@ -292,6 +293,7 @@ func (a *iamValidatorHandler) tfValidateIAMPartialPolicy(partialPolicy *v1beta1. 
func (a *iamValidatorHandler) dclValidateIAMPolicyMember(policyMember *v1beta1.IAMPolicyMember) admission.Response { resourceRef := policyMember.Spec.ResourceReference + // Check that DCL-based resource supports IAMPolicy dclSchema, resp := getDCLSchema(resourceRef.GroupVersionKind(), a.serviceMetadataLoader, a.schemaLoader) if !resp.Allowed { @@ -301,6 +303,10 @@ func (a *iamValidatorHandler) dclValidateIAMPolicyMember(policyMember *v1beta1.I if err != nil { return admission.Errored(http.StatusInternalServerError, err) } + // Beginnings of direct IAM support: direct-IAM added to existing DCL resource + if registry.IsIAMDirect(resourceRef.GroupVersionKind().GroupKind()) { + supportsIAM = true + } if !supportsIAM { return admission.Errored(http.StatusForbidden, fmt.Errorf("GroupVersionKind %v does not support IAM Policy Member", resourceRef.GroupVersionKind())) } diff --git a/tests/e2e/unified_test.go b/tests/e2e/unified_test.go index 64902de1eb..802714c0dd 100644 --- a/tests/e2e/unified_test.go +++ b/tests/e2e/unified_test.go @@ -549,6 +549,9 @@ func runScenario(ctx context.Context, t *testing.T, testPause bool, fixture reso addReplacement("insertTime", "2024-04-01T12:34:56.123456Z") addReplacement("user", "user@example.com") + // Specific to IAM/policy + addReplacement("policy.etag", "abcdef0123A=") + // Specific to vertexai addReplacement("blobStoragePathPrefix", "cloud-ai-platform-00000000-1111-2222-3333-444444444444") addReplacement("response.blobStoragePathPrefix", "cloud-ai-platform-00000000-1111-2222-3333-444444444444") @@ -607,12 +610,11 @@ func runScenario(ctx context.Context, t *testing.T, testPause bool, fixture reso addReplacement("serverCaCert.expirationTime", "2024-04-01T12:34:56.123456Z") // Specific to KMS - - addReplacement("policy.etag", "abcdef0123A=") addSetStringReplacement(".cryptoKeyVersions[].createTime", "2024-04-01T12:34:56.123456Z") addSetStringReplacement(".cryptoKeyVersions[].generateTime", "2024-04-01T12:34:56.123456Z") addReplacement("destroyTime", "2024-04-01T12:34:56.123456Z") addReplacement("generateTime", "2024-04-01T12:34:56.123456Z") + // Replace any empty values in LROs; this is surprisingly difficult to fix in mockgcp // // "response": { From 257598e6e9ccdd633f9404105d61bc910610cc31 Mon Sep 17 00:00:00 2001 From: justinsb Date: Wed, 22 May 2024 19:32:16 -0400 Subject: [PATCH 066/101] tests: add test for IAM for PrivateCACAPool --- ...ated_object_privatecacapooliam.golden.yaml | 30 + .../privatecacapooliam/_http.log | 818 ++++++++++++++++++ .../privatecacapooliam/create.yaml | 27 + .../privatecacapooliam/dependencies.yaml | 100 +++ 4 files changed, 975 insertions(+) create mode 100644 pkg/test/resourcefixture/testdata/basic/privateca/v1beta1/privatecacapool/privatecacapooliam/_generated_object_privatecacapooliam.golden.yaml create mode 100644 pkg/test/resourcefixture/testdata/basic/privateca/v1beta1/privatecacapool/privatecacapooliam/_http.log create mode 100644 pkg/test/resourcefixture/testdata/basic/privateca/v1beta1/privatecacapool/privatecacapooliam/create.yaml create mode 100644 pkg/test/resourcefixture/testdata/basic/privateca/v1beta1/privatecacapool/privatecacapooliam/dependencies.yaml diff --git a/pkg/test/resourcefixture/testdata/basic/privateca/v1beta1/privatecacapool/privatecacapooliam/_generated_object_privatecacapooliam.golden.yaml b/pkg/test/resourcefixture/testdata/basic/privateca/v1beta1/privatecacapool/privatecacapooliam/_generated_object_privatecacapooliam.golden.yaml new file mode 100644 index 0000000000..c9a58a241e --- /dev/null +++ 
b/pkg/test/resourcefixture/testdata/basic/privateca/v1beta1/privatecacapool/privatecacapooliam/_generated_object_privatecacapooliam.golden.yaml @@ -0,0 +1,30 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +metadata: + annotations: + cnrm.cloud.google.com/state-into-spec: merge + finalizers: + - cnrm.cloud.google.com/finalizer + - cnrm.cloud.google.com/deletion-defender + generation: 1 + labels: + cnrm-test: "true" + name: iampolicymember-${uniqueId} + namespace: ${uniqueId} +spec: + memberFrom: + serviceAccountRef: + name: privatecacapool-dep + resourceRef: + apiVersion: privateca.cnrm.cloud.google.com/v1beta1 + kind: PrivateCACAPool + name: privatecacapool-${uniqueId} + role: roles/privateca.admin +status: + conditions: + - lastTransitionTime: "1970-01-01T00:00:00Z" + message: The resource is up to date + reason: UpToDate + status: "True" + type: Ready + observedGeneration: 1 diff --git a/pkg/test/resourcefixture/testdata/basic/privateca/v1beta1/privatecacapool/privatecacapooliam/_http.log b/pkg/test/resourcefixture/testdata/basic/privateca/v1beta1/privatecacapool/privatecacapooliam/_http.log new file mode 100644 index 0000000000..7869a11f7f --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/privateca/v1beta1/privatecacapool/privatecacapooliam/_http.log @@ -0,0 +1,818 @@ +GET https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts/capool-${uniqueId}@${projectId}.iam.gserviceaccount.com?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +404 Not Found +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "error": { + "code": 404, + "errors": [ + { + "domain": "global", + "message": "Unknown service account", + "reason": "notFound" + } + ], + "message": "Unknown service account", + "status": "NOT_FOUND" + } +} + +--- + +POST https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts?alt=json&prettyPrint=false +Content-Type: application/json +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +{ + "accountId": "capool-${uniqueId}", + "serviceAccount": { + "displayName": "ExampleGSA" + } +} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "displayName": "ExampleGSA", + "email": "capool-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/serviceAccounts/capool-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "oauth2ClientId": "888888888888888888888", + "projectId": "${projectId}", + "uniqueId": "111111111111111111111" +} + +--- + +GET https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts/capool-${uniqueId}@${projectId}.iam.gserviceaccount.com?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer 
+X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "displayName": "ExampleGSA", + "email": "capool-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/serviceAccounts/capool-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "oauth2ClientId": "888888888888888888888", + "projectId": "${projectId}", + "uniqueId": "111111111111111111111" +} + +--- + +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +404 Not Found +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "error": { + "code": 404, + "message": "Resource 'projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}' was not found", + "status": "NOT_FOUND" + } +} + +--- + +POST https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools?alt=json&caPoolId=privatecacapool-${uniqueId} +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +{ + "issuancePolicy": { + "allowedIssuanceModes": { + "allowConfigBasedIssuance": false, + "allowCsrBasedIssuance": true + }, + "allowedKeyTypes": [ + { + "rsa": { + "maxModulusSize": 128, + "minModulusSize": 64 + } + }, + { + "ellipticCurve": { + "signatureAlgorithm": "ECDSA_P384" + } + } + ], + "baselineValues": { + "additionalExtensions": [ + { + "critical": false, + "objectId": { + "objectIdPath": [ + 1, + 7 + ] + }, + "value": "c3RyaW5nCg==" + } + ], + "aiaOcspServers": [ + "string" + ], + "caOptions": { + "isCa": false, + "maxIssuerPathLength": 7 + }, + "keyUsage": { + "baseKeyUsage": { + "certSign": false, + "contentCommitment": false, + "crlSign": false, + "dataEncipherment": false, + "decipherOnly": false, + "digitalSignature": false, + "encipherOnly": false, + "keyAgreement": false, + "keyEncipherment": false + }, + "extendedKeyUsage": { + "clientAuth": false, + "codeSigning": false, + "emailProtection": false, + "ocspSigning": false, + "serverAuth": false, + "timeStamping": false + }, + "unknownExtendedKeyUsages": [ + { + "objectIdPath": [ + 1, + 7 + ] + } + ] + }, + "policyIds": [ + { + "objectIdPath": [ + 1, + 7 + ] + } + ] + }, + "identityConstraints": { + "allowSubjectAltNamesPassthrough": false, + "allowSubjectPassthrough": false, + "celExpression": { + "description": "Always false", + "expression": "false", + "location": "devops.ca_pool.json", + "title": "Sample expression" + } + }, + "maximumLifetime": "43200s", + "passthroughExtensions": { + "additionalExtensions": [ + { + "objectIdPath": [ + 1, + 7 + ] + } + ], + "knownExtensions": [ + "BASE_KEY_USAGE" + ] + } + }, + "labels": { + "cnrm-test": "true", + "label-two": "value-two", + "managed-by-cnrm": "true" + }, + "name": "projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}", + "tier": "ENTERPRISE" +} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "metadata": { + "@type": "type.googleapis.com/google.cloud.security.privateca.v1.OperationMetadata", + "apiVersion": "v1", + "createTime": 
"2024-04-01T12:34:56.123456Z", + "target": "projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}", + "verb": "create" + }, + "name": "projects/${projectId}/locations/us-central1/operations/${operationID}" +} + +--- + +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/operations/${operationID}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "done": true, + "metadata": { + "@type": "type.googleapis.com/google.cloud.security.privateca.v1.OperationMetadata", + "apiVersion": "v1", + "createTime": "2024-04-01T12:34:56.123456Z", + "endTime": "2024-04-01T12:34:56.123456Z", + "target": "projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}", + "verb": "create" + }, + "name": "projects/${projectId}/locations/us-central1/operations/${operationID}", + "response": { + "@type": "type.googleapis.com/google.cloud.security.privateca.v1.CaPool", + "issuancePolicy": { + "allowedIssuanceModes": { + "allowCsrBasedIssuance": true + }, + "allowedKeyTypes": [ + { + "rsa": { + "maxModulusSize": "128", + "minModulusSize": "64" + } + }, + { + "ellipticCurve": { + "signatureAlgorithm": "ECDSA_P384" + } + } + ], + "baselineValues": { + "additionalExtensions": [ + { + "objectId": { + "objectIdPath": [ + 1, + 7 + ] + }, + "value": "c3RyaW5nCg==" + } + ], + "aiaOcspServers": [ + "string" + ], + "caOptions": { + "isCa": false, + "maxIssuerPathLength": 7 + }, + "keyUsage": { + "unknownExtendedKeyUsages": [ + { + "objectIdPath": [ + 1, + 7 + ] + } + ] + }, + "policyIds": [ + { + "objectIdPath": [ + 1, + 7 + ] + } + ] + }, + "identityConstraints": { + "allowSubjectAltNamesPassthrough": false, + "allowSubjectPassthrough": false, + "celExpression": { + "description": "Always false", + "expression": "false", + "location": "devops.ca_pool.json", + "title": "Sample expression" + } + }, + "maximumLifetime": "43200s", + "passthroughExtensions": { + "additionalExtensions": [ + { + "objectIdPath": [ + 1, + 7 + ] + } + ], + "knownExtensions": [ + "BASE_KEY_USAGE" + ] + } + }, + "labels": { + "cnrm-test": "true", + "label-two": "value-two", + "managed-by-cnrm": "true" + }, + "name": "projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}", + "tier": "ENTERPRISE" + } +} + +--- + +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "issuancePolicy": { + "allowedIssuanceModes": { + "allowCsrBasedIssuance": true + }, + "allowedKeyTypes": [ + { + "rsa": { + "maxModulusSize": "128", + "minModulusSize": "64" + } + }, + { + "ellipticCurve": { + "signatureAlgorithm": "ECDSA_P384" + } + } + ], + "baselineValues": { + "additionalExtensions": [ + { + "objectId": { + "objectIdPath": [ + 1, + 7 + ] + }, + "value": "c3RyaW5nCg==" + } + ], + "aiaOcspServers": [ + "string" + ], + "caOptions": { + "isCa": false, + "maxIssuerPathLength": 7 + }, + "keyUsage": { + 
"unknownExtendedKeyUsages": [ + { + "objectIdPath": [ + 1, + 7 + ] + } + ] + }, + "policyIds": [ + { + "objectIdPath": [ + 1, + 7 + ] + } + ] + }, + "identityConstraints": { + "allowSubjectAltNamesPassthrough": false, + "allowSubjectPassthrough": false, + "celExpression": { + "description": "Always false", + "expression": "false", + "location": "devops.ca_pool.json", + "title": "Sample expression" + } + }, + "maximumLifetime": "43200s", + "passthroughExtensions": { + "additionalExtensions": [ + { + "objectIdPath": [ + 1, + 7 + ] + } + ], + "knownExtensions": [ + "BASE_KEY_USAGE" + ] + } + }, + "labels": { + "cnrm-test": "true", + "label-two": "value-two", + "managed-by-cnrm": "true" + }, + "name": "projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}", + "tier": "ENTERPRISE" +} + +--- + +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}:getIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fprivatecacapool-${uniqueId} + + + +{ + "etag": "abcdef0123A=", + "version": 3 +} + +--- + +POST https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}:setIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fprivatecacapool-${uniqueId} + +{ + "policy": { + "bindings": [ + { + "members": [ + "serviceAccount:capool-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.admin" + } + ], + "etag": "abcdef0123A=", + "version": 3 + }, + "resource": "projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}" +} + + + +{ + "bindings": [ + { + "members": [ + "serviceAccount:capool-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.admin" + } + ], + "etag": "abcdef0123A=", + "version": 3 +} + +--- + +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}:getIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fprivatecacapool-${uniqueId} + + + +{ + "bindings": [ + { + "members": [ + "serviceAccount:capool-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.admin" + } + ], + "etag": "abcdef0123A=", + "version": 3 +} + +--- + +POST https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}:setIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fprivatecacapool-${uniqueId} + +{ + "policy": { + "bindings": [ + { + "role": "roles/privateca.admin" + } + ], + "etag": "abcdef0123A=", + "version": 3 + }, + "resource": "projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}" +} + + + +{ + "bindings": [ + { + "role": "roles/privateca.admin" + } + ], + "etag": "abcdef0123A=", + "version": 3 +} + +--- + +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private 
+Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "issuancePolicy": { + "allowedIssuanceModes": { + "allowCsrBasedIssuance": true + }, + "allowedKeyTypes": [ + { + "rsa": { + "maxModulusSize": "128", + "minModulusSize": "64" + } + }, + { + "ellipticCurve": { + "signatureAlgorithm": "ECDSA_P384" + } + } + ], + "baselineValues": { + "additionalExtensions": [ + { + "objectId": { + "objectIdPath": [ + 1, + 7 + ] + }, + "value": "c3RyaW5nCg==" + } + ], + "aiaOcspServers": [ + "string" + ], + "caOptions": { + "isCa": false, + "maxIssuerPathLength": 7 + }, + "keyUsage": { + "unknownExtendedKeyUsages": [ + { + "objectIdPath": [ + 1, + 7 + ] + } + ] + }, + "policyIds": [ + { + "objectIdPath": [ + 1, + 7 + ] + } + ] + }, + "identityConstraints": { + "allowSubjectAltNamesPassthrough": false, + "allowSubjectPassthrough": false, + "celExpression": { + "description": "Always false", + "expression": "false", + "location": "devops.ca_pool.json", + "title": "Sample expression" + } + }, + "maximumLifetime": "43200s", + "passthroughExtensions": { + "additionalExtensions": [ + { + "objectIdPath": [ + 1, + 7 + ] + } + ], + "knownExtensions": [ + "BASE_KEY_USAGE" + ] + } + }, + "labels": { + "cnrm-test": "true", + "label-two": "value-two", + "managed-by-cnrm": "true" + }, + "name": "projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}", + "tier": "ENTERPRISE" +} + +--- + +DELETE https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "metadata": { + "@type": "type.googleapis.com/google.cloud.security.privateca.v1.OperationMetadata", + "apiVersion": "v1", + "createTime": "2024-04-01T12:34:56.123456Z", + "target": "projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}", + "verb": "delete" + }, + "name": "projects/${projectId}/locations/us-central1/operations/${operationID}" +} + +--- + +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/operations/${operationID}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "done": true, + "metadata": { + "@type": "type.googleapis.com/google.cloud.security.privateca.v1.OperationMetadata", + "apiVersion": "v1", + "createTime": "2024-04-01T12:34:56.123456Z", + "endTime": "2024-04-01T12:34:56.123456Z", + "target": "projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}", + "verb": "delete" + }, + "name": "projects/${projectId}/locations/us-central1/operations/${operationID}", + "response": { + "@type": "type.googleapis.com/google.protobuf.Empty" + } +} + +--- + +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager 
DeclarativeClientLib/0.0.1 + +404 Not Found +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "error": { + "code": 404, + "message": "Resource 'projects/${projectId}/locations/us-central1/caPools/privatecacapool-${uniqueId}' was not found", + "status": "NOT_FOUND" + } +} + +--- + +GET https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts/capool-${uniqueId}@${projectId}.iam.gserviceaccount.com?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "displayName": "ExampleGSA", + "email": "capool-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/serviceAccounts/capool-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "oauth2ClientId": "888888888888888888888", + "projectId": "${projectId}", + "uniqueId": "111111111111111111111" +} + +--- + +DELETE https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts/capool-${uniqueId}@${projectId}.iam.gserviceaccount.com?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{} \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/privateca/v1beta1/privatecacapool/privatecacapooliam/create.yaml b/pkg/test/resourcefixture/testdata/basic/privateca/v1beta1/privatecacapool/privatecacapooliam/create.yaml new file mode 100644 index 0000000000..ef95aa99c2 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/privateca/v1beta1/privatecacapool/privatecacapooliam/create.yaml @@ -0,0 +1,27 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
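+# This test case grants roles/privateca.admin on the PrivateCACAPool from dependencies.yaml
+# to the IAMServiceAccount referenced via memberFrom.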
+ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +metadata: + name: iampolicymember-${uniqueId} +spec: + memberFrom: + serviceAccountRef: + name: privatecacapool-dep + role: roles/privateca.admin + resourceRef: + apiVersion: privateca.cnrm.cloud.google.com/v1beta1 + kind: PrivateCACAPool + name: privatecacapool-${uniqueId} diff --git a/pkg/test/resourcefixture/testdata/basic/privateca/v1beta1/privatecacapool/privatecacapooliam/dependencies.yaml b/pkg/test/resourcefixture/testdata/basic/privateca/v1beta1/privatecacapool/privatecacapooliam/dependencies.yaml new file mode 100644 index 0000000000..3e39e2f121 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/privateca/v1beta1/privatecacapool/privatecacapooliam/dependencies.yaml @@ -0,0 +1,100 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMServiceAccount +metadata: + name: privatecacapool-dep +spec: + displayName: ExampleGSA + resourceID: capool-${uniqueId} + +--- + +apiVersion: privateca.cnrm.cloud.google.com/v1beta1 +kind: PrivateCACAPool +metadata: + labels: + label-two: "value-two" + name: privatecacapool-${uniqueId} +spec: + projectRef: + external: projects/${projectId} + location: "us-central1" + tier: ENTERPRISE + issuancePolicy: + allowedKeyTypes: + - rsa: + minModulusSize: 64 + maxModulusSize: 128 + - ellipticCurve: + signatureAlgorithm: ECDSA_P384 + maximumLifetime: 43200s + allowedIssuanceModes: + allowCsrBasedIssuance: true + allowConfigBasedIssuance: false + baselineValues: + keyUsage: + baseKeyUsage: + digitalSignature: false + contentCommitment: false + keyEncipherment: false + dataEncipherment: false + keyAgreement: false + certSign: false + crlSign: false + encipherOnly: false + decipherOnly: false + extendedKeyUsage: + serverAuth: false + clientAuth: false + codeSigning: false + emailProtection: false + timeStamping: false + ocspSigning: false + unknownExtendedKeyUsages: + - objectIdPath: + - 1 + - 7 + caOptions: + isCa: false + maxIssuerPathLength: 7 + policyIds: + - objectIdPath: + - 1 + - 7 + aiaOcspServers: + - string + additionalExtensions: + - objectId: + objectIdPath: + - 1 + - 7 + critical: false + value: c3RyaW5nCg== + identityConstraints: + celExpression: + title: Sample expression + description: Always false + expression: 'false' + location: devops.ca_pool.json + allowSubjectPassthrough: false + allowSubjectAltNamesPassthrough: false + passthroughExtensions: + knownExtensions: + - BASE_KEY_USAGE + additionalExtensions: + - objectIdPath: + - 1 + - 7 From d142a3f0647d1a931326ec7484892cfbae520487 Mon Sep 17 00:00:00 2001 From: Yuwen Ma Date: Wed, 26 Jun 2024 22:43:38 +0000 Subject: [PATCH 067/101] make sure all mutable fields can be changed --- .../_generated_object_cloudbuildworkerpool.golden.yaml | 6 +++--- .../cloudbuild/v1alpha1/cloudbuildworkerpool/_http.log | 8 ++++---- .../cloudbuild/v1alpha1/cloudbuildworkerpool/create.yaml | 1 - .../cloudbuild/v1alpha1/cloudbuildworkerpool/update.yaml | 6 
+++--- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml index 94c00dde23..ca7ad1f3af 100644 --- a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml @@ -10,17 +10,17 @@ metadata: name: cloudbuildworkerpool-${uniqueId} namespace: ${uniqueId} spec: - displayName: New CloudBuild WorkerPool + displayName: Updated CloudBuild WorkerPool location: us-central1 privatePoolV1Config: networkConfig: - egressOption: NO_PUBLIC_EGRESS + egressOption: PUBLIC_EGRESS peeredNetworkIPRange: /29 peeredNetworkRef: external: projects/${projectId}/global/networks/computenetwork-${uniqueId} workerConfig: diskSizeGb: 150 - machineType: e2-medium + machineType: e2-highmem-4 projectRef: external: projects/${projectId} status: diff --git a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_http.log b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_http.log index 0a16b7b480..39aab02a27 100644 --- a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_http.log @@ -646,22 +646,22 @@ X-Xss-Protection: 0 --- -PATCH https://cloudbuild.googleapis.com/v1/projects/${projectId}/locations/us-central1/workerPools/cloudbuildworkerpool-${uniqueId}?%24alt=json%3Benum-encoding%3Dint&updateMask=privatePoolV1Config.workerConfig.diskSizeGb%2CprivatePoolV1Config.workerConfig.machineType +PATCH https://cloudbuild.googleapis.com/v1/projects/${projectId}/locations/us-central1/workerPools/cloudbuildworkerpool-${uniqueId}?%24alt=json%3Benum-encoding%3Dint&updateMask=displayName%2CprivatePoolV1Config.networkConfig.egressOption%2CprivatePoolV1Config.workerConfig.diskSizeGb%2CprivatePoolV1Config.workerConfig.machineType Content-Type: application/json x-goog-request-params: location=us-central1 { - "displayName": "New CloudBuild WorkerPool", + "displayName": "Updated CloudBuild WorkerPool", "name": "projects/${projectId}/locations/us-central1/workerPools/cloudbuildworkerpool-${uniqueId}", "privatePoolV1Config": { "networkConfig": { - "egressOption": 1, + "egressOption": 2, "peeredNetwork": "projects/${projectId}/global/networks/computenetwork-${uniqueId}", "peeredNetworkIpRange": "/29" }, "workerConfig": { "diskSizeGb": "150", - "machineType": "e2-medium" + "machineType": "e2-highmem-4" } } } diff --git a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/create.yaml b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/create.yaml index 0447273411..e944b2e17c 100644 --- a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/create.yaml @@ -29,4 +29,3 @@ spec: external: projects/${projectId}/global/networks/computenetwork-${uniqueId} egressOption: NO_PUBLIC_EGRESS peeredNetworkIPRange: /29 - diff --git 
a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/update.yaml b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/update.yaml index e33f218a2c..3347095713 100644 --- a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/update.yaml +++ b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/update.yaml @@ -19,14 +19,14 @@ spec: projectRef: external: projects/${projectId} location: us-central1 - displayName: New CloudBuild WorkerPool + displayName: Updated CloudBuild WorkerPool privatePoolV1Config: workerConfig: - machineType: e2-medium + machineType: e2-highmem-4 diskSizeGb: 150 networkConfig: peeredNetworkRef: external: projects/${projectId}/global/networks/computenetwork-${uniqueId} - egressOption: NO_PUBLIC_EGRESS + egressOption: PUBLIC_EGRESS peeredNetworkIPRange: /29 From a5ea683242c55f6f831b8050f5ea3ed172694e2b Mon Sep 17 00:00:00 2001 From: Yuwen Ma Date: Thu, 27 Jun 2024 05:55:40 +0000 Subject: [PATCH 068/101] feat: Bump cbwp from alpha to beta --- apis/cloudbuild/{v1alpha1 => v1beta1}/doc.go | 2 +- .../groupversion_info.go | 6 +- .../{v1alpha1 => v1beta1}/workerpool_types.go | 2 +- .../zz_generated.deepcopy.go | 10 +- ...ools.cloudbuild.cnrm.cloud.google.com.yaml | 2 +- .../generated/apis/cloudbuild/v1alpha1/doc.go | 41 --- .../apis/cloudbuild/v1alpha1/register.go | 63 ---- .../v1alpha1/zz_generated.deepcopy.go | 328 ------------------ .../cloudbuildworkerpool_types.go | 2 +- .../apis/cloudbuild/v1beta1/register.go | 6 + .../v1beta1/zz_generated.deepcopy.go | 298 ++++++++++++++++ .../client/clientset/versioned/clientset.go | 13 - .../versioned/fake/clientset_generated.go | 7 - .../clientset/versioned/fake/register.go | 2 - .../clientset/versioned/scheme/register.go | 2 - .../cloudbuild/v1alpha1/cloudbuild_client.go | 110 ------ .../typed/cloudbuild/v1alpha1/doc.go | 23 -- .../typed/cloudbuild/v1alpha1/fake/doc.go | 23 -- .../v1alpha1/fake/fake_cloudbuild_client.go | 43 --- .../v1alpha1/generated_expansion.go | 24 -- .../cloudbuild/v1beta1/cloudbuild_client.go | 5 + .../cloudbuildworkerpool.go | 42 +-- .../v1beta1/fake/fake_cloudbuild_client.go | 4 + .../fake/fake_cloudbuildworkerpool.go | 50 +-- .../cloudbuild/v1beta1/generated_expansion.go | 2 + .../cloudbuild/workerpool_controller.go | 2 +- .../direct/cloudbuild/workerpool_mappings.go | 2 +- ...ed_object_cloudbuildworkerpool.golden.yaml | 2 +- .../cloudbuildworkerpool/_http.log | 0 .../cloudbuildworkerpool/create.yaml | 2 +- .../cloudbuildworkerpool/dependencies.yaml | 0 .../cloudbuildworkerpool/update.yaml | 2 +- 32 files changed, 378 insertions(+), 742 deletions(-) rename apis/cloudbuild/{v1alpha1 => v1beta1}/doc.go (97%) rename apis/cloudbuild/{v1alpha1 => v1beta1}/groupversion_info.go (90%) rename apis/cloudbuild/{v1alpha1 => v1beta1}/workerpool_types.go (99%) rename apis/cloudbuild/{v1alpha1 => v1beta1}/zz_generated.deepcopy.go (96%) delete mode 100644 pkg/clients/generated/apis/cloudbuild/v1alpha1/doc.go delete mode 100644 pkg/clients/generated/apis/cloudbuild/v1alpha1/register.go delete mode 100644 pkg/clients/generated/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go rename pkg/clients/generated/apis/cloudbuild/{v1alpha1 => v1beta1}/cloudbuildworkerpool_types.go (99%) delete mode 100644 pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/cloudbuild_client.go delete mode 100644 pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/doc.go delete mode 100644 
pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/fake/doc.go delete mode 100644 pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/fake/fake_cloudbuild_client.go delete mode 100644 pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/generated_expansion.go rename pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/{v1alpha1 => v1beta1}/cloudbuildworkerpool.go (77%) rename pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/{v1alpha1 => v1beta1}/fake/fake_cloudbuildworkerpool.go (69%) rename pkg/test/resourcefixture/testdata/basic/cloudbuild/{v1alpha1 => v1beta1}/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml (96%) rename pkg/test/resourcefixture/testdata/basic/cloudbuild/{v1alpha1 => v1beta1}/cloudbuildworkerpool/_http.log (100%) rename pkg/test/resourcefixture/testdata/basic/cloudbuild/{v1alpha1 => v1beta1}/cloudbuildworkerpool/create.yaml (95%) rename pkg/test/resourcefixture/testdata/basic/cloudbuild/{v1alpha1 => v1beta1}/cloudbuildworkerpool/dependencies.yaml (100%) rename pkg/test/resourcefixture/testdata/basic/cloudbuild/{v1alpha1 => v1beta1}/cloudbuildworkerpool/update.yaml (95%) diff --git a/apis/cloudbuild/v1alpha1/doc.go b/apis/cloudbuild/v1beta1/doc.go similarity index 97% rename from apis/cloudbuild/v1alpha1/doc.go rename to apis/cloudbuild/v1beta1/doc.go index c42d7b0d60..cb34e2b308 100644 --- a/apis/cloudbuild/v1alpha1/doc.go +++ b/apis/cloudbuild/v1beta1/doc.go @@ -14,4 +14,4 @@ // +kcc:proto=google.devtools.cloudbuild.v1 -package v1alpha1 +package v1beta1 diff --git a/apis/cloudbuild/v1alpha1/groupversion_info.go b/apis/cloudbuild/v1beta1/groupversion_info.go similarity index 90% rename from apis/cloudbuild/v1alpha1/groupversion_info.go rename to apis/cloudbuild/v1beta1/groupversion_info.go index c55dd0b164..beb921019f 100644 --- a/apis/cloudbuild/v1alpha1/groupversion_info.go +++ b/apis/cloudbuild/v1beta1/groupversion_info.go @@ -14,10 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package v1alpha1 contains API Schema definitions for the cloudbuild v1alpha1 API group +// Package v1beta1 contains API Schema definitions for the cloudbuild v1beta1 API group // +kubebuilder:object:generate=true // +groupName=cloudbuild.cnrm.cloud.google.com -package v1alpha1 +package v1beta1 import ( "k8s.io/apimachinery/pkg/runtime/schema" @@ -26,7 +26,7 @@ import ( var ( // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "cloudbuild.cnrm.cloud.google.com", Version: "v1alpha1"} + GroupVersion = schema.GroupVersion{Group: "cloudbuild.cnrm.cloud.google.com", Version: "v1beta1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} diff --git a/apis/cloudbuild/v1alpha1/workerpool_types.go b/apis/cloudbuild/v1beta1/workerpool_types.go similarity index 99% rename from apis/cloudbuild/v1alpha1/workerpool_types.go rename to apis/cloudbuild/v1beta1/workerpool_types.go index 5714706ca5..cb14a25fcc 100644 --- a/apis/cloudbuild/v1alpha1/workerpool_types.go +++ b/apis/cloudbuild/v1beta1/workerpool_types.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1alpha1 +package v1beta1 import ( refv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" diff --git a/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go b/apis/cloudbuild/v1beta1/zz_generated.deepcopy.go similarity index 96% rename from apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go rename to apis/cloudbuild/v1beta1/zz_generated.deepcopy.go index 22e40cd60d..a33e8b05bd 100644 --- a/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go +++ b/apis/cloudbuild/v1beta1/zz_generated.deepcopy.go @@ -2,11 +2,11 @@ // Code generated by controller-gen. DO NOT EDIT. -package v1alpha1 +package v1beta1 import ( - "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" - k8sv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" + refsv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -119,7 +119,7 @@ func (in *CloudBuildWorkerPoolSpec) DeepCopyInto(out *CloudBuildWorkerPoolSpec) } if in.ProjectRef != nil { in, out := &in.ProjectRef, &out.ProjectRef - *out = new(v1beta1.ProjectRef) + *out = new(refsv1beta1.ProjectRef) **out = **in } if in.PrivatePoolConfig != nil { @@ -144,7 +144,7 @@ func (in *CloudBuildWorkerPoolStatus) DeepCopyInto(out *CloudBuildWorkerPoolStat *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]k8sv1alpha1.Condition, len(*in)) + *out = make([]v1alpha1.Condition, len(*in)) copy(*out, *in) } if in.ObservedGeneration != nil { diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudbuildworkerpools.cloudbuild.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudbuildworkerpools.cloudbuild.cnrm.cloud.google.com.yaml index 8b4b5614b2..59fea5504f 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudbuildworkerpools.cloudbuild.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudbuildworkerpools.cloudbuild.cnrm.cloud.google.com.yaml @@ -18,7 +18,7 @@ spec: preserveUnknownFields: false scope: Namespaced versions: - - name: v1alpha1 + - name: v1beta1 schema: openAPIV3Schema: description: CloudBuildWorkerPool is the Schema for the CloudBuild WorkerPool diff --git a/pkg/clients/generated/apis/cloudbuild/v1alpha1/doc.go b/pkg/clients/generated/apis/cloudbuild/v1alpha1/doc.go deleted file mode 100644 index db56cfeb98..0000000000 --- a/pkg/clients/generated/apis/cloudbuild/v1alpha1/doc.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Config Connector and manual -// changes will be clobbered when the file is regenerated. -// -// ---------------------------------------------------------------------------- - -// *** DISCLAIMER *** -// Config Connector's go-client for CRDs is currently in ALPHA, which means -// that future versions of the go-client may include breaking changes. -// Please try it out and give us feedback! - -// Package v1alpha1 contains API Schema definitions for the cloudbuild v1alpha1 API group. -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/pkg/apis/cloudbuild -// +k8s:defaulter-gen=TypeMeta -// +groupName=cloudbuild.cnrm.cloud.google.com - -// Generate deepcopy object for cloudbuild/v1alpha1 API group -// -//go:generate go run ../../../../../../scripts/deepcopy-gen/main.go -O zz_generated.deepcopy -i . -h ../../../../../../hack/boilerplate_client_alpha.go.txt -package v1alpha1 diff --git a/pkg/clients/generated/apis/cloudbuild/v1alpha1/register.go b/pkg/clients/generated/apis/cloudbuild/v1alpha1/register.go deleted file mode 100644 index a195a0de03..0000000000 --- a/pkg/clients/generated/apis/cloudbuild/v1alpha1/register.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Config Connector and manual -// changes will be clobbered when the file is regenerated. -// -// ---------------------------------------------------------------------------- - -// *** DISCLAIMER *** -// Config Connector's go-client for CRDs is currently in ALPHA, which means -// that future versions of the go-client may include breaking changes. -// Please try it out and give us feedback! - -// Package v1alpha1 contains API Schema definitions for the cloudbuild v1alpha1 API group. -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/pkg/apis/cloudbuild -// +k8s:defaulter-gen=TypeMeta -// +groupName=cloudbuild.cnrm.cloud.google.com -package v1alpha1 - -import ( - "reflect" - - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -var ( - // SchemeGroupVersion is the group version used to register these objects. 
- SchemeGroupVersion = schema.GroupVersion{Group: "cloudbuild.cnrm.cloud.google.com", Version: "v1alpha1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme. - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - - // AddToScheme is a global function that registers this API group & version to a scheme - AddToScheme = SchemeBuilder.AddToScheme - - CloudBuildWorkerPoolGVK = schema.GroupVersionKind{ - Group: SchemeGroupVersion.Group, - Version: SchemeGroupVersion.Version, - Kind: reflect.TypeOf(CloudBuildWorkerPool{}).Name(), - } - - cloudbuildAPIVersion = SchemeGroupVersion.String() -) diff --git a/pkg/clients/generated/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 69d2a97473..0000000000 --- a/pkg/clients/generated/apis/cloudbuild/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,328 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// *** DISCLAIMER *** -// Config Connector's go-client for CRDs is currently in ALPHA, which means -// that future versions of the go-client may include breaking changes. -// Please try it out and give us feedback! - -// Code generated by main. DO NOT EDIT. - -package v1alpha1 - -import ( - k8sv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CloudBuildWorkerPool) DeepCopyInto(out *CloudBuildWorkerPool) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBuildWorkerPool. -func (in *CloudBuildWorkerPool) DeepCopy() *CloudBuildWorkerPool { - if in == nil { - return nil - } - out := new(CloudBuildWorkerPool) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CloudBuildWorkerPool) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CloudBuildWorkerPoolList) DeepCopyInto(out *CloudBuildWorkerPoolList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CloudBuildWorkerPool, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBuildWorkerPoolList. -func (in *CloudBuildWorkerPoolList) DeepCopy() *CloudBuildWorkerPoolList { - if in == nil { - return nil - } - out := new(CloudBuildWorkerPoolList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CloudBuildWorkerPoolList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CloudBuildWorkerPoolSpec) DeepCopyInto(out *CloudBuildWorkerPoolSpec) { - *out = *in - if in.DisplayName != nil { - in, out := &in.DisplayName, &out.DisplayName - *out = new(string) - **out = **in - } - in.PrivatePoolV1Config.DeepCopyInto(&out.PrivatePoolV1Config) - out.ProjectRef = in.ProjectRef - if in.ResourceID != nil { - in, out := &in.ResourceID, &out.ResourceID - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBuildWorkerPoolSpec. -func (in *CloudBuildWorkerPoolSpec) DeepCopy() *CloudBuildWorkerPoolSpec { - if in == nil { - return nil - } - out := new(CloudBuildWorkerPoolSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CloudBuildWorkerPoolStatus) DeepCopyInto(out *CloudBuildWorkerPoolStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]k8sv1alpha1.Condition, len(*in)) - copy(*out, *in) - } - if in.ExternalRef != nil { - in, out := &in.ExternalRef, &out.ExternalRef - *out = new(string) - **out = **in - } - if in.ObservedGeneration != nil { - in, out := &in.ObservedGeneration, &out.ObservedGeneration - *out = new(int64) - **out = **in - } - if in.ObservedState != nil { - in, out := &in.ObservedState, &out.ObservedState - *out = new(WorkerpoolObservedStateStatus) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBuildWorkerPoolStatus. -func (in *CloudBuildWorkerPoolStatus) DeepCopy() *CloudBuildWorkerPoolStatus { - if in == nil { - return nil - } - out := new(CloudBuildWorkerPoolStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *WorkerpoolNetworkConfig) DeepCopyInto(out *WorkerpoolNetworkConfig) { - *out = *in - if in.EgressOption != nil { - in, out := &in.EgressOption, &out.EgressOption - *out = new(string) - **out = **in - } - if in.PeeredNetworkIPRange != nil { - in, out := &in.PeeredNetworkIPRange, &out.PeeredNetworkIPRange - *out = new(string) - **out = **in - } - if in.PeeredNetworkRef != nil { - in, out := &in.PeeredNetworkRef, &out.PeeredNetworkRef - *out = new(k8sv1alpha1.ResourceRef) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerpoolNetworkConfig. -func (in *WorkerpoolNetworkConfig) DeepCopy() *WorkerpoolNetworkConfig { - if in == nil { - return nil - } - out := new(WorkerpoolNetworkConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkerpoolNetworkConfigStatus) DeepCopyInto(out *WorkerpoolNetworkConfigStatus) { - *out = *in - if in.EgressOption != nil { - in, out := &in.EgressOption, &out.EgressOption - *out = new(string) - **out = **in - } - if in.PeeredNetwork != nil { - in, out := &in.PeeredNetwork, &out.PeeredNetwork - *out = new(string) - **out = **in - } - if in.PeeredNetworkIPRange != nil { - in, out := &in.PeeredNetworkIPRange, &out.PeeredNetworkIPRange - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerpoolNetworkConfigStatus. -func (in *WorkerpoolNetworkConfigStatus) DeepCopy() *WorkerpoolNetworkConfigStatus { - if in == nil { - return nil - } - out := new(WorkerpoolNetworkConfigStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkerpoolObservedStateStatus) DeepCopyInto(out *WorkerpoolObservedStateStatus) { - *out = *in - if in.CreateTime != nil { - in, out := &in.CreateTime, &out.CreateTime - *out = new(string) - **out = **in - } - if in.Etag != nil { - in, out := &in.Etag, &out.Etag - *out = new(string) - **out = **in - } - if in.NetworkConfig != nil { - in, out := &in.NetworkConfig, &out.NetworkConfig - *out = new(WorkerpoolNetworkConfigStatus) - (*in).DeepCopyInto(*out) - } - if in.UpdateTime != nil { - in, out := &in.UpdateTime, &out.UpdateTime - *out = new(string) - **out = **in - } - in.WorkerConfig.DeepCopyInto(&out.WorkerConfig) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerpoolObservedStateStatus. -func (in *WorkerpoolObservedStateStatus) DeepCopy() *WorkerpoolObservedStateStatus { - if in == nil { - return nil - } - out := new(WorkerpoolObservedStateStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkerpoolPrivatePoolV1Config) DeepCopyInto(out *WorkerpoolPrivatePoolV1Config) { - *out = *in - if in.NetworkConfig != nil { - in, out := &in.NetworkConfig, &out.NetworkConfig - *out = new(WorkerpoolNetworkConfig) - (*in).DeepCopyInto(*out) - } - in.WorkerConfig.DeepCopyInto(&out.WorkerConfig) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerpoolPrivatePoolV1Config. 
-func (in *WorkerpoolPrivatePoolV1Config) DeepCopy() *WorkerpoolPrivatePoolV1Config { - if in == nil { - return nil - } - out := new(WorkerpoolPrivatePoolV1Config) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkerpoolWorkerConfig) DeepCopyInto(out *WorkerpoolWorkerConfig) { - *out = *in - if in.DiskSizeGb != nil { - in, out := &in.DiskSizeGb, &out.DiskSizeGb - *out = new(int64) - **out = **in - } - if in.MachineType != nil { - in, out := &in.MachineType, &out.MachineType - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerpoolWorkerConfig. -func (in *WorkerpoolWorkerConfig) DeepCopy() *WorkerpoolWorkerConfig { - if in == nil { - return nil - } - out := new(WorkerpoolWorkerConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkerpoolWorkerConfigStatus) DeepCopyInto(out *WorkerpoolWorkerConfigStatus) { - *out = *in - if in.DiskSizeGb != nil { - in, out := &in.DiskSizeGb, &out.DiskSizeGb - *out = new(int64) - **out = **in - } - if in.MachineType != nil { - in, out := &in.MachineType, &out.MachineType - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerpoolWorkerConfigStatus. -func (in *WorkerpoolWorkerConfigStatus) DeepCopy() *WorkerpoolWorkerConfigStatus { - if in == nil { - return nil - } - out := new(WorkerpoolWorkerConfigStatus) - in.DeepCopyInto(out) - return out -} diff --git a/pkg/clients/generated/apis/cloudbuild/v1alpha1/cloudbuildworkerpool_types.go b/pkg/clients/generated/apis/cloudbuild/v1beta1/cloudbuildworkerpool_types.go similarity index 99% rename from pkg/clients/generated/apis/cloudbuild/v1alpha1/cloudbuildworkerpool_types.go rename to pkg/clients/generated/apis/cloudbuild/v1beta1/cloudbuildworkerpool_types.go index 12d4f8cbd4..4f54c0cee9 100644 --- a/pkg/clients/generated/apis/cloudbuild/v1alpha1/cloudbuildworkerpool_types.go +++ b/pkg/clients/generated/apis/cloudbuild/v1beta1/cloudbuildworkerpool_types.go @@ -28,7 +28,7 @@ // that future versions of the go-client may include breaking changes. // Please try it out and give us feedback! 
-package v1alpha1 +package v1beta1 import ( "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" diff --git a/pkg/clients/generated/apis/cloudbuild/v1beta1/register.go b/pkg/clients/generated/apis/cloudbuild/v1beta1/register.go index 0570d8665b..e05640d08a 100644 --- a/pkg/clients/generated/apis/cloudbuild/v1beta1/register.go +++ b/pkg/clients/generated/apis/cloudbuild/v1beta1/register.go @@ -59,5 +59,11 @@ var ( Kind: reflect.TypeOf(CloudBuildTrigger{}).Name(), } + CloudBuildWorkerPoolGVK = schema.GroupVersionKind{ + Group: SchemeGroupVersion.Group, + Version: SchemeGroupVersion.Version, + Kind: reflect.TypeOf(CloudBuildWorkerPool{}).Name(), + } + cloudbuildAPIVersion = SchemeGroupVersion.String() ) diff --git a/pkg/clients/generated/apis/cloudbuild/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/cloudbuild/v1beta1/zz_generated.deepcopy.go index 450c2a50e6..2d3fd88418 100644 --- a/pkg/clients/generated/apis/cloudbuild/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/cloudbuild/v1beta1/zz_generated.deepcopy.go @@ -249,6 +249,131 @@ func (in *CloudBuildTriggerStatus) DeepCopy() *CloudBuildTriggerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudBuildWorkerPool) DeepCopyInto(out *CloudBuildWorkerPool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBuildWorkerPool. +func (in *CloudBuildWorkerPool) DeepCopy() *CloudBuildWorkerPool { + if in == nil { + return nil + } + out := new(CloudBuildWorkerPool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudBuildWorkerPool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudBuildWorkerPoolList) DeepCopyInto(out *CloudBuildWorkerPoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudBuildWorkerPool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBuildWorkerPoolList. +func (in *CloudBuildWorkerPoolList) DeepCopy() *CloudBuildWorkerPoolList { + if in == nil { + return nil + } + out := new(CloudBuildWorkerPoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudBuildWorkerPoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudBuildWorkerPoolSpec) DeepCopyInto(out *CloudBuildWorkerPoolSpec) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + in.PrivatePoolV1Config.DeepCopyInto(&out.PrivatePoolV1Config) + out.ProjectRef = in.ProjectRef + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBuildWorkerPoolSpec. +func (in *CloudBuildWorkerPoolSpec) DeepCopy() *CloudBuildWorkerPoolSpec { + if in == nil { + return nil + } + out := new(CloudBuildWorkerPoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudBuildWorkerPoolStatus) DeepCopyInto(out *CloudBuildWorkerPoolStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1alpha1.Condition, len(*in)) + copy(*out, *in) + } + if in.ExternalRef != nil { + in, out := &in.ExternalRef, &out.ExternalRef + *out = new(string) + **out = **in + } + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.ObservedState != nil { + in, out := &in.ObservedState, &out.ObservedState + *out = new(WorkerpoolObservedStateStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBuildWorkerPoolStatus. +func (in *CloudBuildWorkerPoolStatus) DeepCopy() *CloudBuildWorkerPoolStatus { + if in == nil { + return nil + } + out := new(CloudBuildWorkerPoolStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TriggerApprovalConfig) DeepCopyInto(out *TriggerApprovalConfig) { *out = *in @@ -1111,3 +1236,176 @@ func (in *TriggerWebhookConfig) DeepCopy() *TriggerWebhookConfig { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerpoolNetworkConfig) DeepCopyInto(out *WorkerpoolNetworkConfig) { + *out = *in + if in.EgressOption != nil { + in, out := &in.EgressOption, &out.EgressOption + *out = new(string) + **out = **in + } + if in.PeeredNetworkIPRange != nil { + in, out := &in.PeeredNetworkIPRange, &out.PeeredNetworkIPRange + *out = new(string) + **out = **in + } + if in.PeeredNetworkRef != nil { + in, out := &in.PeeredNetworkRef, &out.PeeredNetworkRef + *out = new(v1alpha1.ResourceRef) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerpoolNetworkConfig. +func (in *WorkerpoolNetworkConfig) DeepCopy() *WorkerpoolNetworkConfig { + if in == nil { + return nil + } + out := new(WorkerpoolNetworkConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkerpoolNetworkConfigStatus) DeepCopyInto(out *WorkerpoolNetworkConfigStatus) { + *out = *in + if in.EgressOption != nil { + in, out := &in.EgressOption, &out.EgressOption + *out = new(string) + **out = **in + } + if in.PeeredNetwork != nil { + in, out := &in.PeeredNetwork, &out.PeeredNetwork + *out = new(string) + **out = **in + } + if in.PeeredNetworkIPRange != nil { + in, out := &in.PeeredNetworkIPRange, &out.PeeredNetworkIPRange + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerpoolNetworkConfigStatus. +func (in *WorkerpoolNetworkConfigStatus) DeepCopy() *WorkerpoolNetworkConfigStatus { + if in == nil { + return nil + } + out := new(WorkerpoolNetworkConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerpoolObservedStateStatus) DeepCopyInto(out *WorkerpoolObservedStateStatus) { + *out = *in + if in.CreateTime != nil { + in, out := &in.CreateTime, &out.CreateTime + *out = new(string) + **out = **in + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.NetworkConfig != nil { + in, out := &in.NetworkConfig, &out.NetworkConfig + *out = new(WorkerpoolNetworkConfigStatus) + (*in).DeepCopyInto(*out) + } + if in.UpdateTime != nil { + in, out := &in.UpdateTime, &out.UpdateTime + *out = new(string) + **out = **in + } + in.WorkerConfig.DeepCopyInto(&out.WorkerConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerpoolObservedStateStatus. +func (in *WorkerpoolObservedStateStatus) DeepCopy() *WorkerpoolObservedStateStatus { + if in == nil { + return nil + } + out := new(WorkerpoolObservedStateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerpoolPrivatePoolV1Config) DeepCopyInto(out *WorkerpoolPrivatePoolV1Config) { + *out = *in + if in.NetworkConfig != nil { + in, out := &in.NetworkConfig, &out.NetworkConfig + *out = new(WorkerpoolNetworkConfig) + (*in).DeepCopyInto(*out) + } + in.WorkerConfig.DeepCopyInto(&out.WorkerConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerpoolPrivatePoolV1Config. +func (in *WorkerpoolPrivatePoolV1Config) DeepCopy() *WorkerpoolPrivatePoolV1Config { + if in == nil { + return nil + } + out := new(WorkerpoolPrivatePoolV1Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerpoolWorkerConfig) DeepCopyInto(out *WorkerpoolWorkerConfig) { + *out = *in + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(int64) + **out = **in + } + if in.MachineType != nil { + in, out := &in.MachineType, &out.MachineType + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerpoolWorkerConfig. +func (in *WorkerpoolWorkerConfig) DeepCopy() *WorkerpoolWorkerConfig { + if in == nil { + return nil + } + out := new(WorkerpoolWorkerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkerpoolWorkerConfigStatus) DeepCopyInto(out *WorkerpoolWorkerConfigStatus) { + *out = *in + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(int64) + **out = **in + } + if in.MachineType != nil { + in, out := &in.MachineType, &out.MachineType + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerpoolWorkerConfigStatus. +func (in *WorkerpoolWorkerConfigStatus) DeepCopy() *WorkerpoolWorkerConfigStatus { + if in == nil { + return nil + } + out := new(WorkerpoolWorkerConfigStatus) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/clients/generated/client/clientset/versioned/clientset.go b/pkg/clients/generated/client/clientset/versioned/clientset.go index f58e445b9a..030965862b 100644 --- a/pkg/clients/generated/client/clientset/versioned/clientset.go +++ b/pkg/clients/generated/client/clientset/versioned/clientset.go @@ -47,7 +47,6 @@ import ( binaryauthorizationv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/binaryauthorization/v1beta1" certificatemanagerv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/certificatemanager/v1beta1" cloudassetv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudasset/v1alpha1" - cloudbuildv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1" cloudbuildv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1" cloudfunctionsv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudfunctions/v1beta1" cloudfunctions2v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudfunctions2/v1alpha1" @@ -172,7 +171,6 @@ type Interface interface { BinaryauthorizationV1beta1() binaryauthorizationv1beta1.BinaryauthorizationV1beta1Interface CertificatemanagerV1beta1() certificatemanagerv1beta1.CertificatemanagerV1beta1Interface CloudassetV1alpha1() cloudassetv1alpha1.CloudassetV1alpha1Interface - CloudbuildV1alpha1() cloudbuildv1alpha1.CloudbuildV1alpha1Interface CloudbuildV1beta1() cloudbuildv1beta1.CloudbuildV1beta1Interface CloudfunctionsV1beta1() cloudfunctionsv1beta1.CloudfunctionsV1beta1Interface Cloudfunctions2V1alpha1() cloudfunctions2v1alpha1.Cloudfunctions2V1alpha1Interface @@ -295,7 +293,6 @@ type Clientset struct { binaryauthorizationV1beta1 *binaryauthorizationv1beta1.BinaryauthorizationV1beta1Client certificatemanagerV1beta1 *certificatemanagerv1beta1.CertificatemanagerV1beta1Client cloudassetV1alpha1 *cloudassetv1alpha1.CloudassetV1alpha1Client - cloudbuildV1alpha1 *cloudbuildv1alpha1.CloudbuildV1alpha1Client cloudbuildV1beta1 *cloudbuildv1beta1.CloudbuildV1beta1Client cloudfunctionsV1beta1 *cloudfunctionsv1beta1.CloudfunctionsV1beta1Client cloudfunctions2V1alpha1 *cloudfunctions2v1alpha1.Cloudfunctions2V1alpha1Client @@ -503,11 +500,6 @@ func (c *Clientset) CloudassetV1alpha1() cloudassetv1alpha1.CloudassetV1alpha1In return c.cloudassetV1alpha1 } -// CloudbuildV1alpha1 retrieves the CloudbuildV1alpha1Client -func (c *Clientset) CloudbuildV1alpha1() cloudbuildv1alpha1.CloudbuildV1alpha1Interface { - return c.cloudbuildV1alpha1 -} 
- // CloudbuildV1beta1 retrieves the CloudbuildV1beta1Client func (c *Clientset) CloudbuildV1beta1() cloudbuildv1beta1.CloudbuildV1beta1Interface { return c.cloudbuildV1beta1 @@ -1115,10 +1107,6 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } - cs.cloudbuildV1alpha1, err = cloudbuildv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) - if err != nil { - return nil, err - } cs.cloudbuildV1beta1, err = cloudbuildv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -1542,7 +1530,6 @@ func New(c rest.Interface) *Clientset { cs.binaryauthorizationV1beta1 = binaryauthorizationv1beta1.New(c) cs.certificatemanagerV1beta1 = certificatemanagerv1beta1.New(c) cs.cloudassetV1alpha1 = cloudassetv1alpha1.New(c) - cs.cloudbuildV1alpha1 = cloudbuildv1alpha1.New(c) cs.cloudbuildV1beta1 = cloudbuildv1beta1.New(c) cs.cloudfunctionsV1beta1 = cloudfunctionsv1beta1.New(c) cs.cloudfunctions2V1alpha1 = cloudfunctions2v1alpha1.New(c) diff --git a/pkg/clients/generated/client/clientset/versioned/fake/clientset_generated.go b/pkg/clients/generated/client/clientset/versioned/fake/clientset_generated.go index efbc71413e..4d656cb351 100644 --- a/pkg/clients/generated/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/clients/generated/client/clientset/versioned/fake/clientset_generated.go @@ -67,8 +67,6 @@ import ( fakecertificatemanagerv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/certificatemanager/v1beta1/fake" cloudassetv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudasset/v1alpha1" fakecloudassetv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudasset/v1alpha1/fake" - cloudbuildv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1" - fakecloudbuildv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/fake" cloudbuildv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1" fakecloudbuildv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/fake" cloudfunctionsv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudfunctions/v1beta1" @@ -426,11 +424,6 @@ func (c *Clientset) CloudassetV1alpha1() cloudassetv1alpha1.CloudassetV1alpha1In return &fakecloudassetv1alpha1.FakeCloudassetV1alpha1{Fake: &c.Fake} } -// CloudbuildV1alpha1 retrieves the CloudbuildV1alpha1Client -func (c *Clientset) CloudbuildV1alpha1() cloudbuildv1alpha1.CloudbuildV1alpha1Interface { - return &fakecloudbuildv1alpha1.FakeCloudbuildV1alpha1{Fake: &c.Fake} -} - // CloudbuildV1beta1 retrieves the CloudbuildV1beta1Client func (c *Clientset) CloudbuildV1beta1() cloudbuildv1beta1.CloudbuildV1beta1Interface { return &fakecloudbuildv1beta1.FakeCloudbuildV1beta1{Fake: &c.Fake} diff --git a/pkg/clients/generated/client/clientset/versioned/fake/register.go b/pkg/clients/generated/client/clientset/versioned/fake/register.go index 2c1c132322..734e4f1b41 100644 --- a/pkg/clients/generated/client/clientset/versioned/fake/register.go 
+++ b/pkg/clients/generated/client/clientset/versioned/fake/register.go @@ -44,7 +44,6 @@ import ( binaryauthorizationv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/binaryauthorization/v1beta1" certificatemanagerv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/certificatemanager/v1beta1" cloudassetv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudasset/v1alpha1" - cloudbuildv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudbuild/v1alpha1" cloudbuildv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudbuild/v1beta1" cloudfunctionsv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudfunctions/v1beta1" cloudfunctions2v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudfunctions2/v1alpha1" @@ -173,7 +172,6 @@ var localSchemeBuilder = runtime.SchemeBuilder{ binaryauthorizationv1beta1.AddToScheme, certificatemanagerv1beta1.AddToScheme, cloudassetv1alpha1.AddToScheme, - cloudbuildv1alpha1.AddToScheme, cloudbuildv1beta1.AddToScheme, cloudfunctionsv1beta1.AddToScheme, cloudfunctions2v1alpha1.AddToScheme, diff --git a/pkg/clients/generated/client/clientset/versioned/scheme/register.go b/pkg/clients/generated/client/clientset/versioned/scheme/register.go index 05dba3ff06..330900f56f 100644 --- a/pkg/clients/generated/client/clientset/versioned/scheme/register.go +++ b/pkg/clients/generated/client/clientset/versioned/scheme/register.go @@ -44,7 +44,6 @@ import ( binaryauthorizationv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/binaryauthorization/v1beta1" certificatemanagerv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/certificatemanager/v1beta1" cloudassetv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudasset/v1alpha1" - cloudbuildv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudbuild/v1alpha1" cloudbuildv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudbuild/v1beta1" cloudfunctionsv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudfunctions/v1beta1" cloudfunctions2v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudfunctions2/v1alpha1" @@ -173,7 +172,6 @@ var localSchemeBuilder = runtime.SchemeBuilder{ binaryauthorizationv1beta1.AddToScheme, certificatemanagerv1beta1.AddToScheme, cloudassetv1alpha1.AddToScheme, - cloudbuildv1alpha1.AddToScheme, cloudbuildv1beta1.AddToScheme, cloudfunctionsv1beta1.AddToScheme, cloudfunctions2v1alpha1.AddToScheme, diff --git a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/cloudbuild_client.go b/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/cloudbuild_client.go deleted file mode 100644 index 170d708e3b..0000000000 --- a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/cloudbuild_client.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// *** DISCLAIMER *** -// Config Connector's go-client for CRDs is currently in ALPHA, which means -// that future versions of the go-client may include breaking changes. -// Please try it out and give us feedback! - -// Code generated by main. DO NOT EDIT. - -package v1alpha1 - -import ( - "net/http" - - v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudbuild/v1alpha1" - "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/scheme" - rest "k8s.io/client-go/rest" -) - -type CloudbuildV1alpha1Interface interface { - RESTClient() rest.Interface - CloudBuildWorkerPoolsGetter -} - -// CloudbuildV1alpha1Client is used to interact with features provided by the cloudbuild.cnrm.cloud.google.com group. -type CloudbuildV1alpha1Client struct { - restClient rest.Interface -} - -func (c *CloudbuildV1alpha1Client) CloudBuildWorkerPools(namespace string) CloudBuildWorkerPoolInterface { - return newCloudBuildWorkerPools(c, namespace) -} - -// NewForConfig creates a new CloudbuildV1alpha1Client for the given config. -// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), -// where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*CloudbuildV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - httpClient, err := rest.HTTPClientFor(&config) - if err != nil { - return nil, err - } - return NewForConfigAndClient(&config, httpClient) -} - -// NewForConfigAndClient creates a new CloudbuildV1alpha1Client for the given config and http client. -// Note the http client provided takes precedence over the configured transport values. -func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CloudbuildV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientForConfigAndClient(&config, h) - if err != nil { - return nil, err - } - return &CloudbuildV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new CloudbuildV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *CloudbuildV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CloudbuildV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *CloudbuildV1alpha1Client { - return &CloudbuildV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *CloudbuildV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/doc.go b/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/doc.go deleted file mode 100644 index 61f2499ab1..0000000000 --- a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// *** DISCLAIMER *** -// Config Connector's go-client for CRDs is currently in ALPHA, which means -// that future versions of the go-client may include breaking changes. -// Please try it out and give us feedback! - -// Code generated by main. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1alpha1 diff --git a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/fake/doc.go b/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/fake/doc.go deleted file mode 100644 index 7a39491606..0000000000 --- a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/fake/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// *** DISCLAIMER *** -// Config Connector's go-client for CRDs is currently in ALPHA, which means -// that future versions of the go-client may include breaking changes. -// Please try it out and give us feedback! - -// Code generated by main. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/fake/fake_cloudbuild_client.go b/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/fake/fake_cloudbuild_client.go deleted file mode 100644 index b13233079d..0000000000 --- a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/fake/fake_cloudbuild_client.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// *** DISCLAIMER *** -// Config Connector's go-client for CRDs is currently in ALPHA, which means -// that future versions of the go-client may include breaking changes. -// Please try it out and give us feedback! - -// Code generated by main. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeCloudbuildV1alpha1 struct { - *testing.Fake -} - -func (c *FakeCloudbuildV1alpha1) CloudBuildWorkerPools(namespace string) v1alpha1.CloudBuildWorkerPoolInterface { - return &FakeCloudBuildWorkerPools{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeCloudbuildV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/generated_expansion.go b/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/generated_expansion.go deleted file mode 100644 index c5d6618532..0000000000 --- a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// *** DISCLAIMER *** -// Config Connector's go-client for CRDs is currently in ALPHA, which means -// that future versions of the go-client may include breaking changes. -// Please try it out and give us feedback! - -// Code generated by main. DO NOT EDIT. - -package v1alpha1 - -type CloudBuildWorkerPoolExpansion interface{} diff --git a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/cloudbuild_client.go b/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/cloudbuild_client.go index 21966479bb..af03120bd4 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/cloudbuild_client.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/cloudbuild_client.go @@ -32,6 +32,7 @@ import ( type CloudbuildV1beta1Interface interface { RESTClient() rest.Interface CloudBuildTriggersGetter + CloudBuildWorkerPoolsGetter } // CloudbuildV1beta1Client is used to interact with features provided by the cloudbuild.cnrm.cloud.google.com group. 
@@ -43,6 +44,10 @@ func (c *CloudbuildV1beta1Client) CloudBuildTriggers(namespace string) CloudBuil return newCloudBuildTriggers(c, namespace) } +func (c *CloudbuildV1beta1Client) CloudBuildWorkerPools(namespace string) CloudBuildWorkerPoolInterface { + return newCloudBuildWorkerPools(c, namespace) +} + // NewForConfig creates a new CloudbuildV1beta1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/cloudbuildworkerpool.go b/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/cloudbuildworkerpool.go similarity index 77% rename from pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/cloudbuildworkerpool.go rename to pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/cloudbuildworkerpool.go index 8159f96991..9956bbcda8 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/cloudbuildworkerpool.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/cloudbuildworkerpool.go @@ -19,13 +19,13 @@ // Code generated by main. DO NOT EDIT. -package v1alpha1 +package v1beta1 import ( "context" "time" - v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudbuild/v1alpha1" + v1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudbuild/v1beta1" scheme "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -41,15 +41,15 @@ type CloudBuildWorkerPoolsGetter interface { // CloudBuildWorkerPoolInterface has methods to work with CloudBuildWorkerPool resources. 
type CloudBuildWorkerPoolInterface interface { - Create(ctx context.Context, cloudBuildWorkerPool *v1alpha1.CloudBuildWorkerPool, opts v1.CreateOptions) (*v1alpha1.CloudBuildWorkerPool, error) - Update(ctx context.Context, cloudBuildWorkerPool *v1alpha1.CloudBuildWorkerPool, opts v1.UpdateOptions) (*v1alpha1.CloudBuildWorkerPool, error) - UpdateStatus(ctx context.Context, cloudBuildWorkerPool *v1alpha1.CloudBuildWorkerPool, opts v1.UpdateOptions) (*v1alpha1.CloudBuildWorkerPool, error) + Create(ctx context.Context, cloudBuildWorkerPool *v1beta1.CloudBuildWorkerPool, opts v1.CreateOptions) (*v1beta1.CloudBuildWorkerPool, error) + Update(ctx context.Context, cloudBuildWorkerPool *v1beta1.CloudBuildWorkerPool, opts v1.UpdateOptions) (*v1beta1.CloudBuildWorkerPool, error) + UpdateStatus(ctx context.Context, cloudBuildWorkerPool *v1beta1.CloudBuildWorkerPool, opts v1.UpdateOptions) (*v1beta1.CloudBuildWorkerPool, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.CloudBuildWorkerPool, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.CloudBuildWorkerPoolList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CloudBuildWorkerPool, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CloudBuildWorkerPoolList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CloudBuildWorkerPool, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CloudBuildWorkerPool, err error) CloudBuildWorkerPoolExpansion } @@ -60,7 +60,7 @@ type cloudBuildWorkerPools struct { } // newCloudBuildWorkerPools returns a CloudBuildWorkerPools -func newCloudBuildWorkerPools(c *CloudbuildV1alpha1Client, namespace string) *cloudBuildWorkerPools { +func newCloudBuildWorkerPools(c *CloudbuildV1beta1Client, namespace string) *cloudBuildWorkerPools { return &cloudBuildWorkerPools{ client: c.RESTClient(), ns: namespace, @@ -68,8 +68,8 @@ func newCloudBuildWorkerPools(c *CloudbuildV1alpha1Client, namespace string) *cl } // Get takes name of the cloudBuildWorkerPool, and returns the corresponding cloudBuildWorkerPool object, and an error if there is any. -func (c *cloudBuildWorkerPools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CloudBuildWorkerPool, err error) { - result = &v1alpha1.CloudBuildWorkerPool{} +func (c *cloudBuildWorkerPools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CloudBuildWorkerPool, err error) { + result = &v1beta1.CloudBuildWorkerPool{} err = c.client.Get(). Namespace(c.ns). Resource("cloudbuildworkerpools"). @@ -81,12 +81,12 @@ func (c *cloudBuildWorkerPools) Get(ctx context.Context, name string, options v1 } // List takes label and field selectors, and returns the list of CloudBuildWorkerPools that match those selectors. 
-func (c *cloudBuildWorkerPools) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CloudBuildWorkerPoolList, err error) { +func (c *cloudBuildWorkerPools) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CloudBuildWorkerPoolList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.CloudBuildWorkerPoolList{} + result = &v1beta1.CloudBuildWorkerPoolList{} err = c.client.Get(). Namespace(c.ns). Resource("cloudbuildworkerpools"). @@ -113,8 +113,8 @@ func (c *cloudBuildWorkerPools) Watch(ctx context.Context, opts v1.ListOptions) } // Create takes the representation of a cloudBuildWorkerPool and creates it. Returns the server's representation of the cloudBuildWorkerPool, and an error, if there is any. -func (c *cloudBuildWorkerPools) Create(ctx context.Context, cloudBuildWorkerPool *v1alpha1.CloudBuildWorkerPool, opts v1.CreateOptions) (result *v1alpha1.CloudBuildWorkerPool, err error) { - result = &v1alpha1.CloudBuildWorkerPool{} +func (c *cloudBuildWorkerPools) Create(ctx context.Context, cloudBuildWorkerPool *v1beta1.CloudBuildWorkerPool, opts v1.CreateOptions) (result *v1beta1.CloudBuildWorkerPool, err error) { + result = &v1beta1.CloudBuildWorkerPool{} err = c.client.Post(). Namespace(c.ns). Resource("cloudbuildworkerpools"). @@ -126,8 +126,8 @@ func (c *cloudBuildWorkerPools) Create(ctx context.Context, cloudBuildWorkerPool } // Update takes the representation of a cloudBuildWorkerPool and updates it. Returns the server's representation of the cloudBuildWorkerPool, and an error, if there is any. -func (c *cloudBuildWorkerPools) Update(ctx context.Context, cloudBuildWorkerPool *v1alpha1.CloudBuildWorkerPool, opts v1.UpdateOptions) (result *v1alpha1.CloudBuildWorkerPool, err error) { - result = &v1alpha1.CloudBuildWorkerPool{} +func (c *cloudBuildWorkerPools) Update(ctx context.Context, cloudBuildWorkerPool *v1beta1.CloudBuildWorkerPool, opts v1.UpdateOptions) (result *v1beta1.CloudBuildWorkerPool, err error) { + result = &v1beta1.CloudBuildWorkerPool{} err = c.client.Put(). Namespace(c.ns). Resource("cloudbuildworkerpools"). @@ -141,8 +141,8 @@ func (c *cloudBuildWorkerPools) Update(ctx context.Context, cloudBuildWorkerPool // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *cloudBuildWorkerPools) UpdateStatus(ctx context.Context, cloudBuildWorkerPool *v1alpha1.CloudBuildWorkerPool, opts v1.UpdateOptions) (result *v1alpha1.CloudBuildWorkerPool, err error) { - result = &v1alpha1.CloudBuildWorkerPool{} +func (c *cloudBuildWorkerPools) UpdateStatus(ctx context.Context, cloudBuildWorkerPool *v1beta1.CloudBuildWorkerPool, opts v1.UpdateOptions) (result *v1beta1.CloudBuildWorkerPool, err error) { + result = &v1beta1.CloudBuildWorkerPool{} err = c.client.Put(). Namespace(c.ns). Resource("cloudbuildworkerpools"). @@ -183,8 +183,8 @@ func (c *cloudBuildWorkerPools) DeleteCollection(ctx context.Context, opts v1.De } // Patch applies the patch and returns the patched cloudBuildWorkerPool. 
-func (c *cloudBuildWorkerPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CloudBuildWorkerPool, err error) { - result = &v1alpha1.CloudBuildWorkerPool{} +func (c *cloudBuildWorkerPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CloudBuildWorkerPool, err error) { + result = &v1beta1.CloudBuildWorkerPool{} err = c.client.Patch(pt). Namespace(c.ns). Resource("cloudbuildworkerpools"). diff --git a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/fake/fake_cloudbuild_client.go b/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/fake/fake_cloudbuild_client.go index 6e3183820a..c54c724b27 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/fake/fake_cloudbuild_client.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/fake/fake_cloudbuild_client.go @@ -35,6 +35,10 @@ func (c *FakeCloudbuildV1beta1) CloudBuildTriggers(namespace string) v1beta1.Clo return &FakeCloudBuildTriggers{c, namespace} } +func (c *FakeCloudbuildV1beta1) CloudBuildWorkerPools(namespace string) v1beta1.CloudBuildWorkerPoolInterface { + return &FakeCloudBuildWorkerPools{c, namespace} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakeCloudbuildV1beta1) RESTClient() rest.Interface { diff --git a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/fake/fake_cloudbuildworkerpool.go b/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/fake/fake_cloudbuildworkerpool.go similarity index 69% rename from pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/fake/fake_cloudbuildworkerpool.go rename to pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/fake/fake_cloudbuildworkerpool.go index 1bf7db4b49..09319e3d99 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1alpha1/fake/fake_cloudbuildworkerpool.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/fake/fake_cloudbuildworkerpool.go @@ -24,7 +24,7 @@ package fake import ( "context" - v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudbuild/v1alpha1" + v1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudbuild/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" @@ -34,29 +34,29 @@ import ( // FakeCloudBuildWorkerPools implements CloudBuildWorkerPoolInterface type FakeCloudBuildWorkerPools struct { - Fake *FakeCloudbuildV1alpha1 + Fake *FakeCloudbuildV1beta1 ns string } -var cloudbuildworkerpoolsResource = v1alpha1.SchemeGroupVersion.WithResource("cloudbuildworkerpools") +var cloudbuildworkerpoolsResource = v1beta1.SchemeGroupVersion.WithResource("cloudbuildworkerpools") -var cloudbuildworkerpoolsKind = v1alpha1.SchemeGroupVersion.WithKind("CloudBuildWorkerPool") +var cloudbuildworkerpoolsKind = v1beta1.SchemeGroupVersion.WithKind("CloudBuildWorkerPool") // Get takes name of the cloudBuildWorkerPool, and returns the corresponding cloudBuildWorkerPool object, and an error if there is any. 
-func (c *FakeCloudBuildWorkerPools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CloudBuildWorkerPool, err error) { +func (c *FakeCloudBuildWorkerPools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CloudBuildWorkerPool, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(cloudbuildworkerpoolsResource, c.ns, name), &v1alpha1.CloudBuildWorkerPool{}) + Invokes(testing.NewGetAction(cloudbuildworkerpoolsResource, c.ns, name), &v1beta1.CloudBuildWorkerPool{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.CloudBuildWorkerPool), err + return obj.(*v1beta1.CloudBuildWorkerPool), err } // List takes label and field selectors, and returns the list of CloudBuildWorkerPools that match those selectors. -func (c *FakeCloudBuildWorkerPools) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CloudBuildWorkerPoolList, err error) { +func (c *FakeCloudBuildWorkerPools) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CloudBuildWorkerPoolList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(cloudbuildworkerpoolsResource, cloudbuildworkerpoolsKind, c.ns, opts), &v1alpha1.CloudBuildWorkerPoolList{}) + Invokes(testing.NewListAction(cloudbuildworkerpoolsResource, cloudbuildworkerpoolsKind, c.ns, opts), &v1beta1.CloudBuildWorkerPoolList{}) if obj == nil { return nil, err @@ -66,8 +66,8 @@ func (c *FakeCloudBuildWorkerPools) List(ctx context.Context, opts v1.ListOption if label == nil { label = labels.Everything() } - list := &v1alpha1.CloudBuildWorkerPoolList{ListMeta: obj.(*v1alpha1.CloudBuildWorkerPoolList).ListMeta} - for _, item := range obj.(*v1alpha1.CloudBuildWorkerPoolList).Items { + list := &v1beta1.CloudBuildWorkerPoolList{ListMeta: obj.(*v1beta1.CloudBuildWorkerPoolList).ListMeta} + for _, item := range obj.(*v1beta1.CloudBuildWorkerPoolList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -83,43 +83,43 @@ func (c *FakeCloudBuildWorkerPools) Watch(ctx context.Context, opts v1.ListOptio } // Create takes the representation of a cloudBuildWorkerPool and creates it. Returns the server's representation of the cloudBuildWorkerPool, and an error, if there is any. -func (c *FakeCloudBuildWorkerPools) Create(ctx context.Context, cloudBuildWorkerPool *v1alpha1.CloudBuildWorkerPool, opts v1.CreateOptions) (result *v1alpha1.CloudBuildWorkerPool, err error) { +func (c *FakeCloudBuildWorkerPools) Create(ctx context.Context, cloudBuildWorkerPool *v1beta1.CloudBuildWorkerPool, opts v1.CreateOptions) (result *v1beta1.CloudBuildWorkerPool, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(cloudbuildworkerpoolsResource, c.ns, cloudBuildWorkerPool), &v1alpha1.CloudBuildWorkerPool{}) + Invokes(testing.NewCreateAction(cloudbuildworkerpoolsResource, c.ns, cloudBuildWorkerPool), &v1beta1.CloudBuildWorkerPool{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.CloudBuildWorkerPool), err + return obj.(*v1beta1.CloudBuildWorkerPool), err } // Update takes the representation of a cloudBuildWorkerPool and updates it. Returns the server's representation of the cloudBuildWorkerPool, and an error, if there is any. 
-func (c *FakeCloudBuildWorkerPools) Update(ctx context.Context, cloudBuildWorkerPool *v1alpha1.CloudBuildWorkerPool, opts v1.UpdateOptions) (result *v1alpha1.CloudBuildWorkerPool, err error) { +func (c *FakeCloudBuildWorkerPools) Update(ctx context.Context, cloudBuildWorkerPool *v1beta1.CloudBuildWorkerPool, opts v1.UpdateOptions) (result *v1beta1.CloudBuildWorkerPool, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cloudbuildworkerpoolsResource, c.ns, cloudBuildWorkerPool), &v1alpha1.CloudBuildWorkerPool{}) + Invokes(testing.NewUpdateAction(cloudbuildworkerpoolsResource, c.ns, cloudBuildWorkerPool), &v1beta1.CloudBuildWorkerPool{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.CloudBuildWorkerPool), err + return obj.(*v1beta1.CloudBuildWorkerPool), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCloudBuildWorkerPools) UpdateStatus(ctx context.Context, cloudBuildWorkerPool *v1alpha1.CloudBuildWorkerPool, opts v1.UpdateOptions) (*v1alpha1.CloudBuildWorkerPool, error) { +func (c *FakeCloudBuildWorkerPools) UpdateStatus(ctx context.Context, cloudBuildWorkerPool *v1beta1.CloudBuildWorkerPool, opts v1.UpdateOptions) (*v1beta1.CloudBuildWorkerPool, error) { obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(cloudbuildworkerpoolsResource, "status", c.ns, cloudBuildWorkerPool), &v1alpha1.CloudBuildWorkerPool{}) + Invokes(testing.NewUpdateSubresourceAction(cloudbuildworkerpoolsResource, "status", c.ns, cloudBuildWorkerPool), &v1beta1.CloudBuildWorkerPool{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.CloudBuildWorkerPool), err + return obj.(*v1beta1.CloudBuildWorkerPool), err } // Delete takes name of the cloudBuildWorkerPool and deletes it. Returns an error if one occurs. func (c *FakeCloudBuildWorkerPools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(cloudbuildworkerpoolsResource, c.ns, name, opts), &v1alpha1.CloudBuildWorkerPool{}) + Invokes(testing.NewDeleteActionWithOptions(cloudbuildworkerpoolsResource, c.ns, name, opts), &v1beta1.CloudBuildWorkerPool{}) return err } @@ -128,17 +128,17 @@ func (c *FakeCloudBuildWorkerPools) Delete(ctx context.Context, name string, opt func (c *FakeCloudBuildWorkerPools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { action := testing.NewDeleteCollectionAction(cloudbuildworkerpoolsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha1.CloudBuildWorkerPoolList{}) + _, err := c.Fake.Invokes(action, &v1beta1.CloudBuildWorkerPoolList{}) return err } // Patch applies the patch and returns the patched cloudBuildWorkerPool. -func (c *FakeCloudBuildWorkerPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CloudBuildWorkerPool, err error) { +func (c *FakeCloudBuildWorkerPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CloudBuildWorkerPool, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(cloudbuildworkerpoolsResource, c.ns, name, pt, data, subresources...), &v1alpha1.CloudBuildWorkerPool{}) + Invokes(testing.NewPatchSubresourceAction(cloudbuildworkerpoolsResource, c.ns, name, pt, data, subresources...), &v1beta1.CloudBuildWorkerPool{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.CloudBuildWorkerPool), err + return obj.(*v1beta1.CloudBuildWorkerPool), err } diff --git a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/generated_expansion.go b/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/generated_expansion.go index a22920228c..85031d8314 100644 --- a/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/generated_expansion.go +++ b/pkg/clients/generated/client/clientset/versioned/typed/cloudbuild/v1beta1/generated_expansion.go @@ -22,3 +22,5 @@ package v1beta1 type CloudBuildTriggerExpansion interface{} + +type CloudBuildWorkerPoolExpansion interface{} diff --git a/pkg/controller/direct/cloudbuild/workerpool_controller.go b/pkg/controller/direct/cloudbuild/workerpool_controller.go index e471c3de6f..43de4177c0 100644 --- a/pkg/controller/direct/cloudbuild/workerpool_controller.go +++ b/pkg/controller/direct/cloudbuild/workerpool_controller.go @@ -27,7 +27,7 @@ import ( cloudbuildpb "cloud.google.com/go/cloudbuild/apiv1/v2/cloudbuildpb" "google.golang.org/api/option" - krm "github.com/GoogleCloudPlatform/k8s-config-connector/apis/cloudbuild/v1alpha1" + krm "github.com/GoogleCloudPlatform/k8s-config-connector/apis/cloudbuild/v1beta1" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/config" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/directbase" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/references" diff --git a/pkg/controller/direct/cloudbuild/workerpool_mappings.go b/pkg/controller/direct/cloudbuild/workerpool_mappings.go index eedf53f854..2cc952b44e 100644 --- a/pkg/controller/direct/cloudbuild/workerpool_mappings.go +++ b/pkg/controller/direct/cloudbuild/workerpool_mappings.go @@ -19,7 +19,7 @@ package cloudbuild import ( pb "cloud.google.com/go/cloudbuild/apiv1/v2/cloudbuildpb" - krm "github.com/GoogleCloudPlatform/k8s-config-connector/apis/cloudbuild/v1alpha1" + krm "github.com/GoogleCloudPlatform/k8s-config-connector/apis/cloudbuild/v1beta1" refv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" ) diff --git a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1beta1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml similarity index 96% rename from pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml rename to pkg/test/resourcefixture/testdata/basic/cloudbuild/v1beta1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml index ca7ad1f3af..b27cfeb085 100644 --- a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1beta1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml @@ -1,4 +1,4 @@ -apiVersion: cloudbuild.cnrm.cloud.google.com/v1alpha1 +apiVersion: cloudbuild.cnrm.cloud.google.com/v1beta1 kind: CloudBuildWorkerPool metadata: 
finalizers: diff --git a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_http.log b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1beta1/cloudbuildworkerpool/_http.log similarity index 100% rename from pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/_http.log rename to pkg/test/resourcefixture/testdata/basic/cloudbuild/v1beta1/cloudbuildworkerpool/_http.log diff --git a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/create.yaml b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1beta1/cloudbuildworkerpool/create.yaml similarity index 95% rename from pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/create.yaml rename to pkg/test/resourcefixture/testdata/basic/cloudbuild/v1beta1/cloudbuildworkerpool/create.yaml index e944b2e17c..2ab4f97d62 100644 --- a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1beta1/cloudbuildworkerpool/create.yaml @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: cloudbuild.cnrm.cloud.google.com/v1alpha1 +apiVersion: cloudbuild.cnrm.cloud.google.com/v1beta1 kind: CloudBuildWorkerPool metadata: name: cloudbuildworkerpool-${uniqueId} diff --git a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/dependencies.yaml b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1beta1/cloudbuildworkerpool/dependencies.yaml similarity index 100% rename from pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/dependencies.yaml rename to pkg/test/resourcefixture/testdata/basic/cloudbuild/v1beta1/cloudbuildworkerpool/dependencies.yaml diff --git a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/update.yaml b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1beta1/cloudbuildworkerpool/update.yaml similarity index 95% rename from pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/update.yaml rename to pkg/test/resourcefixture/testdata/basic/cloudbuild/v1beta1/cloudbuildworkerpool/update.yaml index 3347095713..bd16a112eb 100644 --- a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1alpha1/cloudbuildworkerpool/update.yaml +++ b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1beta1/cloudbuildworkerpool/update.yaml @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
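The matching fake clientset makes the promoted interface easy to exercise in unit tests. A small sketch, assuming the generated fake package provides the conventional NewSimpleClientset constructor; the test itself is illustrative and not part of this patch:

    package cloudbuild_test

    import (
        "context"
        "testing"

        cloudbuildv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/cloudbuild/v1beta1"
        "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/fake"
        v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func TestGetWorkerPool(t *testing.T) {
        // Seed the fake clientset with a single CloudBuildWorkerPool object.
        pool := &cloudbuildv1beta1.CloudBuildWorkerPool{
            ObjectMeta: v1.ObjectMeta{Name: "pool-1", Namespace: "default"},
        }
        clientset := fake.NewSimpleClientset(pool)

        got, err := clientset.CloudbuildV1beta1().CloudBuildWorkerPools("default").Get(context.TODO(), "pool-1", v1.GetOptions{})
        if err != nil {
            t.Fatalf("unexpected error: %v", err)
        }
        if got.Name != "pool-1" {
            t.Errorf("got %q, want %q", got.Name, "pool-1")
        }
    }
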
-apiVersion: cloudbuild.cnrm.cloud.google.com/v1alpha1 +apiVersion: cloudbuild.cnrm.cloud.google.com/v1beta1 kind: CloudBuildWorkerPool metadata: name: cloudbuildworkerpool-${uniqueId} From 3dbe82a0308018b3dcae8881d0fbd0e5b06268e6 Mon Sep 17 00:00:00 2001 From: justinsb Date: Fri, 21 Jun 2024 10:32:41 -0400 Subject: [PATCH 069/101] monitoringdashboard: Add alertChart Implement as a ref --- apis/monitoring/v1beta1/alertpolicyref.go | 24 ++ .../v1beta1/monitoringdashboard_types.go | 12 +- .../v1beta1/zz_generated.deepcopy.go | 27 +- ...ards.monitoring.cnrm.cloud.google.com.yaml | 148 ++++++++++ docs/releasenotes/release-1.120.md | 2 + .../v1beta1/monitoringdashboard_types.go | 13 + .../v1beta1/zz_generated.deepcopy.go | 27 ++ .../dashboard_generated.mappings.go | 43 +-- .../direct/monitoring/dashboard_mappings.go | 23 ++ .../monitoringdashboard_controller.go | 2 +- pkg/controller/direct/monitoring/refs.go | 84 +++++- .../direct/references/computenetworkref.go | 54 +++- ...ated_export_monitoringdashboardfull.golden | 4 + ...object_monitoringdashboardfull.golden.yaml | 4 + .../monitoringdashboardfull/_http.log | 262 ++++++++++++++++++ .../monitoringdashboardfull/create.yaml | 4 + .../monitoringdashboardfull/dependencies.yaml | 42 +++ .../monitoring/monitoringdashboard.md | 226 ++++++++++++++- tests/e2e/normalize.go | 14 + 19 files changed, 952 insertions(+), 63 deletions(-) create mode 100644 apis/monitoring/v1beta1/alertpolicyref.go create mode 100644 pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/dependencies.yaml diff --git a/apis/monitoring/v1beta1/alertpolicyref.go b/apis/monitoring/v1beta1/alertpolicyref.go new file mode 100644 index 0000000000..2e555853c7 --- /dev/null +++ b/apis/monitoring/v1beta1/alertpolicyref.go @@ -0,0 +1,24 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +type MonitoringAlertPolicyRef struct { + /* The MonitoringAlertPolicy link in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", when not managed by KCC. */ + External string `json:"external,omitempty"` + /* The `name` field of a `MonitoringAlertPolicy` resource. */ + Name string `json:"name,omitempty"` + /* The `namespace` field of a `MonitoringAlertPolicy` resource. */ + Namespace string `json:"namespace,omitempty"` +} diff --git a/apis/monitoring/v1beta1/monitoringdashboard_types.go b/apis/monitoring/v1beta1/monitoringdashboard_types.go index 4d038544b8..f99b4b2b3d 100644 --- a/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -40,12 +40,9 @@ var ( // +kcc:proto=google.monitoring.dashboard.v1.AlertChart type AlertChart struct { - // Required. The resource name of the alert policy. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] - // + // Required. A reference to the MonitoringAlertPolicy. 
// +required - Name *string `json:"name,omitempty"` + AlertPolicyRef *MonitoringAlertPolicyRef `json:"alertPolicyRef"` } // +kcc:proto=google.monitoring.dashboard.v1.ChartOptions @@ -454,8 +451,6 @@ type Widget struct { Blank *Empty `json:"blank,omitempty"` /*NOTYET - // A chart of alert policy data. - AlertChart *AlertChart `json:"alertChart,omitempty"` // A widget that displays time series data in a tabular format. TimeSeriesTable *TimeSeriesTable `json:"timeSeriesTable,omitempty"` @@ -492,6 +487,9 @@ type Widget struct { // underscores. Widget ids are optional. Id *string `json:"id,omitempty"` */ + + // A chart of alert policy data. + AlertChart *AlertChart `json:"alertChart,omitempty"` } // +kcc:proto=emptypb.Empty diff --git a/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/apis/monitoring/v1beta1/zz_generated.deepcopy.go index 26542bc9bc..3215814d4b 100644 --- a/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -62,9 +62,9 @@ func (in *Aggregation) DeepCopy() *Aggregation { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AlertChart) DeepCopyInto(out *AlertChart) { *out = *in - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) + if in.AlertPolicyRef != nil { + in, out := &in.AlertPolicyRef, &out.AlertPolicyRef + *out = new(MonitoringAlertPolicyRef) **out = **in } return @@ -336,6 +336,22 @@ func (in *LogsPanel) DeepCopy() *LogsPanel { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringAlertPolicyRef) DeepCopyInto(out *MonitoringAlertPolicyRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringAlertPolicyRef. +func (in *MonitoringAlertPolicyRef) DeepCopy() *MonitoringAlertPolicyRef { + if in == nil { + return nil + } + out := new(MonitoringAlertPolicyRef) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MonitoringDashboard) DeepCopyInto(out *MonitoringDashboard) { *out = *in @@ -1256,6 +1272,11 @@ func (in *Widget) DeepCopyInto(out *Widget) { *out = new(SectionHeader) (*in).DeepCopyInto(*out) } + if in.AlertChart != nil { + in, out := &in.AlertChart, &out.AlertChart + *out = new(AlertChart) + (*in).DeepCopyInto(*out) + } return } diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml index 21efa8c20b..af8f75523f 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml @@ -81,6 +81,43 @@ spec: this column. items: properties: + alertChart: + description: A chart of alert policy data. + properties: + alertPolicyRef: + description: Required. A reference to the MonitoringAlertPolicy. 
+ oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: The MonitoringAlertPolicy link + in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", + when not managed by KCC. + type: string + name: + description: The `name` field of a `MonitoringAlertPolicy` + resource. + type: string + namespace: + description: The `namespace` field of a `MonitoringAlertPolicy` + resource. + type: string + type: object + required: + - alertPolicyRef + type: object blank: description: A blank space. type: object @@ -1534,6 +1571,43 @@ spec: the columns row-first. items: properties: + alertChart: + description: A chart of alert policy data. + properties: + alertPolicyRef: + description: Required. A reference to the MonitoringAlertPolicy. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: The MonitoringAlertPolicy link in the + form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", + when not managed by KCC. + type: string + name: + description: The `name` field of a `MonitoringAlertPolicy` + resource. + type: string + namespace: + description: The `namespace` field of a `MonitoringAlertPolicy` + resource. + type: string + type: object + required: + - alertPolicyRef + type: object blank: description: A blank space. type: object @@ -2914,6 +2988,43 @@ spec: description: The informational widget contained in the tile. For example an `XyChart`. properties: + alertChart: + description: A chart of alert policy data. + properties: + alertPolicyRef: + description: Required. A reference to the MonitoringAlertPolicy. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: The MonitoringAlertPolicy link + in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", + when not managed by KCC. + type: string + name: + description: The `name` field of a `MonitoringAlertPolicy` + resource. + type: string + namespace: + description: The `namespace` field of a `MonitoringAlertPolicy` + resource. + type: string + type: object + required: + - alertPolicyRef + type: object blank: description: A blank space. type: object @@ -4397,6 +4508,43 @@ spec: this row. items: properties: + alertChart: + description: A chart of alert policy data. + properties: + alertPolicyRef: + description: Required. A reference to the MonitoringAlertPolicy. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: The MonitoringAlertPolicy link + in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", + when not managed by KCC. + type: string + name: + description: The `name` field of a `MonitoringAlertPolicy` + resource. + type: string + namespace: + description: The `namespace` field of a `MonitoringAlertPolicy` + resource. + type: string + type: object + required: + - alertPolicyRef + type: object blank: description: A blank space. 
type: object diff --git a/docs/releasenotes/release-1.120.md b/docs/releasenotes/release-1.120.md index 39c575ff12..222f6c5287 100644 --- a/docs/releasenotes/release-1.120.md +++ b/docs/releasenotes/release-1.120.md @@ -25,6 +25,8 @@ output fields from GCP APIs are in `status.observedState.*` * Added `spec.severity` field. * `MonitoringDashboard` + * Added `alertChart` widgets. + * Added `collapsibleGroup` widgets. * Added `style` fields to text widgets. * Added `sectionHeader` widgets. diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go index a681ee8b0a..b84bb4c8fc 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -92,6 +92,11 @@ type DashboardAggregation struct { PerSeriesAligner *string `json:"perSeriesAligner,omitempty"` } +type DashboardAlertChart struct { + /* Required. A reference to the MonitoringAlertPolicy. */ + AlertPolicyRef v1alpha1.ResourceRef `json:"alertPolicyRef"` +} + type DashboardBlank struct { } @@ -514,6 +519,10 @@ type DashboardTimeSeriesQuery struct { } type DashboardWidget struct { + /* A chart of alert policy data. */ + // +optional + AlertChart *DashboardAlertChart `json:"alertChart,omitempty"` + /* A blank space. */ // +optional Blank *DashboardBlank `json:"blank,omitempty"` @@ -548,6 +557,10 @@ type DashboardWidget struct { } type DashboardWidgets struct { + /* A chart of alert policy data. */ + // +optional + AlertChart *DashboardAlertChart `json:"alertChart,omitempty"` + /* A blank space. */ // +optional Blank *DashboardBlank `json:"blank,omitempty"` diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go index cd92e19634..0d6f914beb 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -542,6 +542,23 @@ func (in *DashboardAggregation) DeepCopy() *DashboardAggregation { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardAlertChart) DeepCopyInto(out *DashboardAlertChart) { + *out = *in + out.AlertPolicyRef = in.AlertPolicyRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardAlertChart. +func (in *DashboardAlertChart) DeepCopy() *DashboardAlertChart { + if in == nil { + return nil + } + out := new(DashboardAlertChart) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DashboardBlank) DeepCopyInto(out *DashboardBlank) { *out = *in @@ -1335,6 +1352,11 @@ func (in *DashboardTimeSeriesQuery) DeepCopy() *DashboardTimeSeriesQuery { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DashboardWidget) DeepCopyInto(out *DashboardWidget) { *out = *in + if in.AlertChart != nil { + in, out := &in.AlertChart, &out.AlertChart + *out = new(DashboardAlertChart) + **out = **in + } if in.Blank != nil { in, out := &in.Blank, &out.Blank *out = new(DashboardBlank) @@ -1391,6 +1413,11 @@ func (in *DashboardWidget) DeepCopy() *DashboardWidget { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DashboardWidgets) DeepCopyInto(out *DashboardWidgets) { *out = *in + if in.AlertChart != nil { + in, out := &in.AlertChart, &out.AlertChart + *out = new(DashboardAlertChart) + **out = **in + } if in.Blank != nil { in, out := &in.Blank, &out.Blank *out = new(DashboardBlank) diff --git a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go index 1eb85cba7b..a78976e89d 100644 --- a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go @@ -42,22 +42,7 @@ func Aggregation_ToProto(mapCtx *MapContext, in *krm.Aggregation) *pb.Aggregatio out.GroupByFields = in.GroupByFields return out } -func AlertChart_FromProto(mapCtx *MapContext, in *pb.AlertChart) *krm.AlertChart { - if in == nil { - return nil - } - out := &krm.AlertChart{} - out.Name = LazyPtr(in.GetName()) - return out -} -func AlertChart_ToProto(mapCtx *MapContext, in *krm.AlertChart) *pb.AlertChart { - if in == nil { - return nil - } - out := &pb.AlertChart{} - out.Name = ValueOf(in.Name) - return out -} + func ChartOptions_FromProto(mapCtx *MapContext, in *pb.ChartOptions) *krm.ChartOptions { if in == nil { return nil @@ -136,6 +121,7 @@ func ColumnLayout_Column_ToProto(mapCtx *MapContext, in *krm.ColumnLayout_Column // out.FilterType = Enum_FromProto(mapCtx, in.FilterType) // return out // } + // func DashboardFilter_ToProto(mapCtx *MapContext, in *krm.DashboardFilter) *pb.DashboardFilter { // if in == nil { // return nil @@ -150,25 +136,6 @@ func ColumnLayout_Column_ToProto(mapCtx *MapContext, in *krm.ColumnLayout_Column // return out // } -// func Dashboard_LabelsEntry_FromProto(mapCtx *MapContext, in *pb.Dashboard_LabelsEntry) *krm.Dashboard_LabelsEntry { -// if in == nil { -// return nil -// } -// out := &krm.Dashboard_LabelsEntry{} -// out.Key = LazyPtr(in.GetKey()) -// out.Value = LazyPtr(in.GetValue()) -// return out -// } -// -// func Dashboard_LabelsEntry_ToProto(mapCtx *MapContext, in *krm.Dashboard_LabelsEntry) *pb.Dashboard_LabelsEntry { -// if in == nil { -// return nil -// } -// out := &pb.Dashboard_LabelsEntry{} -// out.Key = ValueOf(in.Key) -// out.Value = ValueOf(in.Value) -// return out -// } func ErrorReportingPanel_FromProto(mapCtx *MapContext, in *pb.ErrorReportingPanel) *krm.ErrorReportingPanel { if in == nil { return nil @@ -833,7 +800,7 @@ func Widget_FromProto(mapCtx *MapContext, in *pb.Widget) *krm.Widget { out.Scorecard = Scorecard_FromProto(mapCtx, in.GetScorecard()) out.Text = Text_FromProto(mapCtx, in.GetText()) out.Blank = Empty_FromProto(mapCtx, in.GetBlank()) - // MISSING: AlertChart + out.AlertChart = AlertChart_FromProto(mapCtx, in.GetAlertChart()) // MISSING: TimeSeriesTable out.CollapsibleGroup = CollapsibleGroup_FromProto(mapCtx, in.GetCollapsibleGroup()) out.LogsPanel = LogsPanel_FromProto(mapCtx, in.GetLogsPanel()) @@ -863,7 +830,9 @@ func Widget_ToProto(mapCtx *MapContext, in *krm.Widget) *pb.Widget { if oneof := Empty_ToProto(mapCtx, in.Blank); oneof != nil { 
out.Content = &pb.Widget_Blank{Blank: oneof} } - // MISSING: AlertChart + if oneof := AlertChart_ToProto(mapCtx, in.AlertChart); oneof != nil { + out.Content = &pb.Widget_AlertChart{AlertChart: oneof} + } // MISSING: TimeSeriesTable if oneof := CollapsibleGroup_ToProto(mapCtx, in.CollapsibleGroup); oneof != nil { out.Content = &pb.Widget_CollapsibleGroup{CollapsibleGroup: oneof} diff --git a/pkg/controller/direct/monitoring/dashboard_mappings.go b/pkg/controller/direct/monitoring/dashboard_mappings.go index ee55c1126d..b4538eb0eb 100644 --- a/pkg/controller/direct/monitoring/dashboard_mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_mappings.go @@ -40,6 +40,29 @@ func Empty_ToProto(mapCtx *MapContext, in *krm.Empty) *emptypb.Empty { return out } +func AlertChart_FromProto(mapCtx *MapContext, in *pb.AlertChart) *krm.AlertChart { + if in == nil { + return nil + } + out := &krm.AlertChart{} + if in.Name != "" { + out.AlertPolicyRef = &krm.MonitoringAlertPolicyRef{ + External: in.Name, + } + } + return out +} +func AlertChart_ToProto(mapCtx *MapContext, in *krm.AlertChart) *pb.AlertChart { + if in == nil { + return nil + } + out := &pb.AlertChart{} + if in.AlertPolicyRef != nil { + out.Name = in.AlertPolicyRef.External + } + return out +} + func Aggregation_AlignmentPeriod_FromProto(mapCtx *MapContext, in *durationpb.Duration) *string { return SecondsString_FromProto(mapCtx, in) } diff --git a/pkg/controller/direct/monitoring/monitoringdashboard_controller.go b/pkg/controller/direct/monitoring/monitoringdashboard_controller.go index c04c129a01..f62d0d6638 100644 --- a/pkg/controller/direct/monitoring/monitoringdashboard_controller.go +++ b/pkg/controller/direct/monitoring/monitoringdashboard_controller.go @@ -95,7 +95,7 @@ func (m *dashboardModel) AdapterForObject(ctx context.Context, kube client.Reade return nil, fmt.Errorf("cannot resolve project") } - if err := VisitFields(obj, &refNormalizer{ctx: ctx, src: obj, kube: kube}); err != nil { + if err := VisitFields(obj, &refNormalizer{ctx: ctx, src: obj, project: *projectRef, kube: kube}); err != nil { return nil, err } diff --git a/pkg/controller/direct/monitoring/refs.go b/pkg/controller/direct/monitoring/refs.go index 8ddd53dd96..663dc92bc6 100644 --- a/pkg/controller/direct/monitoring/refs.go +++ b/pkg/controller/direct/monitoring/refs.go @@ -23,6 +23,10 @@ import ( refs "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/references" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -85,10 +89,76 @@ func normalizeResourceName(ctx context.Context, reader client.Reader, src client return ref, nil } +func normalizeMonitoringAlertPolicyRef(ctx context.Context, reader client.Reader, src client.Object, project references.Project, ref *krm.MonitoringAlertPolicyRef) (*krm.MonitoringAlertPolicyRef, error) { + if ref == nil { + return nil, nil + } + + if ref.Name == "" && ref.External == "" { + return nil, fmt.Errorf("must specify either name or external on reference") + } + if ref.Name != "" && ref.External != "" { + return nil, fmt.Errorf("cannot specify both name and external on reference") + } + + if ref.External != "" { + tokens := strings.Split(ref.External, "/") + if 
len(tokens) == 2 && tokens[0] == "alertPolicies" { + return &krm.MonitoringAlertPolicyRef{ + External: fmt.Sprintf("projects/%s/alertPolicies/%s", project.ProjectID, tokens[1]), + }, nil + } + if len(tokens) == 4 && tokens[0] == "projects" && tokens[2] == "alertPolicies" { + return &krm.MonitoringAlertPolicyRef{ + External: fmt.Sprintf("projects/%s/alertPolicies/%s", tokens[1], tokens[3]), + }, nil + } + return nil, fmt.Errorf("format of alertPolicyRef external=%q was not known (use projects/<projectId>/alertPolicies/<alertPolicyId> or alertPolicies/<alertPolicyId>)", ref.External) + } + + key := types.NamespacedName{ + Namespace: ref.Namespace, + Name: ref.Name, + } + if key.Namespace == "" { + key.Namespace = src.GetNamespace() + } + + alertPolicy := &unstructured.Unstructured{} + alertPolicy.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "monitoring.cnrm.cloud.google.com", + Version: "v1beta1", + Kind: "MonitoringAlertPolicy", + }) + if err := reader.Get(ctx, key, alertPolicy); err != nil { + if apierrors.IsNotFound(err) { + return nil, fmt.Errorf("referenced MonitoringAlertPolicy %v not found", key) + } + return nil, fmt.Errorf("error reading referenced MonitoringAlertPolicy %v: %w", key, err) + } + + alertPolicyResourceID, err := references.GetResourceID(alertPolicy) + if err != nil { + return nil, err + } + + alertPolicyProjectID, err := references.ResolveProjectIDForObject(ctx, reader, alertPolicy) + if err != nil { + return nil, err + } + + ref = &krm.MonitoringAlertPolicyRef{ + External: fmt.Sprintf("projects/%s/alertPolicies/%s", alertPolicyProjectID, alertPolicyResourceID), + } + + return ref, nil +} + type refNormalizer struct { - ctx context.Context - kube client.Reader - src client.Object + ctx context.Context + kube client.Reader + src client.Object + project references.Project } func (r *refNormalizer) VisitField(path string, v any) error { @@ -101,5 +171,13 @@ func (r *refNormalizer) VisitField(path string, v any) error { } } } + if alertChart, ok := v.(*krm.AlertChart); ok { + if ref, err := normalizeMonitoringAlertPolicyRef(r.ctx, r.kube, r.src, r.project, alertChart.AlertPolicyRef); err != nil { + return err + } else { + alertChart.AlertPolicyRef = ref + } + } + return nil } diff --git a/pkg/controller/direct/references/computenetworkref.go b/pkg/controller/direct/references/computenetworkref.go index d93d9bb361..1de30e1909 100644 --- a/pkg/controller/direct/references/computenetworkref.go +++ b/pkg/controller/direct/references/computenetworkref.go @@ -20,6 +20,7 @@ import ( "strings" "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" + refs "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -88,17 +89,52 @@ func ResolveComputeNetwork(ctx context.Context, reader client.Reader, src client computenetworkID = computenetwork.GetName() } - //TODO: extract GetProject helper function!
- projectID := "" - annotations := computenetwork.GetAnnotations() - fromAnnotation, ok := annotations["cnrm.cloud.google.com/project-id"] - if ok { - projectID = fromAnnotation - } else if computenetwork.GetNamespace() != "" { - projectID = computenetwork.GetNamespace() + computeNetworkProjectID, err := ResolveProjectIDForObject(ctx, reader, computenetwork) + if err != nil { + return nil, err } return &ComputeNetwork{ - Project: projectID, + Project: computeNetworkProjectID, ComputeNetworkID: computenetworkID, }, nil } + +func ResolveProjectIDForObject(ctx context.Context, reader client.Reader, obj *unstructured.Unstructured) (string, error) { + projectRefExternal, _, _ := unstructured.NestedString(obj.Object, "spec", "projectRef", "external") + if projectRefExternal != "" { + projectRef := refs.ProjectRef{ + External: projectRefExternal, + } + + project, err := ResolveProject(ctx, reader, obj, &projectRef) + if err != nil { + return "", fmt.Errorf("cannot parse projectRef.external %q in %v %v/%v: %w", projectRefExternal, obj.GetKind(), obj.GetNamespace(), obj.GetName(), err) + } + return project.ProjectID, nil + } + + projectRefName, _, _ := unstructured.NestedString(obj.Object, "spec", "projectRef", "name") + if projectRefName != "" { + projectRefNamespace, _, _ := unstructured.NestedString(obj.Object, "spec", "projectRef", "namespace") + + projectRef := refs.ProjectRef{ + Name: projectRefName, + Namespace: projectRefNamespace, + } + if projectRef.Namespace == "" { + projectRef.Namespace = obj.GetNamespace() + } + + project, err := ResolveProject(ctx, reader, obj, &projectRef) + if err != nil { + return "", fmt.Errorf("cannot parse projectRef in %v %v/%v: %w", obj.GetKind(), obj.GetNamespace(), obj.GetName(), err) + } + return project.ProjectID, nil + } + + if projectID := obj.GetAnnotations()["cnrm.cloud.google.com/project-id"]; projectID != "" { + return projectID, nil + } + + return "", fmt.Errorf("cannot find project id for %v %v/%v", obj.GetKind(), obj.GetNamespace(), obj.GetName()) +} diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden index 4e9b6f4a78..28c58fc9b6 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden @@ -58,6 +58,10 @@ spec: - collapsibleGroup: collapsed: true title: CollapsibleGroup Widget + - alertChart: + alertPolicyRef: + external: projects/${projectId}/alertPolicies/${alertPolicyID} + title: AlertChart Widget displayName: monitoringdashboard-full projectRef: external: ${projectId} \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml index 195ec8a051..422e7a8bb2 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml +++ 
b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml @@ -66,6 +66,10 @@ spec: - collapsibleGroup: collapsed: true title: CollapsibleGroup Widget + - alertChart: + alertPolicyRef: + name: monitoringalertpolicy-${uniqueId} + title: AlertChart Widget displayName: monitoringdashboard-full projectRef: external: ${projectId} diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log index fb8c378c26..11c18f61dc 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log @@ -1,3 +1,165 @@ +POST https://monitoring.googleapis.com/v3/projects/${projectId}/alertPolicies?alt=json +Content-Type: application/json +User-Agent: Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +{ + "combiner": "AND_WITH_MATCHING_RESOURCE", + "conditions": [ + { + "conditionThreshold": { + "aggregations": [ + { + "alignmentPeriod": "60s", + "crossSeriesReducer": "REDUCE_MEAN", + "groupByFields": [ + "project", + "resource.label.instance_id", + "resource.label.zone" + ], + "perSeriesAligner": "ALIGN_MAX" + } + ], + "comparison": "COMPARISON_GT", + "duration": "900s", + "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", + "thresholdValue": 0.9, + "trigger": { + "count": 1 + } + }, + "displayName": "Very high CPU usage" + } + ], + "displayName": "Test Alert Policy", + "enabled": true, + "userLabels": { + "cnrm-test": "true", + "managed-by-cnrm": "true" + } +} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "combiner": "AND_WITH_MATCHING_RESOURCE", + "conditions": [ + { + "conditionThreshold": { + "aggregations": [ + { + "alignmentPeriod": "60s", + "crossSeriesReducer": "REDUCE_MEAN", + "groupByFields": [ + "project", + "resource.label.instance_id", + "resource.label.zone" + ], + "perSeriesAligner": "ALIGN_MAX" + } + ], + "comparison": "COMPARISON_GT", + "duration": "900s", + "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", + "thresholdValue": 0.9, + "trigger": { + "count": 1 + } + }, + "displayName": "Very high CPU usage", + "name": "projects/${projectId}/alertPolicies/${alertPolicyID}/conditions/${conditionID}" + } + ], + "creationRecord": { + "mutateTime": "2024-04-01T12:34:56.123456Z", + "mutatedBy": "user@example.com" + }, + "displayName": "Test Alert Policy", + "enabled": true, + "mutationRecord": { + "mutateTime": "2024-04-01T12:34:56.123456Z", + "mutatedBy": "user@example.com" + }, + "name": "projects/${projectId}/alertPolicies/${alertPolicyID}", + "userLabels": { + "cnrm-test": "true", + "managed-by-cnrm": "true" + } +} + +--- + +GET https://monitoring.googleapis.com/v3/projects/${projectId}/alertPolicies/${alertPolicyID}?alt=json +Content-Type: application/json +User-Agent: Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 
terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "combiner": "AND_WITH_MATCHING_RESOURCE", + "conditions": [ + { + "conditionThreshold": { + "aggregations": [ + { + "alignmentPeriod": "60s", + "crossSeriesReducer": "REDUCE_MEAN", + "groupByFields": [ + "project", + "resource.label.instance_id", + "resource.label.zone" + ], + "perSeriesAligner": "ALIGN_MAX" + } + ], + "comparison": "COMPARISON_GT", + "duration": "900s", + "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", + "thresholdValue": 0.9, + "trigger": { + "count": 1 + } + }, + "displayName": "Very high CPU usage", + "name": "projects/${projectId}/alertPolicies/${alertPolicyID}/conditions/${conditionID}" + } + ], + "creationRecord": { + "mutateTime": "2024-04-01T12:34:56.123456Z", + "mutatedBy": "user@example.com" + }, + "displayName": "Test Alert Policy", + "enabled": true, + "mutationRecord": { + "mutateTime": "2024-04-01T12:34:56.123456Z", + "mutatedBy": "user@example.com" + }, + "name": "projects/${projectId}/alertPolicies/${alertPolicyID}", + "userLabels": { + "cnrm-test": "true", + "managed-by-cnrm": "true" + } +} + +--- + GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?%24alt=json%3Benum-encoding%3Dint Content-Type: application/json User-Agent: kcc/controller-manager @@ -118,6 +280,12 @@ x-goog-request-params: parent=projects%2F${projectId} "collapsed": true }, "title": "CollapsibleGroup Widget" + }, + { + "alertChart": { + "name": "projects/${projectId}/alertPolicies/${alertPolicyID}" + }, + "title": "AlertChart Widget" } ] } @@ -229,6 +397,12 @@ X-Xss-Protection: 0 "collapsed": true }, "title": "CollapsibleGroup Widget" + }, + { + "alertChart": { + "name": "projects/${projectId}/alertPolicies/${alertPolicyID}" + }, + "title": "AlertChart Widget" } ] } @@ -348,6 +522,12 @@ X-Xss-Protection: 0 "collapsed": true }, "title": "CollapsibleGroup Widget" + }, + { + "alertChart": { + "name": "projects/${projectId}/alertPolicies/${alertPolicyID}" + }, + "title": "AlertChart Widget" } ] } @@ -376,4 +556,86 @@ X-Content-Type-Options: nosniff X-Frame-Options: SAMEORIGIN X-Xss-Protection: 0 +{} + +--- + +GET https://monitoring.googleapis.com/v3/projects/${projectId}/alertPolicies/${alertPolicyID}?alt=json +Content-Type: application/json +User-Agent: Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "combiner": "AND_WITH_MATCHING_RESOURCE", + "conditions": [ + { + "conditionThreshold": { + "aggregations": [ + { + "alignmentPeriod": "60s", + "crossSeriesReducer": "REDUCE_MEAN", + "groupByFields": [ + "project", + "resource.label.instance_id", + "resource.label.zone" + ], + "perSeriesAligner": "ALIGN_MAX" + } + ], + "comparison": "COMPARISON_GT", + "duration": "900s", + "filter": "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"", + "thresholdValue": 0.9, + "trigger": { + "count": 1 + } + }, + "displayName": "Very high CPU usage", + "name": 
"projects/${projectId}/alertPolicies/${alertPolicyID}/conditions/${conditionID}" + } + ], + "creationRecord": { + "mutateTime": "2024-04-01T12:34:56.123456Z", + "mutatedBy": "user@example.com" + }, + "displayName": "Test Alert Policy", + "enabled": true, + "mutationRecord": { + "mutateTime": "2024-04-01T12:34:56.123456Z", + "mutatedBy": "user@example.com" + }, + "name": "projects/${projectId}/alertPolicies/${alertPolicyID}", + "userLabels": { + "cnrm-test": "true", + "managed-by-cnrm": "true" + } +} + +--- + +DELETE https://monitoring.googleapis.com/v3/projects/${projectId}/alertPolicies/${alertPolicyID}?alt=json +Content-Type: application/json +User-Agent: Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + {} \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml index e8f560ec81..0602c420bb 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml @@ -72,3 +72,7 @@ spec: - title: "CollapsibleGroup Widget" collapsibleGroup: collapsed: true + - title: "AlertChart Widget" + alertChart: + alertPolicyRef: + name: monitoringalertpolicy-${uniqueId} \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/dependencies.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/dependencies.yaml new file mode 100644 index 0000000000..95a1432802 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/dependencies.yaml @@ -0,0 +1,42 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: monitoring.cnrm.cloud.google.com/v1beta1 +kind: MonitoringAlertPolicy +metadata: + name: monitoringalertpolicy-${uniqueId} +spec: + displayName: Test Alert Policy + enabled: true + # notificationChannels: + # - name: monitoringnotificationchannel1-${uniqueId} + # - name: monitoringnotificationchannel2-${uniqueId} + combiner: AND_WITH_MATCHING_RESOURCE + conditions: + - displayName: Very high CPU usage + conditionThreshold: + filter: metric.type="compute.googleapis.com/instance/cpu/utilization" AND resource.type="gce_instance" + thresholdValue: 0.9 + comparison: COMPARISON_GT + duration: 900s + trigger: + count: 1 + aggregations: + - perSeriesAligner: ALIGN_MAX + alignmentPeriod: 60s + crossSeriesReducer: REDUCE_MEAN + groupByFields: + - project + - resource.label.instance_id + - resource.label.zone diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md index 1b277d1b03..c0df0ac92d 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md @@ -81,7 +81,12 @@ columnLayout: columns: - weight: integer widgets: - - blank: {} + - alertChart: + alertPolicyRef: + external: string + name: string + namespace: string + blank: {} collapsibleGroup: collapsed: boolean logsPanel: @@ -237,7 +242,12 @@ displayName: string gridLayout: columns: integer widgets: - - blank: {} + - alertChart: + alertPolicyRef: + external: string + name: string + namespace: string + blank: {} collapsibleGroup: collapsed: boolean logsPanel: @@ -394,6 +404,11 @@ mosaicLayout: tiles: - height: integer widget: + alertChart: + alertPolicyRef: + external: string + name: string + namespace: string blank: {} collapsibleGroup: collapsed: boolean @@ -559,7 +574,12 @@ rowLayout: rows: - weight: integer widgets: - - blank: {} + - alertChart: + alertPolicyRef: + external: string + name: string + namespace: string + blank: {} collapsibleGroup: collapsed: boolean logsPanel: @@ -780,6 +800,56 @@ rowLayout:

{% verbatim %}{% endverbatim %}

+ columnLayout.columns[].widgets[].alertChart
+ Optional
+ object
+ {% verbatim %}A chart of alert policy data.{% endverbatim %}
+
+ columnLayout.columns[].widgets[].alertChart.alertPolicyRef
+ Required*
+ object
+ {% verbatim %}Required. A reference to the MonitoringAlertPolicy.{% endverbatim %}
+
+ columnLayout.columns[].widgets[].alertChart.alertPolicyRef.external
+ Optional
+ string
+ {% verbatim %}The MonitoringAlertPolicy link in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", when not managed by KCC.{% endverbatim %}
+
+ columnLayout.columns[].widgets[].alertChart.alertPolicyRef.name
+ Optional
+ string
+ {% verbatim %}The `name` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}
+
+ columnLayout.columns[].widgets[].alertChart.alertPolicyRef.namespace
+ Optional
+ string
+ {% verbatim %}The `namespace` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

columnLayout.columns[].widgets[].blank

@@ -2817,6 +2887,56 @@ rowLayout:

{% verbatim %}{% endverbatim %}

+ gridLayout.widgets[].alertChart
+ Optional
+ object
+ {% verbatim %}A chart of alert policy data.{% endverbatim %}
+
+ gridLayout.widgets[].alertChart.alertPolicyRef
+ Required*
+ object
+ {% verbatim %}Required. A reference to the MonitoringAlertPolicy.{% endverbatim %}
+
+ gridLayout.widgets[].alertChart.alertPolicyRef.external
+ Optional
+ string
+ {% verbatim %}The MonitoringAlertPolicy link in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", when not managed by KCC.{% endverbatim %}
+
+ gridLayout.widgets[].alertChart.alertPolicyRef.name
+ Optional
+ string
+ {% verbatim %}The `name` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}
+
+ gridLayout.widgets[].alertChart.alertPolicyRef.namespace
+ Optional
+ string
+ {% verbatim %}The `namespace` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

gridLayout.widgets[].blank

@@ -4864,6 +4984,56 @@ rowLayout:

{% verbatim %}The informational widget contained in the tile. For example, an `XyChart`.{% endverbatim %}

+ mosaicLayout.tiles[].widget.alertChart
+ Optional
+ object
+ {% verbatim %}A chart of alert policy data.{% endverbatim %}
+
+ mosaicLayout.tiles[].widget.alertChart.alertPolicyRef
+ Required*
+ object
+ {% verbatim %}Required. A reference to the MonitoringAlertPolicy.{% endverbatim %}
+
+ mosaicLayout.tiles[].widget.alertChart.alertPolicyRef.external
+ Optional
+ string
+ {% verbatim %}The MonitoringAlertPolicy link in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", when not managed by KCC.{% endverbatim %}
+
+ mosaicLayout.tiles[].widget.alertChart.alertPolicyRef.name
+ Optional
+ string
+ {% verbatim %}The `name` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}
+
+ mosaicLayout.tiles[].widget.alertChart.alertPolicyRef.namespace
+ Optional
+ string
+ {% verbatim %}The `namespace` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

mosaicLayout.tiles[].widget.blank

@@ -7001,6 +7171,56 @@ rowLayout:

{% verbatim %}{% endverbatim %}

+ rowLayout.rows[].widgets[].alertChart
+ Optional
+ object
+ {% verbatim %}A chart of alert policy data.{% endverbatim %}
+
+ rowLayout.rows[].widgets[].alertChart.alertPolicyRef
+ Required*
+ object
+ {% verbatim %}Required. A reference to the MonitoringAlertPolicy.{% endverbatim %}
+
+ rowLayout.rows[].widgets[].alertChart.alertPolicyRef.external
+ Optional
+ string
+ {% verbatim %}The MonitoringAlertPolicy link in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", when not managed by KCC.{% endverbatim %}
+
+ rowLayout.rows[].widgets[].alertChart.alertPolicyRef.name
+ Optional
+ string
+ {% verbatim %}The `name` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}
+
+ rowLayout.rows[].widgets[].alertChart.alertPolicyRef.namespace
+ Optional
+ string
+ {% verbatim %}The `namespace` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

rowLayout.rows[].widgets[].blank
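To make the generated reference above easier to follow, here is a minimal sketch of how the new `alertChart` widget can reference a `MonitoringAlertPolicy`, mirroring the test fixture added earlier in this patch; the resource names, project ID, and policy ID below are placeholders, not values from the patch.

```yaml
apiVersion: monitoring.cnrm.cloud.google.com/v1beta1
kind: MonitoringDashboard
metadata:
  name: monitoringdashboard-alertchart-example
spec:
  displayName: "Example dashboard with an AlertChart widget"
  columnLayout:
    columns:
    - widgets:
      - title: "AlertChart Widget"
        alertChart:
          # References a MonitoringAlertPolicy managed by Config Connector
          # in the same namespace.
          alertPolicyRef:
            name: monitoringalertpolicy-example
      - title: "AlertChart Widget (pre-existing policy)"
        alertChart:
          alertPolicyRef:
            # For a policy not managed by KCC, use the documented external form.
            external: projects/example-project/alertPolicies/1234567890123
```

The `name` form is expanded to the full `projects/{project}/alertPolicies/{id}` name by the reference-normalization code in `pkg/controller/direct/monitoring/refs.go`, while the `external` form accepts the `projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]` format described in the table above.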

diff --git a/tests/e2e/normalize.go b/tests/e2e/normalize.go index fcedb680c7..61c5b42694 100644 --- a/tests/e2e/normalize.go +++ b/tests/e2e/normalize.go @@ -106,6 +106,20 @@ func normalizeKRMObject(u *unstructured.Unstructured, project testgcp.GCPProject // Specific to Compute SSL Certs visitor.replacePaths[".status.observedState.certificateId"] = "1.719337333063698e+18" + // Specific to MonitoringDashboard + visitor.stringTransforms = append(visitor.stringTransforms, func(path string, s string) string { + if strings.HasSuffix(path, ".alertChart.alertPolicyRef.external") { + tokens := strings.Split(s, "/") + if len(tokens) > 2 { + switch tokens[len(tokens)-2] { + case "alertPolicies": + s = strings.ReplaceAll(s, tokens[len(tokens)-1], "${alertPolicyID}") + } + } + } + return s + }) + visitor.sortSlices = sets.New[string]() // TODO: This should not be needed, we want to avoid churning the kube objects visitor.sortSlices.Insert(".spec.access") From 5a240b7b07d4d68b94e97d83ce3dc2fd686b0c02 Mon Sep 17 00:00:00 2001 From: justinsb Date: Thu, 27 Jun 2024 11:02:31 -0400 Subject: [PATCH 070/101] refactor: move MonitoringAlertPolicyRef to refs package --- .../v1beta1/monitoringdashboard_types.go | 2 +- .../v1beta1/zz_generated.deepcopy.go | 19 ++----------------- .../v1beta1/alertpolicyref.go | 0 .../direct/monitoring/dashboard_mappings.go | 3 ++- pkg/controller/direct/monitoring/refs.go | 8 ++++---- 5 files changed, 9 insertions(+), 23 deletions(-) rename apis/{monitoring => refs}/v1beta1/alertpolicyref.go (100%) diff --git a/apis/monitoring/v1beta1/monitoringdashboard_types.go b/apis/monitoring/v1beta1/monitoringdashboard_types.go index f99b4b2b3d..61360dabf7 100644 --- a/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -42,7 +42,7 @@ var ( type AlertChart struct { // Required. A reference to the MonitoringAlertPolicy. // +required - AlertPolicyRef *MonitoringAlertPolicyRef `json:"alertPolicyRef"` + AlertPolicyRef *refs.MonitoringAlertPolicyRef `json:"alertPolicyRef"` } // +kcc:proto=google.monitoring.dashboard.v1.ChartOptions diff --git a/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/apis/monitoring/v1beta1/zz_generated.deepcopy.go index 3215814d4b..36b6ca8349 100644 --- a/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -19,6 +19,7 @@ package v1beta1 import ( + refsv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -64,7 +65,7 @@ func (in *AlertChart) DeepCopyInto(out *AlertChart) { *out = *in if in.AlertPolicyRef != nil { in, out := &in.AlertPolicyRef, &out.AlertPolicyRef - *out = new(MonitoringAlertPolicyRef) + *out = new(refsv1beta1.MonitoringAlertPolicyRef) **out = **in } return @@ -336,22 +337,6 @@ func (in *LogsPanel) DeepCopy() *LogsPanel { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MonitoringAlertPolicyRef) DeepCopyInto(out *MonitoringAlertPolicyRef) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringAlertPolicyRef. 
-func (in *MonitoringAlertPolicyRef) DeepCopy() *MonitoringAlertPolicyRef { - if in == nil { - return nil - } - out := new(MonitoringAlertPolicyRef) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MonitoringDashboard) DeepCopyInto(out *MonitoringDashboard) { *out = *in diff --git a/apis/monitoring/v1beta1/alertpolicyref.go b/apis/refs/v1beta1/alertpolicyref.go similarity index 100% rename from apis/monitoring/v1beta1/alertpolicyref.go rename to apis/refs/v1beta1/alertpolicyref.go diff --git a/pkg/controller/direct/monitoring/dashboard_mappings.go b/pkg/controller/direct/monitoring/dashboard_mappings.go index b4538eb0eb..77a40e0572 100644 --- a/pkg/controller/direct/monitoring/dashboard_mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_mappings.go @@ -22,6 +22,7 @@ import ( pb "cloud.google.com/go/monitoring/dashboard/apiv1/dashboardpb" krm "github.com/GoogleCloudPlatform/k8s-config-connector/apis/monitoring/v1beta1" + refs "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" ) @@ -46,7 +47,7 @@ func AlertChart_FromProto(mapCtx *MapContext, in *pb.AlertChart) *krm.AlertChart } out := &krm.AlertChart{} if in.Name != "" { - out.AlertPolicyRef = &krm.MonitoringAlertPolicyRef{ + out.AlertPolicyRef = &refs.MonitoringAlertPolicyRef{ External: in.Name, } } diff --git a/pkg/controller/direct/monitoring/refs.go b/pkg/controller/direct/monitoring/refs.go index 663dc92bc6..2b877dcd46 100644 --- a/pkg/controller/direct/monitoring/refs.go +++ b/pkg/controller/direct/monitoring/refs.go @@ -89,7 +89,7 @@ func normalizeResourceName(ctx context.Context, reader client.Reader, src client return ref, nil } -func normalizeMonitoringAlertPolicyRef(ctx context.Context, reader client.Reader, src client.Object, project references.Project, ref *krm.MonitoringAlertPolicyRef) (*krm.MonitoringAlertPolicyRef, error) { +func normalizeMonitoringAlertPolicyRef(ctx context.Context, reader client.Reader, src client.Object, project references.Project, ref *refs.MonitoringAlertPolicyRef) (*refs.MonitoringAlertPolicyRef, error) { if ref == nil { return nil, nil } @@ -104,12 +104,12 @@ func normalizeMonitoringAlertPolicyRef(ctx context.Context, reader client.Reader if ref.External != "" { tokens := strings.Split(ref.External, "/") if len(tokens) == 2 && tokens[0] == "alertPolicies" { - ref = &krm.MonitoringAlertPolicyRef{ + ref = &refs.MonitoringAlertPolicyRef{ External: fmt.Sprintf("projects/%s/alertPolicies/%s", project.ProjectID, tokens[1]), } } if len(tokens) == 4 && tokens[0] == "project" && tokens[2] == "alertPolicies" { - ref = &krm.MonitoringAlertPolicyRef{ + ref = &refs.MonitoringAlertPolicyRef{ External: fmt.Sprintf("projects/%s/alertPolicies/%s", tokens[1], tokens[3]), } } @@ -147,7 +147,7 @@ func normalizeMonitoringAlertPolicyRef(ctx context.Context, reader client.Reader return nil, err } - ref = &krm.MonitoringAlertPolicyRef{ + ref = &refs.MonitoringAlertPolicyRef{ External: fmt.Sprintf("projects/%s/alertPolicies/%s", alertPolicyProjectID, alertPolicyResourceID), } From 99080cb5edbccb46ac70e4231d7b71d576114bde Mon Sep 17 00:00:00 2001 From: alex <8968914+acpana@users.noreply.github.com> Date: Thu, 27 Jun 2024 10:28:11 -0700 Subject: [PATCH 071/101] docs: don't remove alpha service mappings (#2136) * docs: don't remove alpha service mappings Signed-off-by: Alex Pana 
<8968914+acpana@users.noreply.github.com> * Apply suggestions from code review --------- Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- README.NewResourceFromTerraform.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.NewResourceFromTerraform.md b/README.NewResourceFromTerraform.md index 8e4e54f5fa..ca2c355479 100644 --- a/README.NewResourceFromTerraform.md +++ b/README.NewResourceFromTerraform.md @@ -180,6 +180,8 @@ ServiceMappings file. Add the `ResourceConfig` for your resource: 1. Set `v1alpha1ToV1beta1` to `true`. 1. Set `storageVersion` to `v1alpha1`. + > NOTE: For [autogenereated resources](https://github.com/GoogleCloudPlatform/k8s-config-connector/tree/2f4f2c6ee65034f7bbb4b4f62d2ba15842ff114d/scripts/resource-autogen/generated/servicemappings), don't remove the service mappings. Instead copy the content or the file over to [config/servicemappings/](https://github.com/GoogleCloudPlatform/k8s-config-connector/tree/v1.119.0/config/servicemappings). + 1. Ensure the `autoGenerated` field is unset. 1. Add `resourceID` if the resource has the resource ID. The value of From 7683911882323b2313673510ebb09722c3b0cdad Mon Sep 17 00:00:00 2001 From: xiaoweim Date: Fri, 14 Jun 2024 20:47:32 +0000 Subject: [PATCH 072/101] fix: add getter expander config --- .../composition/config/release/kustomization.yaml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/experiments/compositions/composition/config/release/kustomization.yaml b/experiments/compositions/composition/config/release/kustomization.yaml index ae3559ffa9..faa8fbf090 100644 --- a/experiments/compositions/composition/config/release/kustomization.yaml +++ b/experiments/compositions/composition/config/release/kustomization.yaml @@ -164,7 +164,7 @@ patches: name: controller-manager namespace: system - patch: '[{"op": "replace", "path": "/spec/template/spec/containers/1/image", - "value": "gcr.io/krmapihosting-release/composition:v0.0.1.alpha"}]' + "value": "gcr.io/krmapihosting-release/composition:v0.0.328"}]' target: kind: Deployment name: controller-manager @@ -175,5 +175,11 @@ patches: kind: Deployment name: jinja2-v0.0.1 namespace: system +- patch: '[{"op": "replace", "path": "/spec/template/spec/containers/0/image", + "value": "gcr.io/krmapihosting-release/expander-getter:v0.0.1"}]' + target: + kind: Deployment + name: getter-v0.0.1 + namespace: system apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization From 6dfddb691f64fc2d1d82e19c149a1fb7adbc0122 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Thu, 27 Jun 2024 20:50:26 +0000 Subject: [PATCH 073/101] fix: third_party changes to rename Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- .../compute/resource_compute_managed_ssl_certificate.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/services/compute/resource_compute_managed_ssl_certificate.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/services/compute/resource_compute_managed_ssl_certificate.go index 6aeaa228ee..c00e1e484a 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/services/compute/resource_compute_managed_ssl_certificate.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/services/compute/resource_compute_managed_ssl_certificate.go @@ -100,7 +100,7 @@ These are in the same namespace as the 
managed SSL certificates.`, which type this is. Default value: "MANAGED" Possible values: ["MANAGED"]`, Default: "MANAGED", }, - "certificate_id": { + "certificate_i_d": { Type: schema.TypeInt, Computed: true, Description: `The unique identifier for the resource.`, @@ -271,7 +271,7 @@ func resourceComputeManagedSslCertificateRead(d *schema.ResourceData, meta inter if err := d.Set("description", flattenComputeManagedSslCertificateDescription(res["description"], d, config)); err != nil { return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) } - if err := d.Set("certificate_id", flattenComputeManagedSslCertificateCertificateId(res["id"], d, config)); err != nil { + if err := d.Set("certificate_i_d", flattenComputeManagedSslCertificateCertificateId(res["id"], d, config)); err != nil { return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) } if err := d.Set("name", flattenComputeManagedSslCertificateName(res["name"], d, config)); err != nil { From 7ccc934fc0f800745b3866f8a5aae28fe45d3f96 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Thu, 27 Jun 2024 20:51:20 +0000 Subject: [PATCH 074/101] chore: make ready-pr Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- ...emanagedsslcertificates.compute.cnrm.cloud.google.com.yaml | 4 ++-- .../compute/v1beta1/computemanagedsslcertificate_types.go | 2 +- .../generated/apis/compute/v1beta1/zz_generated.deepcopy.go | 4 ++-- .../resource-docs/compute/computemanagedsslcertificate.md | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computemanagedsslcertificates.compute.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computemanagedsslcertificates.compute.cnrm.cloud.google.com.yaml index cb980a5572..92859629aa 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computemanagedsslcertificates.compute.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computemanagedsslcertificates.compute.cnrm.cloud.google.com.yaml @@ -154,7 +154,7 @@ spec: observedState: description: The observed state of the underlying GCP resource. properties: - certificateId: + certificateID: description: The unique identifier for the resource. type: integer creationTimestamp: @@ -310,7 +310,7 @@ spec: observedState: description: The observed state of the underlying GCP resource. properties: - certificateId: + certificateID: description: The unique identifier for the resource. type: integer creationTimestamp: diff --git a/pkg/clients/generated/apis/compute/v1beta1/computemanagedsslcertificate_types.go b/pkg/clients/generated/apis/compute/v1beta1/computemanagedsslcertificate_types.go index 4b672876a4..c87c5d5028 100644 --- a/pkg/clients/generated/apis/compute/v1beta1/computemanagedsslcertificate_types.go +++ b/pkg/clients/generated/apis/compute/v1beta1/computemanagedsslcertificate_types.go @@ -67,7 +67,7 @@ type ComputeManagedSSLCertificateSpec struct { type ManagedsslcertificateObservedStateStatus struct { /* The unique identifier for the resource. */ // +optional - CertificateId *int64 `json:"certificateId,omitempty"` + CertificateID *int64 `json:"certificateID,omitempty"` /* Creation timestamp in RFC3339 text format. 
*/ // +optional diff --git a/pkg/clients/generated/apis/compute/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/compute/v1beta1/zz_generated.deepcopy.go index cd8378fb9e..0cb6c21281 100644 --- a/pkg/clients/generated/apis/compute/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/compute/v1beta1/zz_generated.deepcopy.go @@ -13197,8 +13197,8 @@ func (in *ManagedsslcertificateManaged) DeepCopy() *ManagedsslcertificateManaged // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ManagedsslcertificateObservedStateStatus) DeepCopyInto(out *ManagedsslcertificateObservedStateStatus) { *out = *in - if in.CertificateId != nil { - in, out := &in.CertificateId, &out.CertificateId + if in.CertificateID != nil { + in, out := &in.CertificateID, &out.CertificateID *out = new(int64) **out = **in } diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computemanagedsslcertificate.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computemanagedsslcertificate.md index bd73364645..49b1942e08 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computemanagedsslcertificate.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computemanagedsslcertificate.md @@ -217,7 +217,7 @@ conditions: type: string observedGeneration: integer observedState: - certificateId: integer + certificateID: integer creationTimestamp: string expireTime: string selfLink: string @@ -296,7 +296,7 @@ observedState: - observedState.certificateId + observedState.certificateID

integer

{% verbatim %}The unique identifier for the resource.{% endverbatim %}

From fe4d47968cb907d9db281b74178f4debec3cb147 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Thu, 27 Jun 2024 20:51:39 +0000 Subject: [PATCH 075/101] tests: normilzation changes Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- .../_generated_object_computemanagedsslcertificate.golden.yaml | 2 +- tests/e2e/normalize.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computemanagedsslcertificate/_generated_object_computemanagedsslcertificate.golden.yaml b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computemanagedsslcertificate/_generated_object_computemanagedsslcertificate.golden.yaml index 424fbbb6e2..d9b2c00806 100644 --- a/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computemanagedsslcertificate/_generated_object_computemanagedsslcertificate.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computemanagedsslcertificate/_generated_object_computemanagedsslcertificate.golden.yaml @@ -28,6 +28,6 @@ status: type: Ready observedGeneration: 1 observedState: - certificateId: "1.719337333063698e+18" + certificateID: "1.719337333063698e+18" creationTimestamp: "1970-01-01T00:00:00Z" selfLink: https://compute.googleapis.com/compute/v1/projects/${projectId}/global/sslCertificates/computemanagedsslcertificate-${uniqueId} diff --git a/tests/e2e/normalize.go b/tests/e2e/normalize.go index fcedb680c7..abc9a8a095 100644 --- a/tests/e2e/normalize.go +++ b/tests/e2e/normalize.go @@ -104,7 +104,7 @@ func normalizeKRMObject(u *unstructured.Unstructured, project testgcp.GCPProject visitor.replacePaths[".status.observedState.softDeletePolicy.effectiveTime"] = "1970-01-01T00:00:00Z" // Specific to Compute SSL Certs - visitor.replacePaths[".status.observedState.certificateId"] = "1.719337333063698e+18" + visitor.replacePaths[".status.observedState.certificateID"] = "1.719337333063698e+18" visitor.sortSlices = sets.New[string]() // TODO: This should not be needed, we want to avoid churning the kube objects From ebdef71caaf04cddfe0fd8349489d6ed85678d85 Mon Sep 17 00:00:00 2001 From: justinsb Date: Wed, 26 Jun 2024 11:21:26 -0400 Subject: [PATCH 076/101] monitoringdashboard: add support for PieChart widget --- .../v1beta1/monitoringdashboard_types.go | 6 +- .../v1beta1/zz_generated.deepcopy.go | 5 + ...ards.monitoring.cnrm.cloud.google.com.yaml | 10290 ++++++++++------ docs/releasenotes/release-1.120.md | 1 + .../v1beta1/monitoringdashboard_types.go | 20 + .../v1beta1/zz_generated.deepcopy.go | 38 + .../dashboard_generated.mappings.go | 87 +- .../direct/monitoring/dashboard_mappings.go | 8 + ...ated_export_monitoringdashboardfull.golden | 17 + ...object_monitoringdashboardfull.golden.yaml | 17 + .../monitoringdashboardfull/_http.log | 78 + .../monitoringdashboardfull/create.yaml | 28 +- .../monitoring/monitoringdashboard.md | 6632 +++++++--- 13 files changed, 11520 insertions(+), 5707 deletions(-) diff --git a/apis/monitoring/v1beta1/monitoringdashboard_types.go b/apis/monitoring/v1beta1/monitoringdashboard_types.go index f99b4b2b3d..3e6ca426e4 100644 --- a/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -466,12 +466,14 @@ type Widget struct { /*NOTYET // A widget that shows list of incidents. IncidentList *IncidentList `json:"incidentList,omitempty"` + */ // A widget that displays timeseries data as a pie chart. 
PieChart *PieChart `json:"pieChart,omitempty"` - // A widget that displays a list of error groups. - ErrorReportingPanel *ErrorReportingPanel `json:"errorReportingPanel,omitempty"` + /* + // A widget that displays a list of error groups. + ErrorReportingPanel *ErrorReportingPanel `json:"errorReportingPanel,omitempty"` */ // A widget that defines a section header for easier navigation of the diff --git a/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/apis/monitoring/v1beta1/zz_generated.deepcopy.go index 3215814d4b..d99d9b4984 100644 --- a/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -1267,6 +1267,11 @@ func (in *Widget) DeepCopyInto(out *Widget) { *out = new(LogsPanel) (*in).DeepCopyInto(*out) } + if in.PieChart != nil { + in, out := &in.PieChart, &out.PieChart + *out = new(PieChart) + (*in).DeepCopyInto(*out) + } if in.SectionHeader != nil { in, out := &in.SectionHeader, &out.SectionHeader *out = new(SectionHeader) diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml index af8f75523f..e08bbf1faf 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml @@ -182,847 +182,482 @@ spec: type: object type: array type: object - scorecard: - description: A scorecard summarizing time series data. + pieChart: + description: A widget that displays timeseries data + as a pie chart. properties: - gaugeView: - description: Will cause the scorecard to show - a gauge chart. - properties: - lowerBound: - description: The lower bound for this gauge - chart. The value of the chart should always - be greater than or equal to this. - format: double - type: number - upperBound: - description: The upper bound for this gauge - chart. The value of the chart should always - be less than or equal to this. - format: double - type: number - type: object - sparkChartView: - description: Will cause the scorecard to show - a spark chart. - properties: - minAlignmentPeriod: - description: The lower bound on data point - frequency in the chart implemented by specifying - the minimum alignment period to use in a - time series query. For example, if the data - is published once every 10 minutes it would - not make sense to fetch and align data at - one minute intervals. This field is optional - and exists only as a hint. - type: string - sparkChartType: - description: Required. The type of sparkchart - to show in this chartView. - type: string - required: - - sparkChartType - type: object - thresholds: - description: |- - The thresholds used to determine the state of the scorecard given the - time series' current value. For an actual value x, the scorecard is in a - danger state if x is less than or equal to a danger threshold that triggers - below, or greater than or equal to a danger threshold that triggers above. - Similarly, if x is above/below a warning threshold that triggers - above/below, then the scorecard is in a warning state - unless x also puts - it in a danger state. (Danger trumps warning.) 
- - As an example, consider a scorecard with the following four thresholds: - - ``` - { - value: 90, - category: 'DANGER', - trigger: 'ABOVE', - }, - { - value: 70, - category: 'WARNING', - trigger: 'ABOVE', - }, - { - value: 10, - category: 'DANGER', - trigger: 'BELOW', - }, - { - value: 20, - category: 'WARNING', - trigger: 'BELOW', - } - ``` - - Then: values less than or equal to 10 would put the scorecard in a DANGER - state, values greater than 10 but less than or equal to 20 a WARNING state, - values strictly between 20 and 70 an OK state, values greater than or equal - to 70 but less than 90 a WARNING state, and values greater than or equal to - 90 a DANGER state. + chartType: + description: Required. Indicates the visualization + type for the PieChart. + type: string + dataSets: + description: Required. The queries for the chart's + data. items: properties: - color: - description: The state color for this threshold. - Color is not allowed in a XyChart. - type: string - direction: - description: The direction for the current - threshold. Direction is not allowed in - a XyChart. + minAlignmentPeriod: + description: Optional. The lower bound on + data point frequency for this data set, + implemented by specifying the minimum + alignment period to use in a time series + query. For example, if the data is published + once every 10 minutes, the `min_alignment_period` + should be at least 10 minutes. It would + not make sense to fetch and align data + at one minute intervals. type: string - label: - description: A label for the threshold. + sliceNameTemplate: + description: Optional. A template for the + name of the slice. This name will be displayed + in the legend and the tooltip of the pie + chart. It replaces the auto-generated + names for the slices. For example, if + the template is set to `${resource.labels.zone}`, + the zone's value will be used for the + name instead of the default name. type: string - value: - description: The value of the threshold. - The value should be defined in the native - scale of the metric. - format: double - type: number - type: object - type: array - timeSeriesQuery: - description: Required. Fields for querying time - series data from the Stackdriver metrics API. - properties: - timeSeriesFilter: - description: Filter parameters to fetch time - series. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this field - to combine multiple time series for - different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + timeSeriesQuery: + description: Required. The query for the + PieChart. 
See, `google.monitoring.dashboard.v1.TimeSeriesQuery`. + properties: + timeSeriesFilter: + description: Filter parameters to fetch + time series. + properties: + aggregation: + description: By default, the raw + time series data is returned. + Use this field to combine multiple + time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series are - partitioned into subsets prior to - applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, - so this field implicitly contains - `resource.type`. Fields not specified - in `group_by_fields` are aggregated - away. If `group_by_fields` is not - specified and all the time series - have the same resource type, then - the time series are aggregated into - a single output time series. If - `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all reducer operations can be applied to all time series. 
The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. - type: string - pickTimeSeriesFilter: - description: Ranking based time series - filter. - properties: - direction: - description: How to use the ranking - to select time series that pass - through the filter. - type: string - numTimeSeries: - description: How many time series - to allow to pass through the filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` is - applied to each time series independently - to produce the value which will - be used to compare the time series - to other time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after `aggregation` is applied. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of + exactly one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is not + possible to reduce across + different resource types, + so this field implicitly contains + `resource.type`. Fields not + specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same + resource type, then the time + series are aggregated into + a single output time series. 
+ If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series are - partitioned into subsets prior to - applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, - so this field implicitly contains - `resource.type`. Fields not specified - in `group_by_fields` are aggregated - away. If `group_by_fields` is not - specified and all the time series - have the same resource type, then - the time series are aggregated into - a single output time series. If - `cross_series_reducer` is not defined, - this field is ignored. - items: + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. 
If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - required: - - filter - type: object - timeSeriesFilterRatio: - description: Parameters to fetch a ratio between - two time series filters. - properties: - denominator: - description: The denominator of the ratio. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + pickTimeSeriesFilter: + description: Ranking based time + series filter. + properties: + direction: + description: How to use the + ranking to select time series + that pass through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to + other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. 
The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - numerator: - description: The numerator of the ratio. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. 
+ groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of + exactly one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is not + possible to reduce across + different resource types, + so this field implicitly contains + `resource.type`. Fields not + specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same + resource type, then the time + series are aggregated into + a single output time series. + If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio + between two time series filters. + properties: + denominator: + description: The denominator of + the ratio. + properties: + aggregation: + description: By default, the + raw time series data is returned. + Use this field to combine + multiple time series for different + views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + The value must be at least 60 seconds. 
If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - pickTimeSeriesFilter: - description: Ranking based time series - filter. - properties: - direction: - description: How to use the ranking - to select time series that pass - through the filter. 
- type: string - numTimeSeries: - description: How many time series - to allow to pass through the filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` is - applied to each time series independently - to produce the value which will - be used to compare the time series - to other time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after the ratio is computed. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of + fields to preserve when + `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time + series are partitioned + into subsets prior to + applying the aggregation + operation. Each subset + contains time series that + have the same value for + each of the grouping fields. + Each individual time series + is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is + not possible to reduce + across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the + time series have the same + resource type, then the + time series are aggregated + into a single output time + series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must be aligned in order to perform cross-time + series reduction. 
If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric + types, resources, and projects + to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the + ratio. + properties: + aggregation: + description: By default, the + raw time series data is returned. + Use this field to combine + multiple time series for different + views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series are - partitioned into subsets prior to - applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, - so this field implicitly contains - `resource.type`. Fields not specified - in `group_by_fields` are aggregated - away. If `group_by_fields` is not - specified and all the time series - have the same resource type, then - the time series are aggregated into - a single output time series. If - `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. 
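The aggregation block described above (alignmentPeriod, perSeriesAligner, crossSeriesReducer, groupByFields) recurs throughout this schema; the sketch below shows how it might be populated under a widget's timeSeriesQuery. The metric filter, period, and enum values are illustrative assumptions, not taken from this patch.

```yaml
# Illustrative only: an aggregation block as it might appear under a
# widget timeSeriesQuery.timeSeriesFilter in a MonitoringDashboard spec.
timeSeriesFilter:
  filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization"'  # assumed example filter
  aggregation:
    alignmentPeriod: "60s"             # must be at least 60 seconds
    perSeriesAligner: "ALIGN_MEAN"     # required (and not ALIGN_NONE) when crossSeriesReducer is set
    crossSeriesReducer: "REDUCE_MEAN"  # combines the aligned series into one output series
    groupByFields:
      - "resource.label.zone"          # preserved; other labels are aggregated away
```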
+ type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - type: object - timeSeriesQueryLanguage: - description: A query used to fetch time series - with MQL. - type: string - unitOverride: - description: The unit of data contained in - fetched time series. If non-empty, this - unit will override any unit that accompanies - fetched data. The format is the same as - the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - type: string - type: object - required: - - timeSeriesQuery - type: object - sectionHeader: - description: A widget that defines a section header - for easier navigation of the dashboard. - properties: - dividerBelow: - description: Whether to insert a divider below - the section in the table of contents - type: boolean - subtitle: - description: The subtitle of the section - type: string - type: object - text: - description: A raw string or markdown displaying textual - content. - properties: - content: - description: The text content to be displayed. - type: string - format: - description: How the text content is formatted. - type: string - style: - description: How the text is styled - properties: - backgroundColor: - description: The background color as a hex - string. "#RRGGBB" or "#RGB" - type: string - fontSize: - description: Font sizes for both the title - and content. The title will still be larger - relative to the content. - type: string - horizontalAlignment: - description: The horizontal alignment of both - the title and content - type: string - padding: - description: The amount of padding around - the widget - type: string - pointerLocation: - description: The pointer location for this - widget (also sometimes called a "tail") - type: string - textColor: - description: The text color as a hex string. - "#RRGGBB" or "#RGB" - type: string - verticalAlignment: - description: The vertical alignment of both - the title and content - type: string - type: object - type: object - title: - description: Optional. The title of the widget. - type: string - xyChart: - description: A chart of time series data. - properties: - chartOptions: - description: Display options for the chart. - properties: - mode: - description: The chart mode. - type: string - type: object - dataSets: - description: Required. The data displayed in this - chart. - items: - properties: - legendTemplate: - description: A template string for naming - `TimeSeries` in the resulting data set. 
- This should be a string with interpolations - of the form `${label_name}`, which will - resolve to the label's value. - type: string - minAlignmentPeriod: - description: Optional. The lower bound on - data point frequency for this data set, - implemented by specifying the minimum - alignment period to use in a time series - query For example, if the data is published - once every 10 minutes, the `min_alignment_period` - should be at least 10 minutes. It would - not make sense to fetch and align data - at one minute intervals. - type: string - plotType: - description: How this data should be plotted - on the chart. - type: string - timeSeriesQuery: - description: Required. Fields for querying - time series data from the Stackdriver - metrics API. - properties: - timeSeriesFilter: - description: Filter parameters to fetch - time series. - properties: - aggregation: - description: By default, the raw - time series data is returned. - Use this field to combine multiple - time series for different views - of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the - same value for each of the - grouping fields. Each individual - time series is a member of - exactly one subset. The `cross_series_reducer` - is applied to each subset - of time series. It is not - possible to reduce across - different resource types, - so this field implicitly contains - `resource.type`. Fields not - specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same - resource type, then the time - series are aggregated into - a single output time series. - If `cross_series_reducer` - is not defined, this field - is ignored. 
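For the data set fields referenced here (plotType, legendTemplate, minAlignmentPeriod, timeSeriesQuery), a hedged sketch of a single entry follows; the filter, label name, and periods are assumptions for illustration.

```yaml
# Illustrative only: a single dataSets entry.
- plotType: "LINE"                  # the schema notes timeshiftDuration applies only to LINE data sets
  legendTemplate: "CPU - ${zone}"   # ${label_name} interpolation; the label name is assumed
  minAlignmentPeriod: "600s"        # data assumed to be published every 10 minutes
  timeSeriesQuery:
    timeSeriesFilter:
      filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization"'  # assumed
      aggregation:
        perSeriesAligner: "ALIGN_MEAN"
```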
- items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of + fields to preserve when + `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time + series are partitioned + into subsets prior to + applying the aggregation + operation. Each subset + contains time series that + have the same value for + each of the grouping fields. + Each individual time series + is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is + not possible to reduce + across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the + time series have the same + resource type, then the + time series are aggregated + into a single output time + series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric + types, resources, and projects + to query. type: string + required: + - filter type: object - filter: - description: Required. 
The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string pickTimeSeriesFilter: description: Ranking based time series filter. @@ -1049,7 +684,7 @@ spec: type: object secondaryAggregation: description: Apply a second aggregation - after `aggregation` is applied. + after the ratio is computed. properties: alignmentPeriod: description: |- @@ -1135,1686 +770,1574 @@ spec: returned. type: string type: object - required: - - filter type: object - timeSeriesFilterRatio: - description: Parameters to fetch a ratio - between two time series filters. - properties: - denominator: - description: The denominator of - the ratio. - properties: - aggregation: - description: By default, the - raw time series data is returned. - Use this field to combine - multiple time series for different - views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of - fields to preserve when - `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time - series are partitioned - into subsets prior to - applying the aggregation - operation. Each subset - contains time series that - have the same value for - each of the grouping fields. - Each individual time series - is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset - of time series. It is - not possible to reduce - across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If - `group_by_fields` is not - specified and all the - time series have the same - resource type, then the - time series are aggregated - into a single output time - series. If `cross_series_reducer` - is not defined, this field - is ignored. 
- items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + timeSeriesQueryLanguage: + description: A query used to fetch time + series with MQL. + type: string + unitOverride: + description: The unit of data contained + in fetched time series. If non-empty, + this unit will override any unit that + accompanies fetched data. The format + is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object + required: + - timeSeriesQuery + type: object + type: array + showLabels: + description: Optional. Indicates whether or not + the pie chart should show slices' labels + type: boolean + required: + - chartType + - dataSets + type: object + scorecard: + description: A scorecard summarizing time series data. + properties: + gaugeView: + description: Will cause the scorecard to show + a gauge chart. + properties: + lowerBound: + description: The lower bound for this gauge + chart. The value of the chart should always + be greater than or equal to this. + format: double + type: number + upperBound: + description: The upper bound for this gauge + chart. The value of the chart should always + be less than or equal to this. + format: double + type: number + type: object + sparkChartView: + description: Will cause the scorecard to show + a spark chart. + properties: + minAlignmentPeriod: + description: The lower bound on data point + frequency in the chart implemented by specifying + the minimum alignment period to use in a + time series query. For example, if the data + is published once every 10 minutes it would + not make sense to fetch and align data at + one minute intervals. This field is optional + and exists only as a hint. + type: string + sparkChartType: + description: Required. The type of sparkchart + to show in this chartView. + type: string + required: + - sparkChartType + type: object + thresholds: + description: |- + The thresholds used to determine the state of the scorecard given the + time series' current value. For an actual value x, the scorecard is in a + danger state if x is less than or equal to a danger threshold that triggers + below, or greater than or equal to a danger threshold that triggers above. + Similarly, if x is above/below a warning threshold that triggers + above/below, then the scorecard is in a warning state - unless x also puts + it in a danger state. (Danger trumps warning.) - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + As an example, consider a scorecard with the following four thresholds: - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. 
The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric - types, resources, and projects - to query. - type: string - required: - - filter - type: object - numerator: - description: The numerator of the - ratio. - properties: - aggregation: - description: By default, the - raw time series data is returned. - Use this field to combine - multiple time series for different - views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + ``` + { + value: 90, + category: 'DANGER', + trigger: 'ABOVE', + }, + { + value: 70, + category: 'WARNING', + trigger: 'ABOVE', + }, + { + value: 10, + category: 'DANGER', + trigger: 'BELOW', + }, + { + value: 20, + category: 'WARNING', + trigger: 'BELOW', + } + ``` - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Then: values less than or equal to 10 would put the scorecard in a DANGER + state, values greater than 10 but less than or equal to 20 a WARNING state, + values strictly between 20 and 70 an OK state, values greater than or equal + to 70 but less than 90 a WARNING state, and values greater than or equal to + 90 a DANGER state. + items: + properties: + color: + description: The state color for this threshold. + Color is not allowed in a XyChart. + type: string + direction: + description: The direction for the current + threshold. Direction is not allowed in + a XyChart. + type: string + label: + description: A label for the threshold. + type: string + value: + description: The value of the threshold. + The value should be defined in the native + scale of the metric. + format: double + type: number + type: object + type: array + timeSeriesQuery: + description: Required. Fields for querying time + series data from the Stackdriver metrics API. + properties: + timeSeriesFilter: + description: Filter parameters to fetch time + series. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this field + to combine multiple time series for + different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Not all reducer operations can be applied to all time series. 
The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of - fields to preserve when - `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time - series are partitioned - into subsets prior to - applying the aggregation - operation. Each subset - contains time series that - have the same value for - each of the grouping fields. - Each individual time series - is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset - of time series. It is - not possible to reduce - across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If - `group_by_fields` is not - specified and all the - time series have the same - resource type, then the - time series are aggregated - into a single output time - series. If `cross_series_reducer` - is not defined, this field - is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series are + partitioned into subsets prior to + applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. 
The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, + so this field implicitly contains + `resource.type`. Fields not specified + in `group_by_fields` are aggregated + away. If `group_by_fields` is not + specified and all the time series + have the same resource type, then + the time series are aggregated into + a single output time series. If + `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric - types, resources, and projects - to query. - type: string - required: - - filter - type: object - pickTimeSeriesFilter: - description: Ranking based time - series filter. - properties: - direction: - description: How to use the - ranking to select time series - that pass through the filter. - type: string - numTimeSeries: - description: How many time series - to allow to pass through the - filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` - is applied to each time series - independently to produce the - value which will be used to - compare the time series to - other time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after the ratio is computed. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, resources, + and projects to query. + type: string + pickTimeSeriesFilter: + description: Ranking based time series + filter. 
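The scorecard fields above (gaugeView, sparkChartView, thresholds) might be combined as in the sketch below; the bounds, threshold colors, and filter are illustrative assumptions rather than values defined in this patch.

```yaml
# Illustrative only: a scorecard widget with a gauge and two threshold lines.
scorecard:
  gaugeView:
    lowerBound: 0.0
    upperBound: 100.0
  thresholds:
    - value: 70.0
      color: "YELLOW"      # assumed enum spelling for a warning state
      direction: "ABOVE"
    - value: 90.0
      color: "RED"         # assumed enum spelling for a danger state
      direction: "ABOVE"
  timeSeriesQuery:
    timeSeriesFilter:
      filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization"'  # assumed
```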
+ properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is + applied to each time series independently + to produce the value which will + be used to compare the time series + to other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the - same value for each of the - grouping fields. Each individual - time series is a member of - exactly one subset. The `cross_series_reducer` - is applied to each subset - of time series. It is not - possible to reduce across - different resource types, - so this field implicitly contains - `resource.type`. Fields not - specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same - resource type, then the time - series are aggregated into - a single output time series. - If `cross_series_reducer` - is not defined, this field - is ignored. 
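A hedged sketch of the pickTimeSeriesFilter described here; the enum spellings are assumptions based on the Cloud Monitoring dashboards API and are not defined in this patch.

```yaml
# Illustrative only: keep the five series with the highest mean value.
pickTimeSeriesFilter:
  rankingMethod: "METHOD_MEAN"   # assumed enum spelling
  direction: "TOP"               # assumed enum spelling
  numTimeSeries: 5
```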
- items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series are + partitioned into subsets prior to + applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, + so this field implicitly contains + `resource.type`. Fields not specified + in `group_by_fields` are aggregated + away. If `group_by_fields` is not + specified and all the time series + have the same resource type, then + the time series are aggregated into + a single output time series. If + `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. 
+ type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio between + two time series filters. + properties: + denominator: + description: The denominator of the ratio. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: type: string - type: object - type: object - timeSeriesQueryLanguage: - description: A query used to fetch time - series with MQL. - type: string - unitOverride: - description: The unit of data contained - in fetched time series. If non-empty, - this unit will override any unit that - accompanies fetched data. The format - is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - type: string - type: object - required: - - timeSeriesQuery - type: object - type: array - thresholds: - description: Threshold lines drawn horizontally - across the chart. 
- items: - properties: - color: - description: The state color for this threshold. - Color is not allowed in a XyChart. - type: string - direction: - description: The direction for the current - threshold. Direction is not allowed in - a XyChart. - type: string - label: - description: A label for the threshold. - type: string - value: - description: The value of the threshold. - The value should be defined in the native - scale of the metric. - format: double - type: number - type: object - type: array - timeshiftDuration: - description: The duration used to display a comparison - chart. A comparison chart simultaneously shows - values from two similar-length time periods - (e.g., week-over-week metrics). The duration - must be positive, and it can only be applied - to charts with data sets of LINE plot type. - type: string - xAxis: - description: The properties applied to the x-axis. - properties: - label: - description: The label of the axis. - type: string - scale: - description: The axis scale. By default, a - linear scale is used. - type: string - type: object - yAxis: - description: The properties applied to the y-axis. - properties: - label: - description: The label of the axis. - type: string - scale: - description: The axis scale. By default, a - linear scale is used. - type: string - type: object - required: - - dataSets - type: object - type: object - type: array - type: object - type: array - type: object - displayName: - description: Required. The mutable, human-readable name. - type: string - gridLayout: - description: Content is arranged with a basic layout that re-flows - a simple list of informational elements like widgets or tiles. - properties: - columns: - description: The number of columns into which the view's width - is divided. If omitted or set to zero, a system default will - be used while rendering. - format: int64 - type: integer - widgets: - description: The informational elements that are arranged into - the columns row-first. - items: - properties: - alertChart: - description: A chart of alert policy data. - properties: - alertPolicyRef: - description: Required. A reference to the MonitoringAlertPolicy. - oneOf: - - not: - required: - - external - required: - - name - - not: - anyOf: - - required: - - name - - required: - - namespace - required: - - external - properties: - external: - description: The MonitoringAlertPolicy link in the - form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", - when not managed by KCC. - type: string - name: - description: The `name` field of a `MonitoringAlertPolicy` - resource. - type: string - namespace: - description: The `namespace` field of a `MonitoringAlertPolicy` - resource. - type: string - type: object - required: - - alertPolicyRef - type: object - blank: - description: A blank space. - type: object - collapsibleGroup: - description: A widget that groups the other widgets. All - widgets that are within the area spanned by the grouping - widget are considered member widgets. - properties: - collapsed: - description: The collapsed state of the widget on first - page load. - type: boolean - type: object - logsPanel: - description: A widget that shows a stream of logs. - properties: - filter: - description: A filter that chooses which log entries - to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). - Only log entries that match the filter are returned. An - empty filter matches all log entries. 
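For the logsPanel fields above (filter, resourceNames), a minimal sketch; the query and project name are placeholders, and only the `external` form of the resource reference is shown.

```yaml
# Illustrative only: a logs panel scoped to one project.
logsPanel:
  filter: 'severity>=ERROR'             # assumed Logging query; an empty filter matches all entries
  resourceNames:
    - external: "projects/my-project"   # placeholder; a kind/name reference is the alternative form
```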
- type: string - resourceNames: - description: The names of logging resources to collect - logs for. Currently only projects are supported. If - empty, the widget will default to the host project. - items: - oneOf: - - not: - required: - - external - required: - - name - - kind - - not: - anyOf: - - required: - - name - - required: - - namespace - - required: - - kind - required: - - external - properties: - external: - description: The external name of the referenced - resource - type: string - kind: - description: Kind of the referent. - type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - namespace: - description: 'Namespace of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: string - type: object - type: array - type: object - scorecard: - description: A scorecard summarizing time series data. - properties: - gaugeView: - description: Will cause the scorecard to show a gauge - chart. - properties: - lowerBound: - description: The lower bound for this gauge chart. - The value of the chart should always be greater - than or equal to this. - format: double - type: number - upperBound: - description: The upper bound for this gauge chart. - The value of the chart should always be less than - or equal to this. - format: double - type: number - type: object - sparkChartView: - description: Will cause the scorecard to show a spark - chart. - properties: - minAlignmentPeriod: - description: The lower bound on data point frequency - in the chart implemented by specifying the minimum - alignment period to use in a time series query. - For example, if the data is published once every - 10 minutes it would not make sense to fetch and - align data at one minute intervals. This field - is optional and exists only as a hint. - type: string - sparkChartType: - description: Required. The type of sparkchart to - show in this chartView. - type: string - required: - - sparkChartType - type: object - thresholds: - description: |- - The thresholds used to determine the state of the scorecard given the - time series' current value. For an actual value x, the scorecard is in a - danger state if x is less than or equal to a danger threshold that triggers - below, or greater than or equal to a danger threshold that triggers above. - Similarly, if x is above/below a warning threshold that triggers - above/below, then the scorecard is in a warning state - unless x also puts - it in a danger state. (Danger trumps warning.) + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - As an example, consider a scorecard with the following four thresholds: + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. 
- ``` - { - value: 90, - category: 'DANGER', - trigger: 'ABOVE', - }, - { - value: 70, - category: 'WARNING', - trigger: 'ABOVE', - }, - { - value: 10, - category: 'DANGER', - trigger: 'BELOW', - }, - { - value: 20, - category: 'WARNING', - trigger: 'BELOW', - } - ``` + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the ratio. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Then: values less than or equal to 10 would put the scorecard in a DANGER - state, values greater than 10 but less than or equal to 20 a WARNING state, - values strictly between 20 and 70 an OK state, values greater than or equal - to 70 but less than 90 a WARNING state, and values greater than or equal to - 90 a DANGER state. - items: - properties: - color: - description: The state color for this threshold. - Color is not allowed in a XyChart. - type: string - direction: - description: The direction for the current threshold. - Direction is not allowed in a XyChart. - type: string - label: - description: A label for the threshold. - type: string - value: - description: The value of the threshold. The value - should be defined in the native scale of the - metric. - format: double - type: number - type: object - type: array - timeSeriesQuery: - description: Required. Fields for querying time series - data from the Stackdriver metrics API. - properties: - timeSeriesFilter: - description: Filter parameters to fetch time series. - properties: - aggregation: - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views of - the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. 
- type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that - have the same value for each of the grouping - fields. Each individual time series is - a member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not - specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same - resource type, then the time series are - aggregated into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. 
Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. - type: string - pickTimeSeriesFilter: - description: Ranking based time series filter. - properties: - direction: - description: How to use the ranking to select - time series that pass through the filter. - type: string - numTimeSeries: - description: How many time series to allow - to pass through the filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` is applied - to each time series independently to produce - the value which will be used to compare - the time series to other time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation after - `aggregation` is applied. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. 
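[Editor's aside, not part of the patch] For orientation while reading this hunk: the aggregation and filter fields described above (alignmentPeriod, perSeriesAligner, crossSeriesReducer, groupByFields, and the required monitoring filter) combine inside a widget's timeSeriesQuery roughly as in the sketch below. Field names follow the schema shown in this hunk; the metric filter, the "60s" period, and the ALIGN_MEAN/REDUCE_MEAN enum values are illustrative assumptions only, and the enclosing layout and widget fields are outside this hunk.

```
# Illustrative fragment only; values are sample assumptions, not from the patch.
timeSeriesQuery:
  timeSeriesFilter:
    # Monitoring filter selecting the metric and resource type to query.
    filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
    aggregation:
      alignmentPeriod: "60s"            # must be at least 60 seconds
      perSeriesAligner: "ALIGN_MEAN"    # required (and not ALIGN_NONE) when a reducer is set
      crossSeriesReducer: "REDUCE_MEAN" # combines the aligned series into one
      groupByFields:
        - "resource.label.zone"         # labels preserved by the reducer
```

As the descriptions note, the reducer only makes sense on aligned data, which is why the aligner and alignment period must accompany it.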
+ type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is + applied to each time series independently + to produce the value which will + be used to compare the time series + to other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that - have the same value for each of the grouping - fields. Each individual time series is - a member of exactly one subset. 
The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not - specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same - resource type, then the time series are - aggregated into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series are + partitioned into subsets prior to + applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, + so this field implicitly contains + `resource.type`. Fields not specified + in `group_by_fields` are aggregated + away. If `group_by_fields` is not + specified and all the time series + have the same resource type, then + the time series are aggregated into + a single output time series. If + `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - required: - - filter - type: object - timeSeriesFilterRatio: - description: Parameters to fetch a ratio between - two time series filters. 
- properties: - denominator: - description: The denominator of the ratio. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time series + with MQL. + type: string + unitOverride: + description: The unit of data contained in + fetched time series. If non-empty, this + unit will override any unit that accompanies + fetched data. The format is the same as + the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object + required: + - timeSeriesQuery + type: object + sectionHeader: + description: A widget that defines a section header + for easier navigation of the dashboard. + properties: + dividerBelow: + description: Whether to insert a divider below + the section in the table of contents + type: boolean + subtitle: + description: The subtitle of the section + type: string + type: object + text: + description: A raw string or markdown displaying textual + content. + properties: + content: + description: The text content to be displayed. + type: string + format: + description: How the text content is formatted. + type: string + style: + description: How the text is styled + properties: + backgroundColor: + description: The background color as a hex + string. "#RRGGBB" or "#RGB" + type: string + fontSize: + description: Font sizes for both the title + and content. The title will still be larger + relative to the content. + type: string + horizontalAlignment: + description: The horizontal alignment of both + the title and content + type: string + padding: + description: The amount of padding around + the widget + type: string + pointerLocation: + description: The pointer location for this + widget (also sometimes called a "tail") + type: string + textColor: + description: The text color as a hex string. + "#RRGGBB" or "#RGB" + type: string + verticalAlignment: + description: The vertical alignment of both + the title and content + type: string + type: object + type: object + title: + description: Optional. The title of the widget. + type: string + xyChart: + description: A chart of time series data. + properties: + chartOptions: + description: Display options for the chart. + properties: + mode: + description: The chart mode. + type: string + type: object + dataSets: + description: Required. The data displayed in this + chart. + items: properties: - aggregation: - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views - of the data. + legendTemplate: + description: A template string for naming + `TimeSeries` in the resulting data set. + This should be a string with interpolations + of the form `${label_name}`, which will + resolve to the label's value. + type: string + minAlignmentPeriod: + description: Optional. The lower bound on + data point frequency for this data set, + implemented by specifying the minimum + alignment period to use in a time series + query For example, if the data is published + once every 10 minutes, the `min_alignment_period` + should be at least 10 minutes. It would + not make sense to fetch and align data + at one minute intervals. 
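[Editor's aside, not part of the patch] The sectionHeader and text widgets described just above are small, self-contained entries; a sketch of both is below, assuming they sit in whatever widget list the dashboard layout uses (the layout fields are outside this hunk). The subtitle, content, "MARKDOWN" format, and hex colors are illustrative assumptions.

```
# Illustrative fragment only; values are sample assumptions, not from the patch.
- title: "Frontend"
  sectionHeader:
    subtitle: "Frontend latency"
    dividerBelow: true                  # adds a divider in the table of contents
- title: "Runbook"
  text:
    content: "Check the runbook before silencing alerts."
    format: "MARKDOWN"                  # how the content is rendered (assumed value)
    style:
      backgroundColor: "#FFFFFF"        # hex string, "#RRGGBB" or "#RGB"
      textColor: "#212121"
```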
+ type: string + plotType: + description: How this data should be plotted + on the chart. + type: string + timeSeriesQuery: + description: Required. Fields for querying + time series data from the Stackdriver + metrics API. properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + timeSeriesFilter: + description: Filter parameters to fetch + time series. + properties: + aggregation: + description: By default, the raw + time series data is returned. + Use this field to combine multiple + time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. 
Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of + exactly one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is not + possible to reduce across + different resource types, + so this field implicitly contains + `resource.type`. Fields not + specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same + resource type, then the time + series are aggregated into + a single output time series. + If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. 
- type: string - type: object - filter: - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. - type: string - required: - - filter - type: object - numerator: - description: The numerator of the ratio. - properties: - aggregation: - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views - of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + pickTimeSeriesFilter: + description: Ranking based time + series filter. + properties: + direction: + description: How to use the + ranking to select time series + that pass through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to + other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. 
- Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of + exactly one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is not + possible to reduce across + different resource types, + so this field implicitly contains + `resource.type`. Fields not + specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same + resource type, then the time + series are aggregated into + a single output time series. + If `cross_series_reducer` + is not defined, this field + is ignored. 
+ items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. - type: string - required: - - filter - type: object - pickTimeSeriesFilter: - description: Ranking based time series filter. - properties: - direction: - description: How to use the ranking to select - time series that pass through the filter. - type: string - numTimeSeries: - description: How many time series to allow - to pass through the filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` is applied - to each time series independently to produce - the value which will be used to compare - the time series to other time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation after - the ratio is computed. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio + between two time series filters. + properties: + denominator: + description: The denominator of + the ratio. + properties: + aggregation: + description: By default, the + raw time series data is returned. + Use this field to combine + multiple time series for different + views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. 
If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that - have the same value for each of the grouping - fields. Each individual time series is - a member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not - specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same - resource type, then the time series are - aggregated into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. 
If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of + fields to preserve when + `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time + series are partitioned + into subsets prior to + applying the aggregation + operation. Each subset + contains time series that + have the same value for + each of the grouping fields. + Each individual time series + is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is + not possible to reduce + across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the + time series have the same + resource type, then the + time series are aggregated + into a single output time + series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - type: object - timeSeriesQueryLanguage: - description: A query used to fetch time series with - MQL. - type: string - unitOverride: - description: The unit of data contained in fetched - time series. If non-empty, this unit will override - any unit that accompanies fetched data. The format - is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - type: string - type: object - required: - - timeSeriesQuery - type: object - sectionHeader: - description: A widget that defines a section header for - easier navigation of the dashboard. - properties: - dividerBelow: - description: Whether to insert a divider below the section - in the table of contents - type: boolean - subtitle: - description: The subtitle of the section - type: string - type: object - text: - description: A raw string or markdown displaying textual - content. - properties: - content: - description: The text content to be displayed. - type: string - format: - description: How the text content is formatted. 
- type: string - style: - description: How the text is styled - properties: - backgroundColor: - description: The background color as a hex string. - "#RRGGBB" or "#RGB" - type: string - fontSize: - description: Font sizes for both the title and content. - The title will still be larger relative to the - content. - type: string - horizontalAlignment: - description: The horizontal alignment of both the - title and content - type: string - padding: - description: The amount of padding around the widget - type: string - pointerLocation: - description: The pointer location for this widget - (also sometimes called a "tail") - type: string - textColor: - description: The text color as a hex string. "#RRGGBB" - or "#RGB" - type: string - verticalAlignment: - description: The vertical alignment of both the - title and content - type: string - type: object - type: object - title: - description: Optional. The title of the widget. - type: string - xyChart: - description: A chart of time series data. - properties: - chartOptions: - description: Display options for the chart. - properties: - mode: - description: The chart mode. - type: string - type: object - dataSets: - description: Required. The data displayed in this chart. - items: - properties: - legendTemplate: - description: A template string for naming `TimeSeries` - in the resulting data set. This should be a - string with interpolations of the form `${label_name}`, - which will resolve to the label's value. - type: string - minAlignmentPeriod: - description: Optional. The lower bound on data - point frequency for this data set, implemented - by specifying the minimum alignment period to - use in a time series query For example, if the - data is published once every 10 minutes, the - `min_alignment_period` should be at least 10 - minutes. It would not make sense to fetch and - align data at one minute intervals. - type: string - plotType: - description: How this data should be plotted on - the chart. - type: string - timeSeriesQuery: - description: Required. Fields for querying time - series data from the Stackdriver metrics API. - properties: - timeSeriesFilter: - description: Filter parameters to fetch time - series. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this field - to combine multiple time series for - different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must be aligned in order to perform cross-time + series reduction. 
If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric + types, resources, and projects + to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the + ratio. + properties: + aggregation: + description: By default, the + raw time series data is returned. + Use this field to combine + multiple time series for different + views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series are - partitioned into subsets prior to - applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, - so this field implicitly contains - `resource.type`. Fields not specified - in `group_by_fields` are aggregated - away. If `group_by_fields` is not - specified and all the time series - have the same resource type, then - the time series are aggregated into - a single output time series. If - `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. 
+ type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. - type: string - pickTimeSeriesFilter: - description: Ranking based time series - filter. - properties: - direction: - description: How to use the ranking - to select time series that pass - through the filter. - type: string - numTimeSeries: - description: How many time series - to allow to pass through the filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` is - applied to each time series independently - to produce the value which will - be used to compare the time series - to other time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after `aggregation` is applied. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of + fields to preserve when + `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time + series are partitioned + into subsets prior to + applying the aggregation + operation. 
Each subset + contains time series that + have the same value for + each of the grouping fields. + Each individual time series + is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is + not possible to reduce + across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the + time series have the same + resource type, then the + time series are aggregated + into a single output time + series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series are - partitioned into subsets prior to - applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, - so this field implicitly contains - `resource.type`. Fields not specified - in `group_by_fields` are aggregated - away. If `group_by_fields` is not - specified and all the time series - have the same resource type, then - the time series are aggregated into - a single output time series. If - `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. 
+ type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric + types, resources, and projects + to query. + type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time + series filter. + properties: + direction: + description: How to use the + ranking to select time series + that pass through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to + other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - required: - - filter - type: object - timeSeriesFilterRatio: - description: Parameters to fetch a ratio between - two time series filters. - properties: - denominator: - description: The denominator of the ratio. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Not all reducer operations can be applied to all time series. 
The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. 
The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - numerator: - description: The numerator of the ratio. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of + exactly one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is not + possible to reduce across + different resource types, + so this field implicitly contains + `resource.type`. Fields not + specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same + resource type, then the time + series are aggregated into + a single output time series. + If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. 
- type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - pickTimeSeriesFilter: - description: Ranking based time series - filter. - properties: - direction: - description: How to use the ranking - to select time series that pass - through the filter. - type: string - numTimeSeries: - description: How many time series - to allow to pass through the filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` is - applied to each time series independently - to produce the value which will - be used to compare the time series - to other time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after the ratio is computed. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time + series with MQL. 
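For orientation, a minimal sketch of the `aggregation` block this hunk describes, together with the `timeSeriesQueryLanguage` (MQL) alternative; the metric filter, period, and enum values are illustrative assumptions, not taken from this patch:

    timeSeriesQuery:
      timeSeriesFilter:
        # Placeholder filter; any valid Cloud Monitoring filter can be used here.
        filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
        aggregation:
          alignmentPeriod: 60s            # must be at least 60 seconds
          perSeriesAligner: ALIGN_MEAN    # must not be ALIGN_NONE when a reducer is set
          crossSeriesReducer: REDUCE_MEAN
          groupByFields:
            - resource.label.zone         # resource.type is preserved implicitly
    # The same data could instead be fetched with MQL (illustrative sketch):
    # timeSeriesQuery:
    #   timeSeriesQueryLanguage: |
    #     fetch gce_instance::compute.googleapis.com/instance/cpu/utilization
    #     | align mean(1m) | group_by [resource.zone], mean(val())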
+ type: string + unitOverride: + description: The unit of data contained + in fetched time series. If non-empty, + this unit will override any unit that + accompanies fetched data. The format + is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object + required: + - timeSeriesQuery + type: object + type: array + thresholds: + description: Threshold lines drawn horizontally + across the chart. + items: + properties: + color: + description: The state color for this threshold. + Color is not allowed in a XyChart. + type: string + direction: + description: The direction for the current + threshold. Direction is not allowed in + a XyChart. + type: string + label: + description: A label for the threshold. + type: string + value: + description: The value of the threshold. + The value should be defined in the native + scale of the metric. + format: double + type: number + type: object + type: array + timeshiftDuration: + description: The duration used to display a comparison + chart. A comparison chart simultaneously shows + values from two similar-length time periods + (e.g., week-over-week metrics). The duration + must be positive, and it can only be applied + to charts with data sets of LINE plot type. + type: string + xAxis: + description: The properties applied to the x-axis. + properties: + label: + description: The label of the axis. + type: string + scale: + description: The axis scale. By default, a + linear scale is used. + type: string + type: object + yAxis: + description: The properties applied to the y-axis. + properties: + label: + description: The label of the axis. + type: string + scale: + description: The axis scale. By default, a + linear scale is used. + type: string + type: object + required: + - dataSets + type: object + type: object + type: array + type: object + type: array + type: object + displayName: + description: Required. The mutable, human-readable name. + type: string + gridLayout: + description: Content is arranged with a basic layout that re-flows + a simple list of informational elements like widgets or tiles. + properties: + columns: + description: The number of columns into which the view's width + is divided. If omitted or set to zero, a system default will + be used while rendering. + format: int64 + type: integer + widgets: + description: The informational elements that are arranged into + the columns row-first. + items: + properties: + alertChart: + description: A chart of alert policy data. + properties: + alertPolicyRef: + description: Required. A reference to the MonitoringAlertPolicy. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: The MonitoringAlertPolicy link in the + form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", + when not managed by KCC. + type: string + name: + description: The `name` field of a `MonitoringAlertPolicy` + resource. + type: string + namespace: + description: The `namespace` field of a `MonitoringAlertPolicy` + resource. + type: string + type: object + required: + - alertPolicyRef + type: object + blank: + description: A blank space. + type: object + collapsibleGroup: + description: A widget that groups the other widgets. All + widgets that are within the area spanned by the grouping + widget are considered member widgets. 
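A hedged sketch of how the `gridLayout`, `alertChart`, and `blank` fields above fit together in a manifest; the apiVersion/kind and the referenced alert policy name are assumptions, not taken from this patch:

    apiVersion: monitoring.cnrm.cloud.google.com/v1beta1   # assumed group/version for this CRD
    kind: MonitoringDashboard
    metadata:
      name: monitoringdashboard-sample
    spec:
      displayName: Alerting overview
      gridLayout:
        columns: 2
        widgets:
          - title: CPU alert burn-down
            alertChart:
              alertPolicyRef:
                name: monitoringalertpolicy-sample   # KCC-managed policy in the same namespace
          - blank: {}   # empty spacer tile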
+ properties: + collapsed: + description: The collapsed state of the widget on first + page load. + type: boolean + type: object + logsPanel: + description: A widget that shows a stream of logs. + properties: + filter: + description: A filter that chooses which log entries + to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). + Only log entries that match the filter are returned. An + empty filter matches all log entries. + type: string + resourceNames: + description: The names of logging resources to collect + logs for. Currently only projects are supported. If + empty, the widget will default to the host project. + items: + oneOf: + - not: + required: + - external + required: + - name + - kind + - not: + anyOf: + - required: + - name + - required: + - namespace + - required: + - kind + required: + - external + properties: + external: + description: The external name of the referenced + resource + type: string + kind: + description: Kind of the referent. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + type: object + type: array + type: object + pieChart: + description: A widget that displays timeseries data as a + pie chart. + properties: + chartType: + description: Required. Indicates the visualization type + for the PieChart. + type: string + dataSets: + description: Required. The queries for the chart's data. + items: + properties: + minAlignmentPeriod: + description: Optional. The lower bound on data + point frequency for this data set, implemented + by specifying the minimum alignment period to + use in a time series query. For example, if + the data is published once every 10 minutes, + the `min_alignment_period` should be at least + 10 minutes. It would not make sense to fetch + and align data at one minute intervals. + type: string + sliceNameTemplate: + description: Optional. A template for the name + of the slice. This name will be displayed in + the legend and the tooltip of the pie chart. + It replaces the auto-generated names for the + slices. For example, if the template is set + to `${resource.labels.zone}`, the zone's value + will be used for the name instead of the default + name. + type: string + timeSeriesQuery: + description: Required. The query for the PieChart. + See, `google.monitoring.dashboard.v1.TimeSeriesQuery`. + properties: + timeSeriesFilter: + description: Filter parameters to fetch time + series. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this field + to combine multiple time series for + different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. @@ -2888,32 +2411,1700 @@ spec: returned. type: string type: object - type: object - timeSeriesQueryLanguage: - description: A query used to fetch time series - with MQL. 
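A rough illustration of the `logsPanel` and `collapsibleGroup` widgets described above; the log filter and project reference are placeholders:

    widgets:
      - title: Recent errors
        logsPanel:
          filter: severity>=ERROR   # placeholder Advanced Logs Query
          resourceNames:
            - external: projects/my-project-id   # or reference a KCC Project via kind/name
      - title: Deployment details
        collapsibleGroup:
          collapsed: true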
- type: string - unitOverride: - description: The unit of data contained in - fetched time series. If non-empty, this - unit will override any unit that accompanies - fetched data. The format is the same as - the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - type: string - type: object - required: - - timeSeriesQuery - type: object - type: array - thresholds: - description: Threshold lines drawn horizontally across - the chart. - items: - properties: - color: - description: The state color for this threshold. - Color is not allowed in a XyChart. + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, resources, + and projects to query. + type: string + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is + applied to each time series independently + to produce the value which will + be used to compare the time series + to other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series are + partitioned into subsets prior to + applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. 
It is not possible to reduce + across different resource types, + so this field implicitly contains + `resource.type`. Fields not specified + in `group_by_fields` are aggregated + away. If `group_by_fields` is not + specified and all the time series + have the same resource type, then + the time series are aggregated into + a single output time series. If + `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio between + two time series filters. + properties: + denominator: + description: The denominator of the ratio. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. 
The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the ratio. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. 
+ + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is + applied to each time series independently + to produce the value which will + be used to compare the time series + to other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. 
If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series are + partitioned into subsets prior to + applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, + so this field implicitly contains + `resource.type`. Fields not specified + in `group_by_fields` are aggregated + away. If `group_by_fields` is not + specified and all the time series + have the same resource type, then + the time series are aggregated into + a single output time series. If + `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time series + with MQL. + type: string + unitOverride: + description: The unit of data contained in + fetched time series. If non-empty, this + unit will override any unit that accompanies + fetched data. The format is the same as + the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. 
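A sketch of a `pieChart` data set built on the `timeSeriesFilterRatio` and `pickTimeSeriesFilter` fields described above; the metrics, labels, and enum values are illustrative assumptions:

    pieChart:
      chartType: DONUT              # assumed enum value
      showLabels: true
      dataSets:
        - sliceNameTemplate: "${resource.labels.backend_target_name}"
          minAlignmentPeriod: 300s
          timeSeriesQuery:
            timeSeriesFilterRatio:
              numerator:
                filter: 'metric.type="loadbalancing.googleapis.com/https/request_count" metric.labels.response_code_class=500'
                aggregation:
                  alignmentPeriod: 300s
                  perSeriesAligner: ALIGN_RATE
                  crossSeriesReducer: REDUCE_SUM
                  groupByFields:
                    - resource.label.backend_target_name
              denominator:
                filter: 'metric.type="loadbalancing.googleapis.com/https/request_count"'
                aggregation:
                  alignmentPeriod: 300s
                  perSeriesAligner: ALIGN_RATE
                  crossSeriesReducer: REDUCE_SUM
                  groupByFields:
                    - resource.label.backend_target_name
              pickTimeSeriesFilter:
                rankingMethod: METHOD_MEAN
                direction: TOP
                numTimeSeries: 5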
+ type: string + type: object + required: + - timeSeriesQuery + type: object + type: array + showLabels: + description: Optional. Indicates whether or not the + pie chart should show slices' labels + type: boolean + required: + - chartType + - dataSets + type: object + scorecard: + description: A scorecard summarizing time series data. + properties: + gaugeView: + description: Will cause the scorecard to show a gauge + chart. + properties: + lowerBound: + description: The lower bound for this gauge chart. + The value of the chart should always be greater + than or equal to this. + format: double + type: number + upperBound: + description: The upper bound for this gauge chart. + The value of the chart should always be less than + or equal to this. + format: double + type: number + type: object + sparkChartView: + description: Will cause the scorecard to show a spark + chart. + properties: + minAlignmentPeriod: + description: The lower bound on data point frequency + in the chart implemented by specifying the minimum + alignment period to use in a time series query. + For example, if the data is published once every + 10 minutes it would not make sense to fetch and + align data at one minute intervals. This field + is optional and exists only as a hint. + type: string + sparkChartType: + description: Required. The type of sparkchart to + show in this chartView. + type: string + required: + - sparkChartType + type: object + thresholds: + description: |- + The thresholds used to determine the state of the scorecard given the + time series' current value. For an actual value x, the scorecard is in a + danger state if x is less than or equal to a danger threshold that triggers + below, or greater than or equal to a danger threshold that triggers above. + Similarly, if x is above/below a warning threshold that triggers + above/below, then the scorecard is in a warning state - unless x also puts + it in a danger state. (Danger trumps warning.) + + As an example, consider a scorecard with the following four thresholds: + + ``` + { + value: 90, + category: 'DANGER', + trigger: 'ABOVE', + }, + { + value: 70, + category: 'WARNING', + trigger: 'ABOVE', + }, + { + value: 10, + category: 'DANGER', + trigger: 'BELOW', + }, + { + value: 20, + category: 'WARNING', + trigger: 'BELOW', + } + ``` + + Then: values less than or equal to 10 would put the scorecard in a DANGER + state, values greater than 10 but less than or equal to 20 a WARNING state, + values strictly between 20 and 70 an OK state, values greater than or equal + to 70 but less than 90 a WARNING state, and values greater than or equal to + 90 a DANGER state. + items: + properties: + color: + description: The state color for this threshold. + Color is not allowed in a XyChart. + type: string + direction: + description: The direction for the current threshold. + Direction is not allowed in a XyChart. + type: string + label: + description: A label for the threshold. + type: string + value: + description: The value of the threshold. The value + should be defined in the native scale of the + metric. + format: double + type: number + type: object + type: array + timeSeriesQuery: + description: Required. Fields for querying time series + data from the Stackdriver metrics API. + properties: + timeSeriesFilter: + description: Filter parameters to fetch time series. + properties: + aggregation: + description: By default, the raw time series + data is returned. Use this field to combine + multiple time series for different views of + the data. 
+ properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to preserve + when `cross_series_reducer` is specified. + The `group_by_fields` determine how the + time series are partitioned into subsets + prior to applying the aggregation operation. + Each subset contains time series that + have the same value for each of the grouping + fields. Each individual time series is + a member of exactly one subset. The `cross_series_reducer` + is applied to each subset of time series. + It is not possible to reduce across different + resource types, so this field implicitly + contains `resource.type`. Fields not + specified in `group_by_fields` are aggregated + away. If `group_by_fields` is not specified + and all the time series have the same + resource type, then the time series are + aggregated into a single output time series. + If `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, resources, + and projects to query. + type: string + pickTimeSeriesFilter: + description: Ranking based time series filter. + properties: + direction: + description: How to use the ranking to select + time series that pass through the filter. + type: string + numTimeSeries: + description: How many time series to allow + to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is applied + to each time series independently to produce + the value which will be used to compare + the time series to other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation after + `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to preserve + when `cross_series_reducer` is specified. + The `group_by_fields` determine how the + time series are partitioned into subsets + prior to applying the aggregation operation. + Each subset contains time series that + have the same value for each of the grouping + fields. Each individual time series is + a member of exactly one subset. The `cross_series_reducer` + is applied to each subset of time series. + It is not possible to reduce across different + resource types, so this field implicitly + contains `resource.type`. Fields not + specified in `group_by_fields` are aggregated + away. If `group_by_fields` is not specified + and all the time series have the same + resource type, then the time series are + aggregated into a single output time series. + If `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. 
Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio between + two time series filters. + properties: + denominator: + description: The denominator of the ratio. + properties: + aggregation: + description: By default, the raw time series + data is returned. Use this field to combine + multiple time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to preserve + when `cross_series_reducer` is specified. + The `group_by_fields` determine how + the time series are partitioned into + subsets prior to applying the aggregation + operation. Each subset contains time + series that have the same value for + each of the grouping fields. Each + individual time series is a member + of exactly one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, so + this field implicitly contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. 
If `group_by_fields` + is not specified and all the time + series have the same resource type, + then the time series are aggregated + into a single output time series. + If `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, resources, + and projects to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the ratio. + properties: + aggregation: + description: By default, the raw time series + data is returned. Use this field to combine + multiple time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to preserve + when `cross_series_reducer` is specified. + The `group_by_fields` determine how + the time series are partitioned into + subsets prior to applying the aggregation + operation. 
Each subset contains time + series that have the same value for + each of the grouping fields. Each + individual time series is a member + of exactly one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, so + this field implicitly contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the time + series have the same resource type, + then the time series are aggregated + into a single output time series. + If `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, resources, + and projects to query. + type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time series filter. + properties: + direction: + description: How to use the ranking to select + time series that pass through the filter. + type: string + numTimeSeries: + description: How many time series to allow + to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is applied + to each time series independently to produce + the value which will be used to compare + the time series to other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation after + the ratio is computed. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. 
The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to preserve + when `cross_series_reducer` is specified. + The `group_by_fields` determine how the + time series are partitioned into subsets + prior to applying the aggregation operation. + Each subset contains time series that + have the same value for each of the grouping + fields. Each individual time series is + a member of exactly one subset. The `cross_series_reducer` + is applied to each subset of time series. + It is not possible to reduce across different + resource types, so this field implicitly + contains `resource.type`. Fields not + specified in `group_by_fields` are aggregated + away. If `group_by_fields` is not specified + and all the time series have the same + resource type, then the time series are + aggregated into a single output time series. + If `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time series with + MQL. + type: string + unitOverride: + description: The unit of data contained in fetched + time series. If non-empty, this unit will override + any unit that accompanies fetched data. The format + is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object + required: + - timeSeriesQuery + type: object + sectionHeader: + description: A widget that defines a section header for + easier navigation of the dashboard. + properties: + dividerBelow: + description: Whether to insert a divider below the section + in the table of contents + type: boolean + subtitle: + description: The subtitle of the section + type: string + type: object + text: + description: A raw string or markdown displaying textual + content. + properties: + content: + description: The text content to be displayed. + type: string + format: + description: How the text content is formatted. 
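A hedged example combining the `scorecard`, `sectionHeader`, and `text` widgets whose fields are described above; the metric, bounds, thresholds, and content are illustrative assumptions:

    widgets:
      - title: p95 backend latency
        scorecard:
          timeSeriesQuery:
            timeSeriesFilter:
              filter: 'metric.type="loadbalancing.googleapis.com/https/total_latencies" resource.type="https_lb_rule"'
              aggregation:
                alignmentPeriod: 60s
                perSeriesAligner: ALIGN_PERCENTILE_95
                crossSeriesReducer: REDUCE_MEAN
          gaugeView:
            lowerBound: 0.0
            upperBound: 1000.0
          thresholds:
            - value: 500.0
              color: YELLOW
              direction: ABOVE
              label: degraded
            - value: 800.0
              color: RED
              direction: ABOVE
              label: critical
      - sectionHeader:
          subtitle: Service health
          dividerBelow: true
      - text:
          content: "**Runbook:** https://example.com/runbook"
          format: MARKDOWN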
+ type: string + style: + description: How the text is styled + properties: + backgroundColor: + description: The background color as a hex string. + "#RRGGBB" or "#RGB" + type: string + fontSize: + description: Font sizes for both the title and content. + The title will still be larger relative to the + content. + type: string + horizontalAlignment: + description: The horizontal alignment of both the + title and content + type: string + padding: + description: The amount of padding around the widget + type: string + pointerLocation: + description: The pointer location for this widget + (also sometimes called a "tail") + type: string + textColor: + description: The text color as a hex string. "#RRGGBB" + or "#RGB" + type: string + verticalAlignment: + description: The vertical alignment of both the + title and content + type: string + type: object + type: object + title: + description: Optional. The title of the widget. + type: string + xyChart: + description: A chart of time series data. + properties: + chartOptions: + description: Display options for the chart. + properties: + mode: + description: The chart mode. + type: string + type: object + dataSets: + description: Required. The data displayed in this chart. + items: + properties: + legendTemplate: + description: A template string for naming `TimeSeries` + in the resulting data set. This should be a + string with interpolations of the form `${label_name}`, + which will resolve to the label's value. + type: string + minAlignmentPeriod: + description: Optional. The lower bound on data + point frequency for this data set, implemented + by specifying the minimum alignment period to + use in a time series query For example, if the + data is published once every 10 minutes, the + `min_alignment_period` should be at least 10 + minutes. It would not make sense to fetch and + align data at one minute intervals. + type: string + plotType: + description: How this data should be plotted on + the chart. + type: string + timeSeriesQuery: + description: Required. Fields for querying time + series data from the Stackdriver metrics API. + properties: + timeSeriesFilter: + description: Filter parameters to fetch time + series. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this field + to combine multiple time series for + different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. 
Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series are + partitioned into subsets prior to + applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, + so this field implicitly contains + `resource.type`. Fields not specified + in `group_by_fields` are aggregated + away. If `group_by_fields` is not + specified and all the time series + have the same resource type, then + the time series are aggregated into + a single output time series. If + `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, resources, + and projects to query. + type: string + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is + applied to each time series independently + to produce the value which will + be used to compare the time series + to other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. 
If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series are + partitioned into subsets prior to + applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, + so this field implicitly contains + `resource.type`. Fields not specified + in `group_by_fields` are aggregated + away. If `group_by_fields` is not + specified and all the time series + have the same resource type, then + the time series are aggregated into + a single output time series. If + `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio between + two time series filters. + properties: + denominator: + description: The denominator of the ratio. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. 
+ properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. 
The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the ratio. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. 
Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is + applied to each time series independently + to produce the value which will + be used to compare the time series + to other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series are + partitioned into subsets prior to + applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, + so this field implicitly contains + `resource.type`. Fields not specified + in `group_by_fields` are aggregated + away. 
If `group_by_fields` is not + specified and all the time series + have the same resource type, then + the time series are aggregated into + a single output time series. If + `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time series + with MQL. + type: string + unitOverride: + description: The unit of data contained in + fetched time series. If non-empty, this + unit will override any unit that accompanies + fetched data. The format is the same as + the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object + required: + - timeSeriesQuery + type: object + type: array + thresholds: + description: Threshold lines drawn horizontally across + the chart. + items: + properties: + color: + description: The state color for this threshold. + Color is not allowed in a XyChart. type: string direction: description: The direction for the current threshold. @@ -2998,96 +4189,695 @@ spec: required: - external required: - - name - - not: - anyOf: - - required: - - name - - required: - - namespace - required: - - external - properties: - external: - description: The MonitoringAlertPolicy link - in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", - when not managed by KCC. - type: string - name: - description: The `name` field of a `MonitoringAlertPolicy` - resource. - type: string - namespace: - description: The `namespace` field of a `MonitoringAlertPolicy` - resource. - type: string - type: object - required: - - alertPolicyRef - type: object - blank: - description: A blank space. - type: object - collapsibleGroup: - description: A widget that groups the other widgets. - All widgets that are within the area spanned by the - grouping widget are considered member widgets. - properties: - collapsed: - description: The collapsed state of the widget on - first page load. - type: boolean - type: object - logsPanel: - description: A widget that shows a stream of logs. - properties: - filter: - description: A filter that chooses which log entries - to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). - Only log entries that match the filter are returned. An - empty filter matches all log entries. - type: string - resourceNames: - description: The names of logging resources to collect - logs for. Currently only projects are supported. 
- If empty, the widget will default to the host - project. - items: - oneOf: - - not: - required: - - external - required: - - name - - kind - - not: - anyOf: - - required: - - name - - required: - - namespace - - required: - - kind - required: - - external - properties: - external: - description: The external name of the referenced - resource - type: string - kind: - description: Kind of the referent. - type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - namespace: - description: 'Namespace of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: string + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: The MonitoringAlertPolicy link + in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", + when not managed by KCC. + type: string + name: + description: The `name` field of a `MonitoringAlertPolicy` + resource. + type: string + namespace: + description: The `namespace` field of a `MonitoringAlertPolicy` + resource. + type: string + type: object + required: + - alertPolicyRef + type: object + blank: + description: A blank space. + type: object + collapsibleGroup: + description: A widget that groups the other widgets. + All widgets that are within the area spanned by the + grouping widget are considered member widgets. + properties: + collapsed: + description: The collapsed state of the widget on + first page load. + type: boolean + type: object + logsPanel: + description: A widget that shows a stream of logs. + properties: + filter: + description: A filter that chooses which log entries + to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). + Only log entries that match the filter are returned. An + empty filter matches all log entries. + type: string + resourceNames: + description: The names of logging resources to collect + logs for. Currently only projects are supported. + If empty, the widget will default to the host + project. + items: + oneOf: + - not: + required: + - external + required: + - name + - kind + - not: + anyOf: + - required: + - name + - required: + - namespace + - required: + - kind + required: + - external + properties: + external: + description: The external name of the referenced + resource + type: string + kind: + description: Kind of the referent. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + type: object + type: array + type: object + pieChart: + description: A widget that displays timeseries data + as a pie chart. + properties: + chartType: + description: Required. Indicates the visualization + type for the PieChart. + type: string + dataSets: + description: Required. The queries for the chart's + data. + items: + properties: + minAlignmentPeriod: + description: Optional. The lower bound on + data point frequency for this data set, + implemented by specifying the minimum alignment + period to use in a time series query. For + example, if the data is published once every + 10 minutes, the `min_alignment_period` should + be at least 10 minutes. 
It would not make + sense to fetch and align data at one minute + intervals. + type: string + sliceNameTemplate: + description: Optional. A template for the + name of the slice. This name will be displayed + in the legend and the tooltip of the pie + chart. It replaces the auto-generated names + for the slices. For example, if the template + is set to `${resource.labels.zone}`, the + zone's value will be used for the name instead + of the default name. + type: string + timeSeriesQuery: + description: Required. The query for the PieChart. + See, `google.monitoring.dashboard.v1.TimeSeriesQuery`. + properties: + timeSeriesFilter: + description: Filter parameters to fetch + time series. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. 
Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to other + time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. 
Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio + between two time series filters. + properties: + denominator: + description: The denominator of the + ratio. + properties: + aggregation: + description: By default, the raw + time series data is returned. + Use this field to combine multiple + time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. 
An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member + of exactly one subset. The + `cross_series_reducer` is + applied to each subset of + time series. It is not possible + to reduce across different + resource types, so this + field implicitly contains + `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the time + series have the same resource + type, then the time series + are aggregated into a single + output time series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the + ratio. + properties: + aggregation: + description: By default, the raw + time series data is returned. + Use this field to combine multiple + time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. 
The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member + of exactly one subset. The + `cross_series_reducer` is + applied to each subset of + time series. It is not possible + to reduce across different + resource types, so this + field implicitly contains + `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the time + series have the same resource + type, then the time series + are aggregated into a single + output time series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to other + time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. 
+ properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time + series with MQL. 
+ type: string + unitOverride: + description: The unit of data contained + in fetched time series. If non-empty, + this unit will override any unit that + accompanies fetched data. The format + is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object + required: + - timeSeriesQuery type: object type: array + showLabels: + description: Optional. Indicates whether or not + the pie chart should show slices' labels + type: boolean + required: + - chartType + - dataSets type: object scorecard: description: A scorecard summarizing time series data. @@ -3113,98 +4903,514 @@ spec: description: Will cause the scorecard to show a spark chart. properties: - minAlignmentPeriod: - description: The lower bound on data point frequency - in the chart implemented by specifying the - minimum alignment period to use in a time - series query. For example, if the data is - published once every 10 minutes it would not - make sense to fetch and align data at one - minute intervals. This field is optional and - exists only as a hint. - type: string - sparkChartType: - description: Required. The type of sparkchart - to show in this chartView. - type: string - required: - - sparkChartType - type: object - thresholds: - description: |- - The thresholds used to determine the state of the scorecard given the - time series' current value. For an actual value x, the scorecard is in a - danger state if x is less than or equal to a danger threshold that triggers - below, or greater than or equal to a danger threshold that triggers above. - Similarly, if x is above/below a warning threshold that triggers - above/below, then the scorecard is in a warning state - unless x also puts - it in a danger state. (Danger trumps warning.) + minAlignmentPeriod: + description: The lower bound on data point frequency + in the chart implemented by specifying the + minimum alignment period to use in a time + series query. For example, if the data is + published once every 10 minutes it would not + make sense to fetch and align data at one + minute intervals. This field is optional and + exists only as a hint. + type: string + sparkChartType: + description: Required. The type of sparkchart + to show in this chartView. + type: string + required: + - sparkChartType + type: object + thresholds: + description: |- + The thresholds used to determine the state of the scorecard given the + time series' current value. For an actual value x, the scorecard is in a + danger state if x is less than or equal to a danger threshold that triggers + below, or greater than or equal to a danger threshold that triggers above. + Similarly, if x is above/below a warning threshold that triggers + above/below, then the scorecard is in a warning state - unless x also puts + it in a danger state. (Danger trumps warning.) 
+ + As an example, consider a scorecard with the following four thresholds: + + ``` + { + value: 90, + category: 'DANGER', + trigger: 'ABOVE', + }, + { + value: 70, + category: 'WARNING', + trigger: 'ABOVE', + }, + { + value: 10, + category: 'DANGER', + trigger: 'BELOW', + }, + { + value: 20, + category: 'WARNING', + trigger: 'BELOW', + } + ``` + + Then: values less than or equal to 10 would put the scorecard in a DANGER + state, values greater than 10 but less than or equal to 20 a WARNING state, + values strictly between 20 and 70 an OK state, values greater than or equal + to 70 but less than 90 a WARNING state, and values greater than or equal to + 90 a DANGER state. + items: + properties: + color: + description: The state color for this threshold. + Color is not allowed in a XyChart. + type: string + direction: + description: The direction for the current + threshold. Direction is not allowed in a + XyChart. + type: string + label: + description: A label for the threshold. + type: string + value: + description: The value of the threshold. The + value should be defined in the native scale + of the metric. + format: double + type: number + type: object + type: array + timeSeriesQuery: + description: Required. Fields for querying time + series data from the Stackdriver metrics API. + properties: + timeSeriesFilter: + description: Filter parameters to fetch time + series. + properties: + aggregation: + description: By default, the raw time series + data is returned. Use this field to combine + multiple time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to preserve + when `cross_series_reducer` is specified. + The `group_by_fields` determine how + the time series are partitioned into + subsets prior to applying the aggregation + operation. Each subset contains time + series that have the same value for + each of the grouping fields. Each + individual time series is a member + of exactly one subset. 
The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, so + this field implicitly contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the time + series have the same resource type, + then the time series are aggregated + into a single output time series. + If `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, resources, + and projects to query. + type: string + pickTimeSeriesFilter: + description: Ranking based time series filter. + properties: + direction: + description: How to use the ranking + to select time series that pass through + the filter. + type: string + numTimeSeries: + description: How many time series to + allow to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is applied + to each time series independently + to produce the value which will be + used to compare the time series to + other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. 
+ + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to preserve + when `cross_series_reducer` is specified. + The `group_by_fields` determine how + the time series are partitioned into + subsets prior to applying the aggregation + operation. Each subset contains time + series that have the same value for + each of the grouping fields. Each + individual time series is a member + of exactly one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, so + this field implicitly contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the time + series have the same resource type, + then the time series are aggregated + into a single output time series. + If `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio between + two time series filters. + properties: + denominator: + description: The denominator of the ratio. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - As an example, consider a scorecard with the following four thresholds: + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. 
- ``` - { - value: 90, - category: 'DANGER', - trigger: 'ABOVE', - }, - { - value: 70, - category: 'WARNING', - trigger: 'ABOVE', - }, - { - value: 10, - category: 'DANGER', - trigger: 'BELOW', - }, - { - value: 20, - category: 'WARNING', - trigger: 'BELOW', - } - ``` + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Then: values less than or equal to 10 would put the scorecard in a DANGER - state, values greater than 10 but less than or equal to 20 a WARNING state, - values strictly between 20 and 70 an OK state, values greater than or equal - to 70 but less than 90 a WARNING state, and values greater than or equal to - 90 a DANGER state. - items: - properties: - color: - description: The state color for this threshold. - Color is not allowed in a XyChart. - type: string - direction: - description: The direction for the current - threshold. Direction is not allowed in a - XyChart. - type: string - label: - description: A label for the threshold. - type: string - value: - description: The value of the threshold. The - value should be defined in the native scale - of the metric. - format: double - type: number - type: object - type: array - timeSeriesQuery: - description: Required. Fields for querying time - series data from the Stackdriver metrics API. - properties: - timeSeriesFilter: - description: Filter parameters to fetch time - series. - properties: - aggregation: - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views - of the data. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets prior + to applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to + reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the time + series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. 
Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the ratio. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets prior + to applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to + reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. 
If `group_by_fields` + is not specified and all the time + series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time series filter. + properties: + direction: + description: How to use the ranking + to select time series that pass through + the filter. + type: string + numTimeSeries: + description: How many time series to + allow to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is applied + to each time series independently + to produce the value which will be + used to compare the time series to + other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. properties: alignmentPeriod: description: |- @@ -3227,1387 +5433,1585 @@ spec: time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to preserve + when `cross_series_reducer` is specified. + The `group_by_fields` determine how + the time series are partitioned into + subsets prior to applying the aggregation + operation. 
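Building on the `numerator` and `denominator` blocks described above, a ratio query (for example, an error-rate style ratio) might be sketched as follows. This is illustrative only; the load-balancer metric, the `response_code_class` label, and the chosen aligner/reducer values are placeholders rather than anything mandated by this schema:

```
timeSeriesQuery:
  timeSeriesFilterRatio:
    numerator:
      # Requests that returned a 5xx response.
      filter: 'metric.type="loadbalancing.googleapis.com/https/request_count" metric.labels.response_code_class="500"'
      aggregation:
        alignmentPeriod: 60s
        perSeriesAligner: ALIGN_RATE
        crossSeriesReducer: REDUCE_SUM
    denominator:
      # All requests.
      filter: 'metric.type="loadbalancing.googleapis.com/https/request_count"'
      aggregation:
        alignmentPeriod: 60s
        perSeriesAligner: ALIGN_RATE
        crossSeriesReducer: REDUCE_SUM
```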
Each subset contains time + series that have the same value for + each of the grouping fields. Each + individual time series is a member + of exactly one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, so + this field implicitly contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the time + series have the same resource type, + then the time series are aggregated + into a single output time series. + If `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time series + with MQL. + type: string + unitOverride: + description: The unit of data contained in fetched + time series. If non-empty, this unit will + override any unit that accompanies fetched + data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object + required: + - timeSeriesQuery + type: object + sectionHeader: + description: A widget that defines a section header + for easier navigation of the dashboard. + properties: + dividerBelow: + description: Whether to insert a divider below the + section in the table of contents + type: boolean + subtitle: + description: The subtitle of the section + type: string + type: object + text: + description: A raw string or markdown displaying textual + content. + properties: + content: + description: The text content to be displayed. + type: string + format: + description: How the text content is formatted. + type: string + style: + description: How the text is styled + properties: + backgroundColor: + description: The background color as a hex string. + "#RRGGBB" or "#RGB" + type: string + fontSize: + description: Font sizes for both the title and + content. The title will still be larger relative + to the content. + type: string + horizontalAlignment: + description: The horizontal alignment of both + the title and content + type: string + padding: + description: The amount of padding around the + widget + type: string + pointerLocation: + description: The pointer location for this widget + (also sometimes called a "tail") + type: string + textColor: + description: The text color as a hex string. 
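For the `text` widget described above, a minimal configuration sketch could look like the block below. The content and hex colors are arbitrary examples, and the `MARKDOWN` format value follows the Monitoring dashboards API enum, so it should be checked against the current API reference:

```
- text:
    content: |-
      ## Frontend latency
      See the runbook before silencing any alerts.
    format: MARKDOWN
    style:
      backgroundColor: "#FFFFFF"
      textColor: "#212121"
```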
+ "#RRGGBB" or "#RGB" + type: string + verticalAlignment: + description: The vertical alignment of both + the title and content + type: string + type: object + type: object + title: + description: Optional. The title of the widget. + type: string + xyChart: + description: A chart of time series data. + properties: + chartOptions: + description: Display options for the chart. + properties: + mode: + description: The chart mode. + type: string + type: object + dataSets: + description: Required. The data displayed in this + chart. + items: + properties: + legendTemplate: + description: A template string for naming + `TimeSeries` in the resulting data set. + This should be a string with interpolations + of the form `${label_name}`, which will + resolve to the label's value. + type: string + minAlignmentPeriod: + description: Optional. The lower bound on + data point frequency for this data set, + implemented by specifying the minimum alignment + period to use in a time series query For + example, if the data is published once every + 10 minutes, the `min_alignment_period` should + be at least 10 minutes. It would not make + sense to fetch and align data at one minute + intervals. + type: string + plotType: + description: How this data should be plotted + on the chart. + type: string + timeSeriesQuery: + description: Required. Fields for querying + time series data from the Stackdriver metrics + API. + properties: + timeSeriesFilter: + description: Filter parameters to fetch + time series. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. 
Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. - items: + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. 
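An `xyChart` data set combining the presentation fields above (`plotType`, `legendTemplate`, `minAlignmentPeriod`) with a filter query might be sketched as follows; the custom metric name, the 10-minute publishing interval, and the `${resource.labels.zone}` legend label are assumptions made for the example:

```
xyChart:
  dataSets:
  - plotType: LINE
    legendTemplate: "${resource.labels.zone}"
    # Data is assumed to arrive every 10 minutes, so do not align more finely.
    minAlignmentPeriod: 600s
    timeSeriesQuery:
      timeSeriesFilter:
        filter: 'metric.type="custom.googleapis.com/http/request_latencies" resource.type="gce_instance"'
        aggregation:
          alignmentPeriod: 600s
          perSeriesAligner: ALIGN_MEAN
```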
The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. - type: string - pickTimeSeriesFilter: - description: Ranking based time series filter. - properties: - direction: - description: How to use the ranking - to select time series that pass through - the filter. - type: string - numTimeSeries: - description: How many time series to - allow to pass through the filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` is applied - to each time series independently - to produce the value which will be - used to compare the time series to - other time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after `aggregation` is applied. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to other + time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. 
+ The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. 
If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - required: - - filter - type: object - timeSeriesFilterRatio: - description: Parameters to fetch a ratio between - two time series filters. - properties: - denominator: - description: The denominator of the ratio. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio + between two time series filters. + properties: + denominator: + description: The denominator of the + ratio. + properties: + aggregation: + description: By default, the raw + time series data is returned. + Use this field to combine multiple + time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. 
If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets prior - to applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to - reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. 
+ type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member + of exactly one subset. The + `cross_series_reducer` is + applied to each subset of + time series. It is not possible + to reduce across different + resource types, so this + field implicitly contains + `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the time + series have the same resource + type, then the time series + are aggregated into a single + output time series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - numerator: - description: The numerator of the ratio. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the + ratio. 
+ properties: + aggregation: + description: By default, the raw + time series data is returned. + Use this field to combine multiple + time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets prior - to applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to - reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. 
- items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member + of exactly one subset. The + `cross_series_reducer` is + applied to each subset of + time series. It is not possible + to reduce across different + resource types, so this + field implicitly contains + `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the time + series have the same resource + type, then the time series + are aggregated into a single + output time series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - pickTimeSeriesFilter: - description: Ranking based time series filter. - properties: - direction: - description: How to use the ranking - to select time series that pass through - the filter. - type: string - numTimeSeries: - description: How many time series to - allow to pass through the filter. 
- format: int32 - type: integer - rankingMethod: - description: '`ranking_method` is applied - to each time series independently - to produce the value which will be - used to compare the time series to - other time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after the ratio is computed. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to other + time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. 
The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. 
+ items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - type: object - timeSeriesQueryLanguage: - description: A query used to fetch time series - with MQL. - type: string - unitOverride: - description: The unit of data contained in fetched - time series. If non-empty, this unit will - override any unit that accompanies fetched - data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - type: string - type: object - required: - - timeSeriesQuery - type: object - sectionHeader: - description: A widget that defines a section header - for easier navigation of the dashboard. - properties: - dividerBelow: - description: Whether to insert a divider below the - section in the table of contents - type: boolean - subtitle: - description: The subtitle of the section - type: string - type: object - text: - description: A raw string or markdown displaying textual - content. - properties: - content: - description: The text content to be displayed. - type: string - format: - description: How the text content is formatted. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time + series with MQL. + type: string + unitOverride: + description: The unit of data contained + in fetched time series. If non-empty, + this unit will override any unit that + accompanies fetched data. The format + is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object + required: + - timeSeriesQuery + type: object + type: array + thresholds: + description: Threshold lines drawn horizontally + across the chart. + items: + properties: + color: + description: The state color for this threshold. + Color is not allowed in a XyChart. + type: string + direction: + description: The direction for the current + threshold. Direction is not allowed in a + XyChart. + type: string + label: + description: A label for the threshold. 
+ type: string + value: + description: The value of the threshold. The + value should be defined in the native scale + of the metric. + format: double + type: number + type: object + type: array + timeshiftDuration: + description: The duration used to display a comparison + chart. A comparison chart simultaneously shows + values from two similar-length time periods (e.g., + week-over-week metrics). The duration must be + positive, and it can only be applied to charts + with data sets of LINE plot type. type: string - style: - description: How the text is styled + xAxis: + description: The properties applied to the x-axis. properties: - backgroundColor: - description: The background color as a hex string. - "#RRGGBB" or "#RGB" - type: string - fontSize: - description: Font sizes for both the title and - content. The title will still be larger relative - to the content. - type: string - horizontalAlignment: - description: The horizontal alignment of both - the title and content - type: string - padding: - description: The amount of padding around the - widget - type: string - pointerLocation: - description: The pointer location for this widget - (also sometimes called a "tail") - type: string - textColor: - description: The text color as a hex string. - "#RRGGBB" or "#RGB" + label: + description: The label of the axis. type: string - verticalAlignment: - description: The vertical alignment of both - the title and content + scale: + description: The axis scale. By default, a linear + scale is used. type: string type: object - type: object - title: - description: Optional. The title of the widget. - type: string - xyChart: - description: A chart of time series data. - properties: - chartOptions: - description: Display options for the chart. + yAxis: + description: The properties applied to the y-axis. properties: - mode: - description: The chart mode. + label: + description: The label of the axis. + type: string + scale: + description: The axis scale. By default, a linear + scale is used. type: string type: object - dataSets: - description: Required. The data displayed in this - chart. - items: + required: + - dataSets + type: object + type: object + width: + description: The width of the tile, measured in grid blocks. + Tiles must have a minimum width of 1. + format: int32 + type: integer + xPos: + description: The zero-indexed position of the tile in grid + blocks relative to the left edge of the grid. Tiles must + be contained within the specified number of columns. `x_pos` + cannot be negative. + format: int32 + type: integer + yPos: + description: The zero-indexed position of the tile in grid + blocks relative to the top edge of the grid. `y_pos` cannot + be negative. + format: int32 + type: integer + type: object + type: array + type: object + projectRef: + description: Immutable. The Project that this resource belongs to. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: The `projectID` field of a project, when not managed + by KCC. + type: string + kind: + description: The kind of the Project resource; optional but must + be `Project` if provided. + type: string + name: + description: The `name` field of a `Project` resource. + type: string + namespace: + description: The `namespace` field of a `Project` resource. + type: string + type: object + resourceID: + description: Immutable. Optional. The name of the resource. 
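The `thresholds`, axis, and `timeshiftDuration` properties above can be combined on a chart roughly as follows. Values are illustrative; `color` and `direction` are omitted because, as noted above, they are not allowed in an XyChart, and the week-long timeshift only makes sense here because the data set uses the LINE plot type:

```
xyChart:
  dataSets:
  - plotType: LINE
    timeSeriesQuery:
      timeSeriesFilter:
        filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
  thresholds:
  - label: warning
    value: 0.8   # expressed in the native scale of the metric
  yAxis:
    label: CPU utilization
    scale: LINEAR
  # Compare against the same window one week earlier (week-over-week).
  timeshiftDuration: 604800s
```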
Used for + creation and acquisition. When unset, the value of `metadata.name` + is used as the default. + type: string + rowLayout: + description: The content is divided into equally spaced rows and the + widgets are arranged horizontally. + properties: + rows: + description: The rows of content to display. + items: + properties: + weight: + description: The relative weight of this row. The row weight + is used to adjust the height of rows on the screen (relative + to peers). Greater the weight, greater the height of the + row on the screen. If omitted, a value of 1 is used while + rendering. + format: int64 + type: integer + widgets: + description: The display widgets arranged horizontally in + this row. + items: + properties: + alertChart: + description: A chart of alert policy data. + properties: + alertPolicyRef: + description: Required. A reference to the MonitoringAlertPolicy. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external properties: - legendTemplate: - description: A template string for naming - `TimeSeries` in the resulting data set. - This should be a string with interpolations - of the form `${label_name}`, which will - resolve to the label's value. + external: + description: The MonitoringAlertPolicy link + in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", + when not managed by KCC. type: string - minAlignmentPeriod: - description: Optional. The lower bound on - data point frequency for this data set, - implemented by specifying the minimum alignment - period to use in a time series query For - example, if the data is published once every - 10 minutes, the `min_alignment_period` should - be at least 10 minutes. It would not make - sense to fetch and align data at one minute - intervals. + name: + description: The `name` field of a `MonitoringAlertPolicy` + resource. type: string - plotType: - description: How this data should be plotted - on the chart. + namespace: + description: The `namespace` field of a `MonitoringAlertPolicy` + resource. type: string - timeSeriesQuery: - description: Required. Fields for querying - time series data from the Stackdriver metrics - API. - properties: - timeSeriesFilter: - description: Filter parameters to fetch - time series. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. 
+ type: object + required: + - alertPolicyRef + type: object + blank: + description: A blank space. + type: object + collapsibleGroup: + description: A widget that groups the other widgets. + All widgets that are within the area spanned by + the grouping widget are considered member widgets. + properties: + collapsed: + description: The collapsed state of the widget + on first page load. + type: boolean + type: object + logsPanel: + description: A widget that shows a stream of logs. + properties: + filter: + description: A filter that chooses which log entries + to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). + Only log entries that match the filter are returned. An + empty filter matches all log entries. + type: string + resourceNames: + description: The names of logging resources to + collect logs for. Currently only projects are + supported. If empty, the widget will default + to the host project. + items: + oneOf: + - not: + required: + - external + required: + - name + - kind + - not: + anyOf: + - required: + - name + - required: + - namespace + - required: + - kind + required: + - external + properties: + external: + description: The external name of the referenced + resource + type: string + kind: + description: Kind of the referent. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + type: object + type: array + type: object + pieChart: + description: A widget that displays timeseries data + as a pie chart. + properties: + chartType: + description: Required. Indicates the visualization + type for the PieChart. + type: string + dataSets: + description: Required. The queries for the chart's + data. + items: + properties: + minAlignmentPeriod: + description: Optional. The lower bound on + data point frequency for this data set, + implemented by specifying the minimum + alignment period to use in a time series + query. For example, if the data is published + once every 10 minutes, the `min_alignment_period` + should be at least 10 minutes. It would + not make sense to fetch and align data + at one minute intervals. + type: string + sliceNameTemplate: + description: Optional. A template for the + name of the slice. This name will be displayed + in the legend and the tooltip of the pie + chart. It replaces the auto-generated + names for the slices. For example, if + the template is set to `${resource.labels.zone}`, + the zone's value will be used for the + name instead of the default name. + type: string + timeSeriesQuery: + description: Required. The query for the + PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`. + properties: + timeSeriesFilter: + description: Filter parameters to fetch + time series. + properties: + aggregation: + description: By default, the raw + time series data is returned. + Use this field to combine multiple + time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. 
- Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: + The maximum value of the `alignment_period` is 2 years, or 104 weeks. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - pickTimeSeriesFilter: - description: Ranking based time series - filter. - properties: - direction: - description: How to use the ranking - to select time series that pass - through the filter. - type: string - numTimeSeries: - description: How many time series - to allow to pass through the - filter. 
- format: int32 - type: integer - rankingMethod: - description: '`ranking_method` - is applied to each time series - independently to produce the - value which will be used to - compare the time series to other - time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after `aggregation` is applied. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. 
If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - required: - - filter - type: object - timeSeriesFilterRatio: - description: Parameters to fetch a ratio - between two time series filters. - properties: - denominator: - description: The denominator of the - ratio. - properties: - aggregation: - description: By default, the raw - time series data is returned. - Use this field to combine multiple - time series for different views - of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of + exactly one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is not + possible to reduce across + different resource types, + so this field implicitly contains + `resource.type`. Fields not + specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same + resource type, then the time + series are aggregated into + a single output time series. + If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Not all alignment operations may be applied to all time series. 
The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + pickTimeSeriesFilter: + description: Ranking based time + series filter. + properties: + direction: + description: How to use the + ranking to select time series + that pass through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to + other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the - same value for each of the - grouping fields. Each individual - time series is a member - of exactly one subset. The - `cross_series_reducer` is - applied to each subset of - time series. It is not possible - to reduce across different - resource types, so this - field implicitly contains - `resource.type`. 
Fields - not specified in `group_by_fields` - are aggregated away. If - `group_by_fields` is not - specified and all the time - series have the same resource - type, then the time series - are aggregated into a single - output time series. If `cross_series_reducer` - is not defined, this field - is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of + exactly one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is not + possible to reduce across + different resource types, + so this field implicitly contains + `resource.type`. Fields not + specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same + resource type, then the time + series are aggregated into + a single output time series. + If `cross_series_reducer` + is not defined, this field + is ignored. + items: type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - numerator: - description: The numerator of the - ratio. - properties: - aggregation: - description: By default, the raw - time series data is returned. 
- Use this field to combine multiple - time series for different views - of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio + between two time series filters. + properties: + denominator: + description: The denominator of + the ratio. + properties: + aggregation: + description: By default, the + raw time series data is returned. + Use this field to combine + multiple time series for different + views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. 
An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the - same value for each of the - grouping fields. Each individual - time series is a member - of exactly one subset. The - `cross_series_reducer` is - applied to each subset of - time series. It is not possible - to reduce across different - resource types, so this - field implicitly contains - `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If - `group_by_fields` is not - specified and all the time - series have the same resource - type, then the time series - are aggregated into a single - output time series. If `cross_series_reducer` - is not defined, this field - is ignored. - items: + The maximum value of the `alignment_period` is 2 years, or 104 weeks. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - pickTimeSeriesFilter: - description: Ranking based time series - filter. - properties: - direction: - description: How to use the ranking - to select time series that pass - through the filter. - type: string - numTimeSeries: - description: How many time series - to allow to pass through the - filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` - is applied to each time series - independently to produce the - value which will be used to - compare the time series to other - time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after the ratio is computed. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. 
- The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of + fields to preserve when + `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time + series are partitioned + into subsets prior to + applying the aggregation + operation. Each subset + contains time series that + have the same value for + each of the grouping fields. + Each individual time series + is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is + not possible to reduce + across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the + time series have the same + resource type, then the + time series are aggregated + into a single output time + series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. 
The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric + types, resources, and projects + to query. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + required: + - filter + type: object + numerator: + description: The numerator of the + ratio. + properties: + aggregation: + description: By default, the + raw time series data is returned. + Use this field to combine + multiple time series for different + views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - type: object - timeSeriesQueryLanguage: - description: A query used to fetch time - series with MQL. - type: string - unitOverride: - description: The unit of data contained - in fetched time series. If non-empty, - this unit will override any unit that - accompanies fetched data. 
The format - is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - type: string - type: object - required: - - timeSeriesQuery - type: object - type: array - thresholds: - description: Threshold lines drawn horizontally - across the chart. - items: - properties: - color: - description: The state color for this threshold. - Color is not allowed in a XyChart. - type: string - direction: - description: The direction for the current - threshold. Direction is not allowed in a - XyChart. - type: string - label: - description: A label for the threshold. - type: string - value: - description: The value of the threshold. The - value should be defined in the native scale - of the metric. - format: double - type: number - type: object - type: array - timeshiftDuration: - description: The duration used to display a comparison - chart. A comparison chart simultaneously shows - values from two similar-length time periods (e.g., - week-over-week metrics). The duration must be - positive, and it can only be applied to charts - with data sets of LINE plot type. - type: string - xAxis: - description: The properties applied to the x-axis. - properties: - label: - description: The label of the axis. - type: string - scale: - description: The axis scale. By default, a linear - scale is used. - type: string - type: object - yAxis: - description: The properties applied to the y-axis. - properties: - label: - description: The label of the axis. - type: string - scale: - description: The axis scale. By default, a linear - scale is used. - type: string - type: object - required: - - dataSets - type: object - type: object - width: - description: The width of the tile, measured in grid blocks. - Tiles must have a minimum width of 1. - format: int32 - type: integer - xPos: - description: The zero-indexed position of the tile in grid - blocks relative to the left edge of the grid. Tiles must - be contained within the specified number of columns. `x_pos` - cannot be negative. - format: int32 - type: integer - yPos: - description: The zero-indexed position of the tile in grid - blocks relative to the top edge of the grid. `y_pos` cannot - be negative. - format: int32 - type: integer - type: object - type: array - type: object - projectRef: - description: Immutable. The Project that this resource belongs to. - oneOf: - - not: - required: - - external - required: - - name - - not: - anyOf: - - required: - - name - - required: - - namespace - required: - - external - properties: - external: - description: The `projectID` field of a project, when not managed - by KCC. - type: string - kind: - description: The kind of the Project resource; optional but must - be `Project` if provided. - type: string - name: - description: The `name` field of a `Project` resource. - type: string - namespace: - description: The `namespace` field of a `Project` resource. - type: string - type: object - resourceID: - description: Immutable. Optional. The name of the resource. Used for - creation and acquisition. When unset, the value of `metadata.name` - is used as the default. - type: string - rowLayout: - description: The content is divided into equally spaced rows and the - widgets are arranged horizontally. - properties: - rows: - description: The rows of content to display. - items: - properties: - weight: - description: The relative weight of this row. The row weight - is used to adjust the height of rows on the screen (relative - to peers). 
Greater the weight, greater the height of the - row on the screen. If omitted, a value of 1 is used while - rendering. - format: int64 - type: integer - widgets: - description: The display widgets arranged horizontally in - this row. - items: - properties: - alertChart: - description: A chart of alert policy data. - properties: - alertPolicyRef: - description: Required. A reference to the MonitoringAlertPolicy. - oneOf: - - not: - required: - - external - required: - - name - - not: - anyOf: - - required: - - name - - required: - - namespace + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of + fields to preserve when + `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time + series are partitioned + into subsets prior to + applying the aggregation + operation. Each subset + contains time series that + have the same value for + each of the grouping fields. + Each individual time series + is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is + not possible to reduce + across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the + time series have the same + resource type, then the + time series are aggregated + into a single output time + series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. 
The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric + types, resources, and projects + to query. + type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time + series filter. + properties: + direction: + description: How to use the + ranking to select time series + that pass through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to + other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of + exactly one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is not + possible to reduce across + different resource types, + so this field implicitly contains + `resource.type`. Fields not + specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same + resource type, then the time + series are aggregated into + a single output time series. + If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. 
Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time + series with MQL. + type: string + unitOverride: + description: The unit of data contained + in fetched time series. If non-empty, + this unit will override any unit that + accompanies fetched data. The format + is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object required: - - external - properties: - external: - description: The MonitoringAlertPolicy link - in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", - when not managed by KCC. - type: string - name: - description: The `name` field of a `MonitoringAlertPolicy` - resource. - type: string - namespace: - description: The `namespace` field of a `MonitoringAlertPolicy` - resource. - type: string - type: object - required: - - alertPolicyRef - type: object - blank: - description: A blank space. - type: object - collapsibleGroup: - description: A widget that groups the other widgets. - All widgets that are within the area spanned by - the grouping widget are considered member widgets. - properties: - collapsed: - description: The collapsed state of the widget - on first page load. - type: boolean - type: object - logsPanel: - description: A widget that shows a stream of logs. - properties: - filter: - description: A filter that chooses which log entries - to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). - Only log entries that match the filter are returned. An - empty filter matches all log entries. - type: string - resourceNames: - description: The names of logging resources to - collect logs for. Currently only projects are - supported. If empty, the widget will default - to the host project. - items: - oneOf: - - not: - required: - - external - required: - - name - - kind - - not: - anyOf: - - required: - - name - - required: - - namespace - - required: - - kind - required: - - external - properties: - external: - description: The external name of the referenced - resource - type: string - kind: - description: Kind of the referent. - type: string - name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - namespace: - description: 'Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: string + - timeSeriesQuery type: object type: array + showLabels: + description: Optional. 
Indicates whether or not + the pie chart should show slices' labels + type: boolean + required: + - chartType + - dataSets type: object scorecard: description: A scorecard summarizing time series data. diff --git a/docs/releasenotes/release-1.120.md b/docs/releasenotes/release-1.120.md index 8b4ca1db49..0920e45502 100644 --- a/docs/releasenotes/release-1.120.md +++ b/docs/releasenotes/release-1.120.md @@ -31,6 +31,7 @@ output fields from GCP APIs are in `status.observedState.*` * Added `collapsibleGroup` widgets. * Added `style` fields to text widgets. * Added `sectionHeader` widgets. + * Added `pieChart` widgets. * `StorageBucket` * Added `spec.softDeletePolicy` field. diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go index b84bb4c8fc..7241225d2e 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -217,6 +217,18 @@ type DashboardPickTimeSeriesFilter struct { RankingMethod *string `json:"rankingMethod,omitempty"` } +type DashboardPieChart struct { + /* Required. Indicates the visualization type for the PieChart. */ + ChartType string `json:"chartType"` + + /* Required. The queries for the chart's data. */ + DataSets []DashboardDataSets `json:"dataSets"` + + /* Optional. Indicates whether or not the pie chart should show slices' labels */ + // +optional + ShowLabels *bool `json:"showLabels,omitempty"` +} + type DashboardResourceNames struct { /* The external name of the referenced resource */ // +optional @@ -535,6 +547,10 @@ type DashboardWidget struct { // +optional LogsPanel *DashboardLogsPanel `json:"logsPanel,omitempty"` + /* A widget that displays timeseries data as a pie chart. */ + // +optional + PieChart *DashboardPieChart `json:"pieChart,omitempty"` + /* A scorecard summarizing time series data. */ // +optional Scorecard *DashboardScorecard `json:"scorecard,omitempty"` @@ -573,6 +589,10 @@ type DashboardWidgets struct { // +optional LogsPanel *DashboardLogsPanel `json:"logsPanel,omitempty"` + /* A widget that displays timeseries data as a pie chart. */ + // +optional + PieChart *DashboardPieChart `json:"pieChart,omitempty"` + /* A scorecard summarizing time series data. */ // +optional Scorecard *DashboardScorecard `json:"scorecard,omitempty"` diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go index 0d6f914beb..da7bb970e0 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -883,6 +883,34 @@ func (in *DashboardPickTimeSeriesFilter) DeepCopy() *DashboardPickTimeSeriesFilt return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardPieChart) DeepCopyInto(out *DashboardPieChart) { + *out = *in + if in.DataSets != nil { + in, out := &in.DataSets, &out.DataSets + *out = make([]DashboardDataSets, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShowLabels != nil { + in, out := &in.ShowLabels, &out.ShowLabels + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardPieChart. 
+func (in *DashboardPieChart) DeepCopy() *DashboardPieChart { + if in == nil { + return nil + } + out := new(DashboardPieChart) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DashboardResourceNames) DeepCopyInto(out *DashboardResourceNames) { *out = *in @@ -1372,6 +1400,11 @@ func (in *DashboardWidget) DeepCopyInto(out *DashboardWidget) { *out = new(DashboardLogsPanel) (*in).DeepCopyInto(*out) } + if in.PieChart != nil { + in, out := &in.PieChart, &out.PieChart + *out = new(DashboardPieChart) + (*in).DeepCopyInto(*out) + } if in.Scorecard != nil { in, out := &in.Scorecard, &out.Scorecard *out = new(DashboardScorecard) @@ -1433,6 +1466,11 @@ func (in *DashboardWidgets) DeepCopyInto(out *DashboardWidgets) { *out = new(DashboardLogsPanel) (*in).DeepCopyInto(*out) } + if in.PieChart != nil { + in, out := &in.PieChart, &out.PieChart + *out = new(DashboardPieChart) + (*in).DeepCopyInto(*out) + } if in.Scorecard != nil { in, out := &in.Scorecard, &out.Scorecard *out = new(DashboardScorecard) diff --git a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go index a78976e89d..93309d4def 100644 --- a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go @@ -350,48 +350,47 @@ func PickTimeSeriesFilter_ToProto(mapCtx *MapContext, in *krm.PickTimeSeriesFilt return out } -// func PieChart_FromProto(mapCtx *MapContext, in *pb.PieChart) *krm.PieChart { -// if in == nil { -// return nil -// } -// out := &krm.PieChart{} -// out.DataSets = Slice_FromProto(mapCtx, in.DataSets, PieChart_PieChartDataSet_FromProto) -// out.ChartType = Enum_FromProto(mapCtx, in.ChartType) -// out.ShowLabels = LazyPtr(in.GetShowLabels()) -// return out -// } -// func PieChart_ToProto(mapCtx *MapContext, in *krm.PieChart) *pb.PieChart { -// if in == nil { -// return nil -// } -// out := &pb.PieChart{} -// out.DataSets = Slice_ToProto(mapCtx, in.DataSets, PieChart_PieChartDataSet_ToProto) -// out.ChartType = Enum_ToProto[pb.PieChart_PieChartType](mapCtx, in.ChartType) -// out.ShowLabels = ValueOf(in.ShowLabels) -// return out -// } +func PieChart_FromProto(mapCtx *MapContext, in *pb.PieChart) *krm.PieChart { + if in == nil { + return nil + } + out := &krm.PieChart{} + out.DataSets = Slice_FromProto(mapCtx, in.DataSets, PieChart_PieChartDataSet_FromProto) + out.ChartType = Enum_FromProto(mapCtx, in.ChartType) + out.ShowLabels = LazyPtr(in.GetShowLabels()) + return out +} +func PieChart_ToProto(mapCtx *MapContext, in *krm.PieChart) *pb.PieChart { + if in == nil { + return nil + } + out := &pb.PieChart{} + out.DataSets = Slice_ToProto(mapCtx, in.DataSets, PieChart_PieChartDataSet_ToProto) + out.ChartType = Enum_ToProto[pb.PieChart_PieChartType](mapCtx, in.ChartType) + out.ShowLabels = ValueOf(in.ShowLabels) + return out +} +func PieChart_PieChartDataSet_FromProto(mapCtx *MapContext, in *pb.PieChart_PieChartDataSet) *krm.PieChart_PieChartDataSet { + if in == nil { + return nil + } + out := &krm.PieChart_PieChartDataSet{} + out.TimeSeriesQuery = TimeSeriesQuery_FromProto(mapCtx, in.GetTimeSeriesQuery()) + out.SliceNameTemplate = LazyPtr(in.GetSliceNameTemplate()) + out.MinAlignmentPeriod = PieChartDataSet_MinAlignmentPeriod_FromProto(mapCtx, in.GetMinAlignmentPeriod()) + return out +} +func PieChart_PieChartDataSet_ToProto(mapCtx *MapContext, in 
*krm.PieChart_PieChartDataSet) *pb.PieChart_PieChartDataSet { + if in == nil { + return nil + } + out := &pb.PieChart_PieChartDataSet{} + out.TimeSeriesQuery = TimeSeriesQuery_ToProto(mapCtx, in.TimeSeriesQuery) + out.SliceNameTemplate = ValueOf(in.SliceNameTemplate) + out.MinAlignmentPeriod = PieChartDataSet_MinAlignmentPeriod_ToProto(mapCtx, in.MinAlignmentPeriod) + return out +} -// func PieChart_PieChartDataSet_FromProto(mapCtx *MapContext, in *pb.PieChart_PieChartDataSet) *krm.PieChart_PieChartDataSet { -// if in == nil { -// return nil -// } -// out := &krm.PieChart_PieChartDataSet{} -// out.TimeSeriesQuery = TimeSeriesQuery_FromProto(mapCtx, in.GetTimeSeriesQuery()) -// out.SliceNameTemplate = LazyPtr(in.GetSliceNameTemplate()) -// out.MinAlignmentPeriod = PieChartDataSet_MinAlignmentPeriod_FromProto(mapCtx, in.GetMinAlignmentPeriod()) -// return out -// } -// -// func PieChart_PieChartDataSet_ToProto(mapCtx *MapContext, in *krm.PieChart_PieChartDataSet) *pb.PieChart_PieChartDataSet { -// if in == nil { -// return nil -// } -// out := &pb.PieChart_PieChartDataSet{} -// out.TimeSeriesQuery = TimeSeriesQuery_ToProto(mapCtx, in.TimeSeriesQuery) -// out.SliceNameTemplate = ValueOf(in.SliceNameTemplate) -// out.MinAlignmentPeriod = PieChartDataSet_MinAlignmentPeriod_ToProto(mapCtx, in.MinAlignmentPeriod) -// return out -// } func RowLayout_FromProto(mapCtx *MapContext, in *pb.RowLayout) *krm.RowLayout { if in == nil { return nil @@ -805,7 +804,7 @@ func Widget_FromProto(mapCtx *MapContext, in *pb.Widget) *krm.Widget { out.CollapsibleGroup = CollapsibleGroup_FromProto(mapCtx, in.GetCollapsibleGroup()) out.LogsPanel = LogsPanel_FromProto(mapCtx, in.GetLogsPanel()) // MISSING: IncidentList - // MISSING: PieChart + out.PieChart = PieChart_FromProto(mapCtx, in.GetPieChart()) // MISSING: ErrorReportingPanel out.SectionHeader = SectionHeader_FromProto(mapCtx, in.GetSectionHeader()) // MISSING: SingleViewGroup @@ -841,7 +840,9 @@ func Widget_ToProto(mapCtx *MapContext, in *krm.Widget) *pb.Widget { out.Content = &pb.Widget_LogsPanel{LogsPanel: oneof} } // MISSING: IncidentList - // MISSING: PieChart + if oneof := PieChart_ToProto(mapCtx, in.PieChart); oneof != nil { + out.Content = &pb.Widget_PieChart{PieChart: oneof} + } // MISSING: ErrorReportingPanel if oneof := SectionHeader_ToProto(mapCtx, in.SectionHeader); oneof != nil { out.Content = &pb.Widget_SectionHeader{SectionHeader: oneof} diff --git a/pkg/controller/direct/monitoring/dashboard_mappings.go b/pkg/controller/direct/monitoring/dashboard_mappings.go index b4538eb0eb..7622cf277e 100644 --- a/pkg/controller/direct/monitoring/dashboard_mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_mappings.go @@ -101,6 +101,14 @@ func XyChart_TimeshiftDuration_ToProto(mapCtx *MapContext, in *string) *duration return Duration_ToProto(mapCtx, in) } +func PieChartDataSet_MinAlignmentPeriod_FromProto(mapCtx *MapContext, in *durationpb.Duration) *string { + return Duration_FromProto(mapCtx, in) +} + +func PieChartDataSet_MinAlignmentPeriod_ToProto(mapCtx *MapContext, in *string) *durationpb.Duration { + return Duration_ToProto(mapCtx, in) +} + func TableDataSet_MinAlignmentPeriod_FromProto(mapCtx *MapContext, in *durationpb.Duration) *string { return Duration_FromProto(mapCtx, in) } diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden 
b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden index 28c58fc9b6..b1a580b9ef 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden @@ -62,6 +62,23 @@ spec: alertPolicyRef: external: projects/${projectId}/alertPolicies/${alertPolicyID} title: AlertChart Widget + - pieChart: + chartType: DONUT + dataSets: + - minAlignmentPeriod: 60s + sliceNameTemplate: ${resource.labels.zone} + timeSeriesQuery: + timeSeriesFilter: + aggregation: + alignmentPeriod: "60" + perSeriesAligner: ALIGN_RATE + filter: metric.type="compute.googleapis.com/instance/disk/read_bytes_count" + resource.type="gce_instance" + secondaryAggregation: + alignmentPeriod: "60" + perSeriesAligner: ALIGN_MEAN + showLabels: true + title: PieChart Widget displayName: monitoringdashboard-full projectRef: external: ${projectId} \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml index 422e7a8bb2..fefd47a1a0 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml @@ -70,6 +70,23 @@ spec: alertPolicyRef: name: monitoringalertpolicy-${uniqueId} title: AlertChart Widget + - pieChart: + chartType: DONUT + dataSets: + - minAlignmentPeriod: 60s + sliceNameTemplate: ${resource.labels.zone} + timeSeriesQuery: + timeSeriesFilter: + aggregation: + alignmentPeriod: 60s + perSeriesAligner: ALIGN_RATE + filter: metric.type="compute.googleapis.com/instance/disk/read_bytes_count" + resource.type="gce_instance" + secondaryAggregation: + alignmentPeriod: 60s + perSeriesAligner: ALIGN_MEAN + showLabels: true + title: PieChart Widget displayName: monitoringdashboard-full projectRef: external: ${projectId} diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log index 11c18f61dc..21c276c9c6 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log @@ -286,6 +286,32 @@ x-goog-request-params: parent=projects%2F${projectId} "name": "projects/${projectId}/alertPolicies/${alertPolicyID}" }, "title": "AlertChart Widget" + }, + { + "pieChart": { + "chartType": 2, + "dataSets": [ + { + "minAlignmentPeriod": "60s", + "sliceNameTemplate": "${resource.labels.zone}", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": 2 + }, + "filter": 
"metric.type=\"compute.googleapis.com/instance/disk/read_bytes_count\" resource.type=\"gce_instance\"", + "secondaryAggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": 12 + } + } + } + } + ], + "showLabels": true + }, + "title": "PieChart Widget" } ] } @@ -403,6 +429,32 @@ X-Xss-Protection: 0 "name": "projects/${projectId}/alertPolicies/${alertPolicyID}" }, "title": "AlertChart Widget" + }, + { + "pieChart": { + "chartType": "DONUT", + "dataSets": [ + { + "minAlignmentPeriod": "60s", + "sliceNameTemplate": "${resource.labels.zone}", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"compute.googleapis.com/instance/disk/read_bytes_count\" resource.type=\"gce_instance\"", + "secondaryAggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_MEAN" + } + } + } + } + ], + "showLabels": true + }, + "title": "PieChart Widget" } ] } @@ -528,6 +580,32 @@ X-Xss-Protection: 0 "name": "projects/${projectId}/alertPolicies/${alertPolicyID}" }, "title": "AlertChart Widget" + }, + { + "pieChart": { + "chartType": "DONUT", + "dataSets": [ + { + "minAlignmentPeriod": "60s", + "sliceNameTemplate": "${resource.labels.zone}", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"compute.googleapis.com/instance/disk/read_bytes_count\" resource.type=\"gce_instance\"", + "secondaryAggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_MEAN" + } + } + } + } + ], + "showLabels": true + }, + "title": "PieChart Widget" } ] } diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml index 0602c420bb..de83262134 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml @@ -75,4 +75,30 @@ spec: - title: "AlertChart Widget" alertChart: alertPolicyRef: - name: monitoringalertpolicy-${uniqueId} \ No newline at end of file + name: monitoringalertpolicy-${uniqueId} + - title: "PieChart Widget" + pieChart: + chartType: DONUT + showLabels: true + dataSets: + - sliceNameTemplate: "${resource.labels.zone}" + minAlignmentPeriod: 60s + # dimensions: + # - column: location + # columnType: STRING + # maxBinCount: 5 + # sortColumn: location + # sortOrder: SORT_ORDER_ASCENDING + # measures: + # - column: mymeasure + # aggregationFunction: + # type: count + timeSeriesQuery: + timeSeriesFilter: + filter: "metric.type=\"compute.googleapis.com/instance/disk/read_bytes_count\" resource.type=\"gce_instance\"" + aggregation: + alignmentPeriod: "60s" + perSeriesAligner: "ALIGN_RATE" + secondaryAggregation: + alignmentPeriod: "60s" + perSeriesAligner: "ALIGN_MEAN" diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md index c0df0ac92d..6e53ab0f2e 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md +++ 
b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md @@ -96,6 +96,60 @@ columnLayout: kind: string name: string namespace: string + pieChart: + chartType: string + dataSets: + - minAlignmentPeriod: string + sliceNameTemplate: string + timeSeriesQuery: + timeSeriesFilter: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + pickTimeSeriesFilter: + direction: string + numTimeSeries: integer + rankingMethod: string + secondaryAggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + timeSeriesFilterRatio: + denominator: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + numerator: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + pickTimeSeriesFilter: + direction: string + numTimeSeries: integer + rankingMethod: string + secondaryAggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + timeSeriesQueryLanguage: string + unitOverride: string + showLabels: boolean scorecard: gaugeView: lowerBound: float @@ -257,6 +311,60 @@ gridLayout: kind: string name: string namespace: string + pieChart: + chartType: string + dataSets: + - minAlignmentPeriod: string + sliceNameTemplate: string + timeSeriesQuery: + timeSeriesFilter: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + pickTimeSeriesFilter: + direction: string + numTimeSeries: integer + rankingMethod: string + secondaryAggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + timeSeriesFilterRatio: + denominator: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + numerator: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + pickTimeSeriesFilter: + direction: string + numTimeSeries: integer + rankingMethod: string + secondaryAggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + timeSeriesQueryLanguage: string + unitOverride: string + showLabels: boolean scorecard: gaugeView: lowerBound: float @@ -419,6 +527,60 @@ mosaicLayout: kind: string name: string namespace: string + pieChart: + chartType: string + dataSets: + - minAlignmentPeriod: string + sliceNameTemplate: string + timeSeriesQuery: + timeSeriesFilter: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + pickTimeSeriesFilter: + direction: string + numTimeSeries: integer + rankingMethod: string + secondaryAggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + timeSeriesFilterRatio: + denominator: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + numerator: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: 
string + filter: string + pickTimeSeriesFilter: + direction: string + numTimeSeries: integer + rankingMethod: string + secondaryAggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + timeSeriesQueryLanguage: string + unitOverride: string + showLabels: boolean scorecard: gaugeView: lowerBound: float @@ -589,6 +751,60 @@ rowLayout: kind: string name: string namespace: string + pieChart: + chartType: string + dataSets: + - minAlignmentPeriod: string + sliceNameTemplate: string + timeSeriesQuery: + timeSeriesFilter: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + pickTimeSeriesFilter: + direction: string + numTimeSeries: integer + rankingMethod: string + secondaryAggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + timeSeriesFilterRatio: + denominator: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + numerator: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + pickTimeSeriesFilter: + direction: string + numTimeSeries: integer + rankingMethod: string + secondaryAggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + timeSeriesQueryLanguage: string + unitOverride: string + showLabels: boolean scorecard: gaugeView: lowerBound: float @@ -962,204 +1178,97 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard

+

columnLayout.columns[].widgets[].pieChart

Optional

object

-

{% verbatim %}A scorecard summarizing time series data.{% endverbatim %}

+

{% verbatim %}A widget that displays timeseries data as a pie chart.{% endverbatim %}

-

columnLayout.columns[].widgets[].scorecard.gaugeView

-

Optional

+

columnLayout.columns[].widgets[].pieChart.chartType

+

Required*

+ + +

string

+

{% verbatim %}Required. Indicates the visualization type for the PieChart.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].pieChart.dataSets

+

Required*

+ + +

list (object)

+

{% verbatim %}Required. The queries for the chart's data.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].pieChart.dataSets[]

+

Required*

object

-

{% verbatim %}Will cause the scorecard to show a gauge chart.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

columnLayout.columns[].widgets[].scorecard.gaugeView.lowerBound

+

columnLayout.columns[].widgets[].pieChart.dataSets[].minAlignmentPeriod

Optional

-

float

-

{% verbatim %}The lower bound for this gauge chart. The value of the chart should always be greater than or equal to this.{% endverbatim %}

+

string

+

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

-

columnLayout.columns[].widgets[].scorecard.gaugeView.upperBound

+

columnLayout.columns[].widgets[].pieChart.dataSets[].sliceNameTemplate

Optional

-

float

-

{% verbatim %}The upper bound for this gauge chart. The value of the chart should always be less than or equal to this.{% endverbatim %}

+

string

+

{% verbatim %}Optional. A template for the name of the slice. This name will be displayed in the legend and the tooltip of the pie chart. It replaces the auto-generated names for the slices. For example, if the template is set to `${resource.labels.zone}`, the zone's value will be used for the name instead of the default name.{% endverbatim %}

-

columnLayout.columns[].widgets[].scorecard.sparkChartView

-

Optional

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery

+

Required*

object

-

{% verbatim %}Will cause the scorecard to show a spark chart.{% endverbatim %}

+

{% verbatim %}Required. The query for the PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`.{% endverbatim %}

-

columnLayout.columns[].widgets[].scorecard.sparkChartView.minAlignmentPeriod

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter

Optional

-

string

-

{% verbatim %}The lower bound on data point frequency in the chart implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes it would not make sense to fetch and align data at one minute intervals. This field is optional and exists only as a hint.{% endverbatim %}

+

object

+

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

-

columnLayout.columns[].widgets[].scorecard.sparkChartView.sparkChartType

-

Required*

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

+

Optional

-

string

-

{% verbatim %}Required. The type of sparkchart to show in this chartView.{% endverbatim %}

+

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

-

columnLayout.columns[].widgets[].scorecard.thresholds

-

Optional

- - -

list (object)

-

{% verbatim %}The thresholds used to determine the state of the scorecard given the - time series' current value. For an actual value x, the scorecard is in a - danger state if x is less than or equal to a danger threshold that triggers - below, or greater than or equal to a danger threshold that triggers above. - Similarly, if x is above/below a warning threshold that triggers - above/below, then the scorecard is in a warning state - unless x also puts - it in a danger state. (Danger trumps warning.) - - As an example, consider a scorecard with the following four thresholds: - - ``` - { - value: 90, - category: 'DANGER', - trigger: 'ABOVE', - }, - { - value: 70, - category: 'WARNING', - trigger: 'ABOVE', - }, - { - value: 10, - category: 'DANGER', - trigger: 'BELOW', - }, - { - value: 20, - category: 'WARNING', - trigger: 'BELOW', - } - ``` - - Then: values less than or equal to 10 would put the scorecard in a DANGER - state, values greater than 10 but less than or equal to 20 a WARNING state, - values strictly between 20 and 70 an OK state, values greater than or equal - to 70 but less than 90 a WARNING state, and values greater than or equal to - 90 a DANGER state.{% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].scorecard.thresholds[]

-

Optional

- - -

object

-

{% verbatim %}{% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].scorecard.thresholds[].color

-

Optional

- - -

string

-

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].scorecard.thresholds[].direction

-

Optional

- - -

string

-

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].scorecard.thresholds[].label

-

Optional

- - -

string

-

{% verbatim %}A label for the threshold.{% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].scorecard.thresholds[].value

-

Optional

- - -

float

-

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery

-

Required*

- - -

object

-

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter

-

Optional

- - -

object

-

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation

-

Optional

- - -

object

-

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

@@ -1180,7 +1289,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

@@ -1203,7 +1312,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

Optional

@@ -1213,7 +1322,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

Optional

@@ -1223,7 +1332,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

@@ -1248,7 +1357,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.filter

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

Required*

@@ -1258,7 +1367,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

Optional

@@ -1268,7 +1377,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

Optional

@@ -1278,7 +1387,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -1288,7 +1397,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

Optional

@@ -1298,7 +1407,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

Optional

@@ -1308,7 +1417,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

@@ -1329,7 +1438,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

@@ -1352,7 +1461,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

Optional

@@ -1362,7 +1471,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

Optional

@@ -1372,7 +1481,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

@@ -1397,7 +1506,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

Optional

@@ -1407,7 +1516,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

Optional

@@ -1417,7 +1526,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

Optional

@@ -1427,7 +1536,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

Optional

@@ -1448,7 +1557,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

@@ -1471,7 +1580,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

Optional

@@ -1481,7 +1590,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

Optional

@@ -1491,7 +1600,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

@@ -1516,7 +1625,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

Required*

@@ -1526,7 +1635,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

Optional

@@ -1536,7 +1645,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

Optional

@@ -1546,7 +1655,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

@@ -1567,7 +1676,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

Optional

@@ -1590,7 +1699,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

Optional

@@ -1600,7 +1709,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

Optional

@@ -1610,7 +1719,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

@@ -1635,7 +1744,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

Required*

@@ -1645,7 +1754,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

Optional

@@ -1655,7 +1764,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

Optional

@@ -1665,7 +1774,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -1675,7 +1784,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

Optional

@@ -1685,7 +1794,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

Optional

@@ -1695,7 +1804,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

Optional

@@ -1716,7 +1825,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

Optional

@@ -1739,7 +1848,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

Optional

@@ -1749,7 +1858,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

Optional

@@ -1759,7 +1868,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

Optional

@@ -1784,7 +1893,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesQueryLanguage

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

Optional

@@ -1794,7 +1903,7 @@ rowLayout: -

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.unitOverride

+

columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery.unitOverride

Optional

@@ -1804,237 +1913,184 @@ rowLayout: -

columnLayout.columns[].widgets[].sectionHeader

-

Optional

- - -

object

-

{% verbatim %}A widget that defines a section header for easier navigation of the dashboard.{% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].sectionHeader.dividerBelow

+

columnLayout.columns[].widgets[].pieChart.showLabels

Optional

boolean

-

{% verbatim %}Whether to insert a divider below the section in the table of contents{% endverbatim %}

+

{% verbatim %}Optional. Indicates whether or not the pie chart should show slices' labels{% endverbatim %}

-

columnLayout.columns[].widgets[].sectionHeader.subtitle

+

columnLayout.columns[].widgets[].scorecard

Optional

-

string

-

{% verbatim %}The subtitle of the section{% endverbatim %}

+

object

+

{% verbatim %}A scorecard summarizing time series data.{% endverbatim %}

-

columnLayout.columns[].widgets[].text

+

columnLayout.columns[].widgets[].scorecard.gaugeView

Optional

object

-

{% verbatim %}A raw string or markdown displaying textual content.{% endverbatim %}

+

{% verbatim %}Will cause the scorecard to show a gauge chart.{% endverbatim %}

-

columnLayout.columns[].widgets[].text.content

+

columnLayout.columns[].widgets[].scorecard.gaugeView.lowerBound

Optional

-

string

-

{% verbatim %}The text content to be displayed.{% endverbatim %}

+

float

+

{% verbatim %}The lower bound for this gauge chart. The value of the chart should always be greater than or equal to this.{% endverbatim %}

-

columnLayout.columns[].widgets[].text.format

+

columnLayout.columns[].widgets[].scorecard.gaugeView.upperBound

Optional

-

string

-

{% verbatim %}How the text content is formatted.{% endverbatim %}

+

float

+

{% verbatim %}The upper bound for this gauge chart. The value of the chart should always be less than or equal to this.{% endverbatim %}

-

columnLayout.columns[].widgets[].text.style

+

columnLayout.columns[].widgets[].scorecard.sparkChartView

Optional

object

-

{% verbatim %}How the text is styled{% endverbatim %}

+

{% verbatim %}Will cause the scorecard to show a spark chart.{% endverbatim %}

-

columnLayout.columns[].widgets[].text.style.backgroundColor

+

columnLayout.columns[].widgets[].scorecard.sparkChartView.minAlignmentPeriod

Optional

string

-

{% verbatim %}The background color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

+

{% verbatim %}The lower bound on data point frequency in the chart implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes it would not make sense to fetch and align data at one minute intervals. This field is optional and exists only as a hint.{% endverbatim %}

-

columnLayout.columns[].widgets[].text.style.fontSize

-

Optional

+

columnLayout.columns[].widgets[].scorecard.sparkChartView.sparkChartType

+

Required*

string

-

{% verbatim %}Font sizes for both the title and content. The title will still be larger relative to the content.{% endverbatim %}

+

{% verbatim %}Required. The type of sparkchart to show in this chartView.{% endverbatim %}

-

columnLayout.columns[].widgets[].text.style.horizontalAlignment

+

columnLayout.columns[].widgets[].scorecard.thresholds

Optional

-

string

-

{% verbatim %}The horizontal alignment of both the title and content{% endverbatim %}

+

list (object)

+

{% verbatim %}The thresholds used to determine the state of the scorecard given the + time series' current value. For an actual value x, the scorecard is in a + danger state if x is less than or equal to a danger threshold that triggers + below, or greater than or equal to a danger threshold that triggers above. + Similarly, if x is above/below a warning threshold that triggers + above/below, then the scorecard is in a warning state - unless x also puts + it in a danger state. (Danger trumps warning.) + + As an example, consider a scorecard with the following four thresholds: + + ``` + { + value: 90, + category: 'DANGER', + trigger: 'ABOVE', + }, + { + value: 70, + category: 'WARNING', + trigger: 'ABOVE', + }, + { + value: 10, + category: 'DANGER', + trigger: 'BELOW', + }, + { + value: 20, + category: 'WARNING', + trigger: 'BELOW', + } + ``` + + Then: values less than or equal to 10 would put the scorecard in a DANGER + state, values greater than 10 but less than or equal to 20 a WARNING state, + values strictly between 20 and 70 an OK state, values greater than or equal + to 70 but less than 90 a WARNING state, and values greater than or equal to + 90 a DANGER state.{% endverbatim %}

-

columnLayout.columns[].widgets[].text.style.padding

+

columnLayout.columns[].widgets[].scorecard.thresholds[]

Optional

-

string

-

{% verbatim %}The amount of padding around the widget{% endverbatim %}

+

object

+

{% verbatim %}{% endverbatim %}

-

columnLayout.columns[].widgets[].text.style.pointerLocation

-

Optional

- - -

string

-

{% verbatim %}The pointer location for this widget (also sometimes called a "tail"){% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].text.style.textColor

-

Optional

- - -

string

-

{% verbatim %}The text color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].text.style.verticalAlignment

-

Optional

- - -

string

-

{% verbatim %}The vertical alignment of both the title and content{% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].title

-

Optional

- - -

string

-

{% verbatim %}Optional. The title of the widget.{% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].xyChart

-

Optional

- - -

object

-

{% verbatim %}A chart of time series data.{% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].xyChart.chartOptions

-

Optional

- - -

object

-

{% verbatim %}Display options for the chart.{% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].xyChart.chartOptions.mode

+

columnLayout.columns[].widgets[].scorecard.thresholds[].color

Optional

string

-

{% verbatim %}The chart mode.{% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].xyChart.dataSets

-

Required*

- - -

list (object)

-

{% verbatim %}Required. The data displayed in this chart.{% endverbatim %}

- - - - -

columnLayout.columns[].widgets[].xyChart.dataSets[]

-

Required*

- - -

object

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.dataSets[].legendTemplate

+

columnLayout.columns[].widgets[].scorecard.thresholds[].direction

Optional

string

-

{% verbatim %}A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value.{% endverbatim %}

+

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.dataSets[].minAlignmentPeriod

+

columnLayout.columns[].widgets[].scorecard.thresholds[].label

Optional

string

-

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

+

{% verbatim %}A label for the threshold.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.dataSets[].plotType

+

columnLayout.columns[].widgets[].scorecard.thresholds[].value

Optional

-

string

-

{% verbatim %}How this data should be plotted on the chart.{% endverbatim %}

+

float

+

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery

Required*

@@ -2044,7 +2100,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter

Optional

@@ -2054,7 +2110,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation

Optional

@@ -2064,7 +2120,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

@@ -2085,7 +2141,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

@@ -2108,7 +2164,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

Optional

@@ -2118,7 +2174,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

Optional

@@ -2128,7 +2184,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

@@ -2153,7 +2209,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.filter

Required*

@@ -2163,7 +2219,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

Optional

@@ -2173,7 +2229,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

Optional

@@ -2183,7 +2239,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -2193,7 +2249,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

Optional

@@ -2203,7 +2259,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation

Optional

@@ -2213,7 +2269,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

@@ -2234,7 +2290,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

@@ -2257,7 +2313,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

Optional

@@ -2267,7 +2323,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

Optional

@@ -2277,7 +2333,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

@@ -2302,7 +2358,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio

Optional

@@ -2312,7 +2368,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator

Optional

@@ -2322,7 +2378,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

Optional

@@ -2332,7 +2388,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

Optional

@@ -2353,7 +2409,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

@@ -2376,7 +2432,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

Optional

@@ -2386,7 +2442,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

Optional

@@ -2396,7 +2452,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

@@ -2421,7 +2477,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

Required*

@@ -2431,7 +2487,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator

Optional

@@ -2441,7 +2497,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

Optional

@@ -2451,7 +2507,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

@@ -2472,7 +2528,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

Optional

@@ -2495,7 +2551,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

Optional

@@ -2505,7 +2561,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

Optional

@@ -2515,7 +2571,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

@@ -2540,7 +2596,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

Required*

@@ -2550,7 +2606,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

Optional

@@ -2560,7 +2616,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

Optional

@@ -2570,7 +2626,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -2580,7 +2636,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

Optional

@@ -2590,7 +2646,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

Optional

@@ -2600,7 +2656,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

Optional

@@ -2621,7 +2677,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

Optional

@@ -2644,7 +2700,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

Optional

@@ -2654,7 +2710,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

Optional

@@ -2664,7 +2720,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

Optional

@@ -2689,7 +2745,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.timeSeriesQueryLanguage

Optional

@@ -2699,7 +2755,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.unitOverride

+

columnLayout.columns[].widgets[].scorecard.timeSeriesQuery.unitOverride

Optional

@@ -2709,534 +2765,644 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.thresholds

+

columnLayout.columns[].widgets[].sectionHeader

Optional

-

list (object)

-

{% verbatim %}Threshold lines drawn horizontally across the chart.{% endverbatim %}

+

object

+

{% verbatim %}A widget that defines a section header for easier navigation of the dashboard.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.thresholds[]

+

columnLayout.columns[].widgets[].sectionHeader.dividerBelow

Optional

-

object

-

{% verbatim %}{% endverbatim %}

+

boolean

+

{% verbatim %}Whether to insert a divider below the section in the table of contents{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.thresholds[].color

+

columnLayout.columns[].widgets[].sectionHeader.subtitle

Optional

string

-

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

+

{% verbatim %}The subtitle of the section{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.thresholds[].direction

+

columnLayout.columns[].widgets[].text

Optional

-

string

-

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

+

object

+

{% verbatim %}A raw string or markdown displaying textual content.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.thresholds[].label

+

columnLayout.columns[].widgets[].text.content

Optional

string

-

{% verbatim %}A label for the threshold.{% endverbatim %}

+

{% verbatim %}The text content to be displayed.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.thresholds[].value

+

columnLayout.columns[].widgets[].text.format

Optional

-

float

-

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

+

string

+

{% verbatim %}How the text content is formatted.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.timeshiftDuration

+

columnLayout.columns[].widgets[].text.style

Optional

-

string

-

{% verbatim %}The duration used to display a comparison chart. A comparison chart simultaneously shows values from two similar-length time periods (e.g., week-over-week metrics). The duration must be positive, and it can only be applied to charts with data sets of LINE plot type.{% endverbatim %}

+

object

+

{% verbatim %}How the text is styled{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.xAxis

+

columnLayout.columns[].widgets[].text.style.backgroundColor

Optional

-

object

-

{% verbatim %}The properties applied to the x-axis.{% endverbatim %}

+

string

+

{% verbatim %}The background color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.xAxis.label

+

columnLayout.columns[].widgets[].text.style.fontSize

Optional

string

-

{% verbatim %}The label of the axis.{% endverbatim %}

+

{% verbatim %}Font sizes for both the title and content. The title will still be larger relative to the content.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.xAxis.scale

+

columnLayout.columns[].widgets[].text.style.horizontalAlignment

Optional

string

-

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

+

{% verbatim %}The horizontal alignment of both the title and content{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.yAxis

+

columnLayout.columns[].widgets[].text.style.padding

Optional

-

object

-

{% verbatim %}The properties applied to the y-axis.{% endverbatim %}

+

string

+

{% verbatim %}The amount of padding around the widget{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.yAxis.label

+

columnLayout.columns[].widgets[].text.style.pointerLocation

Optional

string

-

{% verbatim %}The label of the axis.{% endverbatim %}

+

{% verbatim %}The pointer location for this widget (also sometimes called a "tail"){% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.yAxis.scale

+

columnLayout.columns[].widgets[].text.style.textColor

Optional

string

-

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

+

{% verbatim %}The text color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

-

displayName

-

Required

+

columnLayout.columns[].widgets[].text.style.verticalAlignment

+

Optional

string

-

{% verbatim %}Required. The mutable, human-readable name.{% endverbatim %}

+

{% verbatim %}The vertical alignment of both the title and content{% endverbatim %}
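A minimal sketch of the `text` widget and its new `style` block, using the field paths documented above; the format and style values are illustrative assumptions, so check the Monitoring dashboards API reference for the accepted enum values.

```yaml
# Hypothetical fragment of a MonitoringDashboard spec, not generated by this change.
columnLayout:
  columns:
  - widgets:
    - title: "Runbook"
      text:
        content: "See the team runbook for escalation steps."
        format: "MARKDOWN"              # illustrative value
        style:
          backgroundColor: "#FFF8E1"    # hex string per the field description
          textColor: "#212121"          # hex string per the field description
          fontSize: "FS_LARGE"          # illustrative enum value
          horizontalAlignment: "H_CENTER"  # illustrative enum value
```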

-

gridLayout

+

columnLayout.columns[].widgets[].title

Optional

-

object

-

{% verbatim %}Content is arranged with a basic layout that re-flows a simple list of informational elements like widgets or tiles.{% endverbatim %}

+

string

+

{% verbatim %}Optional. The title of the widget.{% endverbatim %}

-

gridLayout.columns

+

columnLayout.columns[].widgets[].xyChart

Optional

-

integer

-

{% verbatim %}The number of columns into which the view's width is divided. If omitted or set to zero, a system default will be used while rendering.{% endverbatim %}

+

object

+

{% verbatim %}A chart of time series data.{% endverbatim %}

-

gridLayout.widgets

+

columnLayout.columns[].widgets[].xyChart.chartOptions

Optional

-

list (object)

-

{% verbatim %}The informational elements that are arranged into the columns row-first.{% endverbatim %}

+

object

+

{% verbatim %}Display options for the chart.{% endverbatim %}

-

gridLayout.widgets[]

+

columnLayout.columns[].widgets[].xyChart.chartOptions.mode

Optional

-

object

-

{% verbatim %}{% endverbatim %}

+

string

+

{% verbatim %}The chart mode.{% endverbatim %}

-

gridLayout.widgets[].alertChart

-

Optional

+

columnLayout.columns[].widgets[].xyChart.dataSets

+

Required*

-

object

-

{% verbatim %}A chart of alert policy data.{% endverbatim %}

+

list (object)

+

{% verbatim %}Required. The data displayed in this chart.{% endverbatim %}

-

gridLayout.widgets[].alertChart.alertPolicyRef

+

columnLayout.columns[].widgets[].xyChart.dataSets[]

Required*

object

-

{% verbatim %}Required. A reference to the MonitoringAlertPolicy.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].alertChart.alertPolicyRef.external

+

columnLayout.columns[].widgets[].xyChart.dataSets[].legendTemplate

Optional

string

-

{% verbatim %}The MonitoringAlertPolicy link in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", when not managed by KCC.{% endverbatim %}

+

{% verbatim %}A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value.{% endverbatim %}

-

gridLayout.widgets[].alertChart.alertPolicyRef.name

+

columnLayout.columns[].widgets[].xyChart.dataSets[].minAlignmentPeriod

Optional

string

-

{% verbatim %}The `name` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

+

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}


-

gridLayout.widgets[].alertChart.alertPolicyRef.namespace

+

columnLayout.columns[].widgets[].xyChart.dataSets[].plotType

Optional

string

-

{% verbatim %}The `namespace` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

+

{% verbatim %}How this data should be plotted on the chart.{% endverbatim %}

-

gridLayout.widgets[].blank

-

Optional

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery

+

Required*

object

-

{% verbatim %}A blank space.{% endverbatim %}

+

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

-

gridLayout.widgets[].collapsibleGroup

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter

Optional

object

-

{% verbatim %}A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets.{% endverbatim %}

+

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

-

gridLayout.widgets[].collapsibleGroup.collapsed

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

Optional

-

boolean

-

{% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}

+

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

-

gridLayout.widgets[].logsPanel

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

-

object

-

{% verbatim %}A widget that shows a stream of logs.{% endverbatim %}

+

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

gridLayout.widgets[].logsPanel.filter

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

string

-

{% verbatim %}A filter that chooses which log entries to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries.{% endverbatim %}

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

-

gridLayout.widgets[].logsPanel.resourceNames

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

Optional

-

list (object)

-

{% verbatim %}The names of logging resources to collect logs for. Currently only projects are supported. If empty, the widget will default to the host project.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

gridLayout.widgets[].logsPanel.resourceNames[]

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

Optional

-

object

+

string

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].logsPanel.resourceNames[].external

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

string

-

{% verbatim %}The external name of the referenced resource{% endverbatim %}

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}
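To make the `timeSeriesFilter`/`aggregation` fields above concrete, here is a hedged sketch of one `xyChart` data set; the metric type, alignment values, and plot type are illustrative assumptions, not values taken from this patch.

```yaml
# Hypothetical fragment of a MonitoringDashboard spec, not generated by this change.
columnLayout:
  columns:
  - widgets:
    - title: "CPU utilization (mean per zone)"
      xyChart:
        dataSets:
        - plotType: "LINE"             # illustrative value
          minAlignmentPeriod: "60s"
          timeSeriesQuery:
            timeSeriesFilter:
              filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
              aggregation:
                alignmentPeriod: "60s"            # at least 60 seconds, per the description above
                perSeriesAligner: "ALIGN_MEAN"    # illustrative aligner
                crossSeriesReducer: "REDUCE_MEAN" # illustrative reducer
                groupByFields:
                - "resource.labels.zone"
```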

-

gridLayout.widgets[].logsPanel.resourceNames[].kind

-

Optional

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

+

Required*

string

-

{% verbatim %}Kind of the referent.{% endverbatim %}

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

-

gridLayout.widgets[].logsPanel.resourceNames[].name

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

Optional

-

string

-

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

+

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

-

gridLayout.widgets[].logsPanel.resourceNames[].namespace

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

Optional

string

-

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

-

gridLayout.widgets[].scorecard

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

Optional

-

object

-

{% verbatim %}A scorecard summarizing time series data.{% endverbatim %}

+

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

-

gridLayout.widgets[].scorecard.gaugeView

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

Optional

-

object

-

{% verbatim %}Will cause the scorecard to show a gauge chart.{% endverbatim %}

+

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}
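The `pickTimeSeriesFilter` fields above combine into a top-N selector; a hedged sketch follows, with the enum values as assumptions.

```yaml
# Hypothetical fragment: keep only the 5 series with the highest ranked value.
pickTimeSeriesFilter:
  numTimeSeries: 5
  direction: "TOP"              # illustrative value
  rankingMethod: "METHOD_MEAN"  # illustrative value
```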

-

gridLayout.widgets[].scorecard.gaugeView.lowerBound

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

Optional

-

float

-

{% verbatim %}The lower bound for this gauge chart. The value of the chart should always be greater than or equal to this.{% endverbatim %}

+

object

+

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

-

gridLayout.widgets[].scorecard.gaugeView.upperBound

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

-

float

-

{% verbatim %}The upper bound for this gauge chart. The value of the chart should always be less than or equal to this.{% endverbatim %}

+

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

gridLayout.widgets[].scorecard.sparkChartView

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

-

object

-

{% verbatim %}Will cause the scorecard to show a spark chart.{% endverbatim %}

+

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

-

gridLayout.widgets[].scorecard.sparkChartView.minAlignmentPeriod

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

Optional

-

string

-

{% verbatim %}The lower bound on data point frequency in the chart implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes it would not make sense to fetch and align data at one minute intervals. This field is optional and exists only as a hint.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

gridLayout.widgets[].scorecard.sparkChartView.sparkChartType

-

Required*

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

Optional

string

-

{% verbatim %}Required. The type of sparkchart to show in this chartView.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].scorecard.thresholds

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

-

list (object)

-

{% verbatim %}The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.)

As an example, consider a scorecard with the following four thresholds:

```
{
  value: 90,
  category: 'DANGER',
  trigger: 'ABOVE',
},
{
  value: 70,
  category: 'WARNING',
  trigger: 'ABOVE',
},
{
  value: 10,
  category: 'DANGER',
  trigger: 'BELOW',
},
{
  value: 20,
  category: 'WARNING',
  trigger: 'BELOW',
}
```

Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.{% endverbatim %}

+

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

-

gridLayout.widgets[].scorecard.thresholds[]

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

Optional

object

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

-

gridLayout.widgets[].scorecard.thresholds[].color

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

+

Optional

+ + +

object

+

{% verbatim %}The denominator of the ratio.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

Optional

string

-

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

gridLayout.widgets[].scorecard.thresholds[].direction

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

string

-

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

-

gridLayout.widgets[].scorecard.thresholds[].label

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

Optional

string

-

{% verbatim %}A label for the threshold.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].scorecard.thresholds[].value

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

-

float

-

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

+

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

Required*

-

object

-

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

+

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

Optional

object

-

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

+

{% verbatim %}The numerator of the ratio.{% endverbatim %}
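For the `timeSeriesFilterRatio` fields above, here is a hedged sketch of a numerator/denominator pair (an error-rate style ratio); the metric type, label, and aligner are illustrative assumptions.

```yaml
# Hypothetical fragment under ...dataSets[].timeSeriesQuery, not generated by this change.
timeSeriesFilterRatio:
  numerator:
    filter: 'metric.type="serviceruntime.googleapis.com/api/request_count" metric.labels.response_code_class="5xx"'
    aggregation:
      alignmentPeriod: "300s"
      perSeriesAligner: "ALIGN_RATE"   # illustrative aligner
  denominator:
    filter: 'metric.type="serviceruntime.googleapis.com/api/request_count"'
    aggregation:
      alignmentPeriod: "300s"
      perSeriesAligner: "ALIGN_RATE"   # illustrative aligner
```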

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

Optional

@@ -3246,7 +3412,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

@@ -3267,7 +3433,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

Optional

@@ -3290,7 +3456,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

Optional

@@ -3300,7 +3466,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

Optional

@@ -3310,7 +3476,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

@@ -3335,7 +3501,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.filter

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

Required*

@@ -3345,7 +3511,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

Optional

@@ -3355,7 +3521,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

Optional

@@ -3365,265 +3531,3408 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

Optional

+ + +

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

Optional

+ + +

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

Optional

+ + +

object

+

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

+

Optional

+ + +

string

+

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}
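Where `timeSeriesQueryLanguage` is used, the whole query is carried as a single MQL string; a hedged sketch follows, with the query text as an illustrative assumption.

```yaml
# Hypothetical fragment under ...dataSets[].timeSeriesQuery, not generated by this change.
timeSeriesQueryLanguage: |
  fetch gce_instance
  | metric 'compute.googleapis.com/instance/cpu/utilization'
  | group_by 1m, [value_utilization_mean: mean(value.utilization)]
  | every 1m
```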

+ + + + +

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.unitOverride

+

Optional

+ + +

string

+

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.thresholds

+

Optional

+ + +

list (object)

+

{% verbatim %}Threshold lines drawn horizontally across the chart.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.thresholds[]

+

Optional

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.thresholds[].color

+

Optional

+ + +

string

+

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.thresholds[].direction

+

Optional

+ + +

string

+

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.thresholds[].label

+

Optional

+ + +

string

+

{% verbatim %}A label for the threshold.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.thresholds[].value

+

Optional

+ + +

float

+

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.timeshiftDuration

+

Optional

+ + +

string

+

{% verbatim %}The duration used to display a comparison chart. A comparison chart simultaneously shows values from two similar-length time periods (e.g., week-over-week metrics). The duration must be positive, and it can only be applied to charts with data sets of LINE plot type.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.xAxis

+

Optional

+ + +

object

+

{% verbatim %}The properties applied to the x-axis.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.xAxis.label

+

Optional

+ + +

string

+

{% verbatim %}The label of the axis.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.xAxis.scale

+

Optional

+ + +

string

+

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.yAxis

+

Optional

+ + +

object

+

{% verbatim %}The properties applied to the y-axis.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.yAxis.label

+

Optional

+ + +

string

+

{% verbatim %}The label of the axis.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].xyChart.yAxis.scale

+

Optional

+ + +

string

+

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}
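A hedged sketch tying together the `thresholds`, `timeshiftDuration`, and axis fields above; the descriptions state that `color` and `direction` are not allowed on an `xyChart` threshold, so only `label` and `value` are set, and the scale and duration values are illustrative assumptions.

```yaml
# Hypothetical fragment under ...widgets[].xyChart, not generated by this change.
thresholds:
- label: "SLO limit"
  value: 0.85                  # defined in the native scale of the metric
timeshiftDuration: "604800s"   # week-over-week comparison; LINE plot type only
xAxis:
  label: "Time"
  scale: "LINEAR"              # illustrative value
yAxis:
  label: "CPU utilization"
  scale: "LINEAR"              # illustrative value
```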

+ + + + +

displayName

+

Required

+ + +

string

+

{% verbatim %}Required. The mutable, human-readable name.{% endverbatim %}

+ + + + +

gridLayout

+

Optional

+ + +

object

+

{% verbatim %}Content is arranged with a basic layout that re-flows a simple list of informational elements like widgets or tiles.{% endverbatim %}

+ + + + +

gridLayout.columns

+

Optional

+ + +

integer

+

{% verbatim %}The number of columns into which the view's width is divided. If omitted or set to zero, a system default will be used while rendering.{% endverbatim %}

+ + + + +

gridLayout.widgets

+

Optional

+ + +

list (object)

+

{% verbatim %}The informational elements that are arranged into the columns row-first.{% endverbatim %}

+ + + + +

gridLayout.widgets[]

+

Optional

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].alertChart

+

Optional

+ + +

object

+

{% verbatim %}A chart of alert policy data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].alertChart.alertPolicyRef

+

Required*

+ + +

object

+

{% verbatim %}Required. A reference to the MonitoringAlertPolicy.{% endverbatim %}

+ + + + +

gridLayout.widgets[].alertChart.alertPolicyRef.external

+

Optional

+ + +

string

+

{% verbatim %}The MonitoringAlertPolicy link in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", when not managed by KCC.{% endverbatim %}

+ + + + +

gridLayout.widgets[].alertChart.alertPolicyRef.name

+

Optional

+ + +

string

+

{% verbatim %}The `name` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

+ + + + +

gridLayout.widgets[].alertChart.alertPolicyRef.namespace

+

Optional

+ + +

string

+

{% verbatim %}The `namespace` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}
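A hedged sketch of a `gridLayout` alert chart: the `alertPolicyRef` can point at a KCC-managed `MonitoringAlertPolicy` by `name`/`namespace`, or at an unmanaged policy via `external`, as described above; the names and IDs below are assumptions.

```yaml
# Hypothetical fragment of a MonitoringDashboard spec, not generated by this change.
gridLayout:
  columns: 2
  widgets:
  - title: "Alert: high error rate"
    alertChart:
      alertPolicyRef:
        name: monitoringalertpolicy-sample    # KCC-managed policy
        namespace: monitoring
  - title: "Alert: legacy policy"
    alertChart:
      alertPolicyRef:
        external: "projects/example-project/alertPolicies/1234567890"  # form used when not managed by KCC
```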

+ + + + +

gridLayout.widgets[].blank

+

Optional

+ + +

object

+

{% verbatim %}A blank space.{% endverbatim %}

+ + + + +

gridLayout.widgets[].collapsibleGroup

+

Optional

+ + +

object

+

{% verbatim %}A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets.{% endverbatim %}

+ + + + +

gridLayout.widgets[].collapsibleGroup.collapsed

+

Optional

+ + +

boolean

+

{% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}
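The `gridLayout` fields above compose as in this hedged sketch, where `collapsibleGroup` marks a grouping widget and `blank` fills a cell; the column count and titles are illustrative.

```yaml
# Hypothetical fragment of a MonitoringDashboard spec, not generated by this change.
gridLayout:
  columns: 3
  widgets:
  - title: "Backend widgets"
    collapsibleGroup:
      collapsed: true   # group starts collapsed on first page load
  - blank: {}           # a blank space in the grid
```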

+ + + + +

gridLayout.widgets[].logsPanel

+

Optional

+ + +

object

+

{% verbatim %}A widget that shows a stream of logs.{% endverbatim %}

+ + + + +

gridLayout.widgets[].logsPanel.filter

+

Optional

+ + +

string

+

{% verbatim %}A filter that chooses which log entries to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries.{% endverbatim %}

+ + + + +

gridLayout.widgets[].logsPanel.resourceNames

+

Optional

+ + +

list (object)

+

{% verbatim %}The names of logging resources to collect logs for. Currently only projects are supported. If empty, the widget will default to the host project.{% endverbatim %}

+ + + + +

gridLayout.widgets[].logsPanel.resourceNames[]

+

Optional

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].logsPanel.resourceNames[].external

+

Optional

+ + +

string

+

{% verbatim %}The external name of the referenced resource{% endverbatim %}

+ + + + +

gridLayout.widgets[].logsPanel.resourceNames[].kind

+

Optional

+ + +

string

+

{% verbatim %}Kind of the referent.{% endverbatim %}

+ + + + +

gridLayout.widgets[].logsPanel.resourceNames[].name

+

Optional

+ + +

string

+

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

+ + + + +

gridLayout.widgets[].logsPanel.resourceNames[].namespace

+

Optional

+ + +

string

+

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}
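A hedged sketch of the `logsPanel` widget above; the filter string and project are illustrative, and per the description only project resource names are currently supported.

```yaml
# Hypothetical fragment under gridLayout.widgets[], not generated by this change.
- title: "Recent 5xx responses"
  logsPanel:
    filter: 'severity>=ERROR AND httpRequest.status>=500'
    resourceNames:
    - external: "projects/example-project"   # only projects are supported today
```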

+ + + + +

gridLayout.widgets[].pieChart

+

Optional

+ + +

object

+

{% verbatim %}A widget that displays timeseries data as a pie chart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.chartType

+

Required*

+ + +

string

+

{% verbatim %}Required. Indicates the visualization type for the PieChart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets

+

Required*

+ + +

list (object)

+

{% verbatim %}Required. The queries for the chart's data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[]

+

Required*

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].minAlignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].sliceNameTemplate

+

Optional

+ + +

string

+

{% verbatim %}Optional. A template for the name of the slice. This name will be displayed in the legend and the tooltip of the pie chart. It replaces the auto-generated names for the slices. For example, if the template is set to `${resource.labels.zone}`, the zone's value will be used for the name instead of the default name.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery

+

Required*

+ + +

object

+

{% verbatim %}Required. The query for the PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}
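A hedged sketch of the new `pieChart` widget using the fields documented above; the chart type, metric, and aggregation values are illustrative assumptions.

```yaml
# Hypothetical fragment under gridLayout.widgets[], not generated by this change.
- title: "Received bytes by zone"
  pieChart:
    chartType: "PIE"                      # illustrative value; see the API for the accepted set
    dataSets:
    - sliceNameTemplate: "${resource.labels.zone}"
      minAlignmentPeriod: "300s"
      timeSeriesQuery:
        timeSeriesFilter:
          filter: 'metric.type="compute.googleapis.com/instance/network/received_bytes_count" resource.type="gce_instance"'
          aggregation:
            alignmentPeriod: "300s"
            perSeriesAligner: "ALIGN_RATE"    # illustrative aligner
            crossSeriesReducer: "REDUCE_SUM"  # illustrative reducer
            groupByFields:
            - "resource.labels.zone"
```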

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

Optional

+ + +

string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

Optional

+ + +

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

Optional

+ + +

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

Optional

+ + +

object

+

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

+

Optional

+ + +

object

+

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

+

Optional

+ + +

object

+

{% verbatim %}The denominator of the ratio.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

string

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

+

Optional

+ + +

object

+

{% verbatim %}The numerator of the ratio.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

string

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

Optional

string

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

string

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

Optional

+ + +

string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

Optional

+ + +

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

Optional

+ + +

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

Optional

+ + +

object

+

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

Optional

string

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

Optional

string

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

Optional

string

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

+

Optional

+ + +

string

+

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.unitOverride

+

Optional

+ + +

string

+

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

+ + + + +
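The `timeSeriesFilterRatio` fields above combine a numerator and a denominator filter into a single ratio series. The fragment below is a sketch of a pie chart data set built this way; the metric types, label filters, and enum values are assumptions chosen only to illustrate the shape of the fields.

```yaml
# Illustrative fragment: a pieChart data set computing an error ratio from two
# filters and keeping only the top five series. Values are assumptions.
pieChart:
  dataSets:
    - timeSeriesQuery:
        timeSeriesFilterRatio:
          numerator:
            filter: 'metric.type="serviceruntime.googleapis.com/api/request_count" metric.label.response_code_class="5xx"'
            aggregation:
              alignmentPeriod: "300s"
              perSeriesAligner: "ALIGN_RATE"
              crossSeriesReducer: "REDUCE_SUM"
          denominator:
            filter: 'metric.type="serviceruntime.googleapis.com/api/request_count"'
            aggregation:
              alignmentPeriod: "300s"
              perSeriesAligner: "ALIGN_RATE"
              crossSeriesReducer: "REDUCE_SUM"
          pickTimeSeriesFilter:
            rankingMethod: "METHOD_MEAN"  # assumed enum value
            direction: "TOP"
            numTimeSeries: 5
        unitOverride: "1"
```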

gridLayout.widgets[].pieChart.showLabels

+

Optional

+ + +

boolean

+

{% verbatim %}Optional. Indicates whether or not the pie chart should show slices' labels{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard

+

Optional

+ + +

object

+

{% verbatim %}A scorecard summarizing time series data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.gaugeView

+

Optional

+ + +

object

+

{% verbatim %}Will cause the scorecard to show a gauge chart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.gaugeView.lowerBound

+

Optional

+ + +

float

+

{% verbatim %}The lower bound for this gauge chart. The value of the chart should always be greater than or equal to this.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.gaugeView.upperBound

+

Optional

+ + +

float

+

{% verbatim %}The upper bound for this gauge chart. The value of the chart should always be less than or equal to this.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.sparkChartView

+

Optional

+ + +

object

+

{% verbatim %}Will cause the scorecard to show a spark chart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.sparkChartView.minAlignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The lower bound on data point frequency in the chart implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes it would not make sense to fetch and align data at one minute intervals. This field is optional and exists only as a hint.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.sparkChartView.sparkChartType

+

Required*

+ + +

string

+

{% verbatim %}Required. The type of sparkchart to show in this chartView.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.thresholds

Optional

list (object)

{% verbatim %}The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.)

As an example, consider a scorecard with the following four thresholds:

```
{
  value: 90,
  category: 'DANGER',
  trigger: 'ABOVE',
},
{
  value: 70,
  category: 'WARNING',
  trigger: 'ABOVE',
},
{
  value: 10,
  category: 'DANGER',
  trigger: 'BELOW',
},
{
  value: 20,
  category: 'WARNING',
  trigger: 'BELOW',
}
```

Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.{% endverbatim %}

gridLayout.widgets[].scorecard.thresholds[]

+

Optional

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.thresholds[].color

+

Optional

+ + +

string

+

{% verbatim %}The state color for this threshold. Color is not allowed in an XyChart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.thresholds[].direction

+

Optional

+ + +

string

+

{% verbatim %}The direction for the current threshold. Direction is not allowed in an XyChart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.thresholds[].label

+

Optional

+ + +

string

+

{% verbatim %}A label for the threshold.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.thresholds[].value

+

Optional

+ + +

float

+

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

+ + + + +
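The threshold semantics above are easiest to see in context. The fragment below sketches a scorecard rendered as a gauge with a warning and a danger threshold; the metric, bounds, and enum values are assumptions used only to show how the fields relate.

```yaml
# Illustrative fragment: a gauge-style scorecard with warning/danger thresholds.
scorecard:
  timeSeriesQuery:
    timeSeriesFilter:
      filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization"'
      aggregation:
        alignmentPeriod: "60s"
        perSeriesAligner: "ALIGN_MEAN"
  gaugeView:
    lowerBound: 0.0   # chart value should never drop below this
    upperBound: 1.0   # chart value should never exceed this
  thresholds:
    - value: 0.7
      color: "YELLOW"     # assumed enum value
      direction: "ABOVE"
      label: "warning"
    - value: 0.9
      color: "RED"        # assumed enum value
      direction: "ABOVE"
      label: "danger"
```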

gridLayout.widgets[].scorecard.timeSeriesQuery

+

Required*

+ + +

object

+

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

string

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

string

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

string

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

Optional

+ + +

string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

Optional

+ + +

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

Optional

+ + +

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

Optional

+ + +

object

+

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

string

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

string

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

string

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio

+

Optional

+ + +

object

+

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator

+

Optional

+ + +

object

+

{% verbatim %}The denominator of the ratio.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

Optional

string

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

string

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

string

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator

+

Optional

+ + +

object

+

{% verbatim %}The numerator of the ratio.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

string

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

Optional

string

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

string

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

Optional

+ + +

string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

Optional

+ + +

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

Optional

+ + +

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

Optional

+ + +

object

+

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

Optional

string

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

Optional

string

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

Optional

string

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesQueryLanguage

+

Optional

+ + +

string

+

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.unitOverride

+

Optional

+ + +

string

+

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

+ + + + +
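Scorecards can also be driven by an MQL query through `timeSeriesQueryLanguage` instead of a filter. The fragment below is a sketch only; the MQL text, metric, and enum values are assumptions and are not part of the generated reference.

```yaml
# Illustrative fragment: a scorecard backed by an MQL query with a spark line.
scorecard:
  timeSeriesQuery:
    timeSeriesQueryLanguage: |
      fetch gce_instance
      | metric 'compute.googleapis.com/instance/cpu/utilization'
      | group_by 1m, [value_utilization_mean: mean(value.utilization)]
  sparkChartView:
    sparkChartType: "SPARK_LINE"   # assumed enum value
    minAlignmentPeriod: "60s"      # hint only; lower bound on point frequency
```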

gridLayout.widgets[].sectionHeader

+

Optional

+ + +

object

+

{% verbatim %}A widget that defines a section header for easier navigation of the dashboard.{% endverbatim %}

+ + + + +

gridLayout.widgets[].sectionHeader.dividerBelow

+

Optional

+ + +

boolean

+

{% verbatim %}Whether to insert a divider below the section in the table of contents{% endverbatim %}

+ + + + +

gridLayout.widgets[].sectionHeader.subtitle

+

Optional

+ + +

string

+

{% verbatim %}The subtitle of the section{% endverbatim %}

+ + + + +
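A section header widget only carries navigation metadata. As a minimal sketch (titles and values are assumptions):

```yaml
# Illustrative fragment: a section header with a subtitle and a divider in the
# generated table of contents.
sectionHeader:
  subtitle: "Frontend services"
  dividerBelow: true
```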

gridLayout.widgets[].text

+

Optional

+ + +

object

+

{% verbatim %}A raw string or markdown displaying textual content.{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.content

+

Optional

+ + +

string

+

{% verbatim %}The text content to be displayed.{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.format

+

Optional

+ + +

string

+

{% verbatim %}How the text content is formatted.{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.style

+

Optional

+ + +

object

+

{% verbatim %}How the text is styled{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.style.backgroundColor

+

Optional

+ + +

string

+

{% verbatim %}The background color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.style.fontSize

+

Optional

+ + +

string

+

{% verbatim %}Font sizes for both the title and content. The title will still be larger relative to the content.{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.style.horizontalAlignment

+

Optional

+ + +

string

+

{% verbatim %}The horizontal alignment of both the title and content{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.style.padding

+

Optional

+ + +

string

+

{% verbatim %}The amount of padding around the widget{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.style.pointerLocation

+

Optional

+ + +

string

+

{% verbatim %}The pointer location for this widget (also sometimes called a "tail"){% endverbatim %}

+ + + + +

gridLayout.widgets[].text.style.textColor

+

Optional

+ + +

string

+

{% verbatim %}The text color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.style.verticalAlignment

+

Optional

+ + +

string

+

{% verbatim %}The vertical alignment of both the title and content{% endverbatim %}

+ + + + +
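The text widget fields above map onto a content string plus a style block. The fragment below is a sketch only; the format, color, alignment, and padding values shown are assumptions about the allowed enum values, not values taken from this reference.

```yaml
# Illustrative fragment: a markdown text widget with styling.
text:
  content: |
    ## Service overview
    Links and notes for the on-call engineer.
  format: "MARKDOWN"
  style:
    backgroundColor: "#FFFFFF"
    textColor: "#212121"
    fontSize: "FS_LARGE"              # assumed enum value
    horizontalAlignment: "H_LEFT"     # assumed enum value
    verticalAlignment: "V_TOP"        # assumed enum value
    padding: "P_EXTRA_SMALL"          # assumed enum value
```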

gridLayout.widgets[].title

+

Optional

+ + +

string

+

{% verbatim %}Optional. The title of the widget.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart

+

Optional

+ + +

object

+

{% verbatim %}A chart of time series data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.chartOptions

+

Optional

+ + +

object

+

{% verbatim %}Display options for the chart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.chartOptions.mode

+

Optional

+ + +

string

+

{% verbatim %}The chart mode.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets

+

Required*

+ + +

list (object)

+

{% verbatim %}Required. The data displayed in this chart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[]

+

Required*

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].legendTemplate

+

Optional

+ + +

string

+

{% verbatim %}A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].minAlignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].plotType

+

Optional

+ + +

string

+

{% verbatim %}How this data should be plotted on the chart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery

+

Required*

+ + +

object

+

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

string

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

string

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

string

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +
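Putting the xyChart data set fields together, the fragment below sketches a single line plot with a legend template and a minimum alignment period. The metric type, template, and enum values are assumptions used only to illustrate the field layout.

```yaml
# Illustrative fragment: an xyChart data set plotting a rate as a line.
xyChart:
  dataSets:
    - plotType: "LINE"
      legendTemplate: "zone: ${resource.labels.zone}"  # assumed label name
      minAlignmentPeriod: "60s"
      timeSeriesQuery:
        timeSeriesFilter:
          filter: 'metric.type="compute.googleapis.com/instance/network/received_bytes_count"'
          aggregation:
            alignmentPeriod: "60s"
            perSeriesAligner: "ALIGN_RATE"
```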

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

Optional

+ + +

string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

Optional

+ + +

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

Optional

+ + +

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

Optional

+ + +

object

+

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

string

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

string

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +
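Read together, the aggregation fields above form a two-stage pipeline: `alignmentPeriod` and `perSeriesAligner` bucket each series in time, then `crossSeriesReducer` and `groupByFields` combine the aligned series. A minimal sketch of how they might be combined inside a data set's `timeSeriesFilter`; the metric type, period, and aligner/reducer values are illustrative, not taken from this table:

```yaml
# Hypothetical fragment of a MonitoringDashboard data set.
timeSeriesQuery:
  timeSeriesFilter:
    filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization"'
    aggregation:
      alignmentPeriod: 60s            # must be at least 60 seconds when an aligner is set
      perSeriesAligner: ALIGN_MEAN    # alignment is required before cross-series reduction
      crossSeriesReducer: REDUCE_MEAN # combines the aligned series into one per group
      groupByFields:
        - resource.labels.zone        # keep one output series per zone
```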

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

+

Optional

+ + +

object

+

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

+

Optional

+ + +

object

+

{% verbatim %}The denominator of the ratio.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

+

Optional

+ + +

object

+

{% verbatim %}The numerator of the ratio.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +
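`timeSeriesFilterRatio` divides one filtered stream by another: both `numerator.filter` and `denominator.filter` are required once the ratio form is used, and each side may carry its own `aggregation`. A hedged sketch with placeholder metric and label names:

```yaml
# Hypothetical ratio query: share of 5xx responses among all requests.
timeSeriesQuery:
  timeSeriesFilterRatio:
    numerator:
      filter: 'metric.type="serviceruntime.googleapis.com/api/request_count" metric.labels.response_code_class="5xx"'
      aggregation:
        alignmentPeriod: 300s
        perSeriesAligner: ALIGN_RATE
    denominator:
      filter: 'metric.type="serviceruntime.googleapis.com/api/request_count"'
      aggregation:
        alignmentPeriod: 300s
        perSeriesAligner: ALIGN_RATE
```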

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

Optional

+ + +

string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

Optional

+ + +

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

Optional

+ + +

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+ + + + +
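`pickTimeSeriesFilter` trims the ratio result to the most (or least) significant series: `rankingMethod` produces one comparison value per series, `direction` chooses which end of the ranking to keep, and `numTimeSeries` caps how many pass through. A sketch that assumes the enum spellings below, which are not listed in this table:

```yaml
timeSeriesQuery:
  timeSeriesFilterRatio:
    # numerator and denominator omitted for brevity
    pickTimeSeriesFilter:
      rankingMethod: METHOD_MEAN # rank each series by its mean value (assumed enum name)
      direction: TOP             # keep the highest-ranked series (assumed enum name)
      numTimeSeries: 5           # allow at most five series through the filter
```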

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

Optional

+ + +

object

+

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

+

Optional

+ + +

string

+

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.unitOverride

+

Optional

+ + +

string

+

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

+ + + + +
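As an alternative to the filter-based forms, `timeSeriesQueryLanguage` carries an MQL query, and `unitOverride` relabels the unit of whatever data comes back. A hedged sketch; the MQL text and unit are illustrative:

```yaml
timeSeriesQuery:
  timeSeriesQueryLanguage: |
    fetch gce_instance
    | metric 'compute.googleapis.com/instance/cpu/utilization'
    | group_by 1m, [value_utilization_mean: mean(value.utilization)]
  unitOverride: "%"   # display the fetched values as a percentage
```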

gridLayout.widgets[].xyChart.thresholds

+

Optional

+ + +

list (object)

+

{% verbatim %}Threshold lines drawn horizontally across the chart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.thresholds[]

+

Optional

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.thresholds[].color

+

Optional

+ + +

string

+

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.thresholds[].direction

+

Optional

+ + +

string

+

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.thresholds[].label

+

Optional

+ + +

string

+

{% verbatim %}A label for the threshold.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.thresholds[].value

+

Optional

+ + +

float

+

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.timeshiftDuration

+

Optional

+ + +

string

+

{% verbatim %}The duration used to display a comparison chart. A comparison chart simultaneously shows values from two similar-length time periods (e.g., week-over-week metrics). The duration must be positive, and it can only be applied to charts with data sets of LINE plot type.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.xAxis

+

Optional

+ + +

object

+

{% verbatim %}The properties applied to the x-axis.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.xAxis.label

+

Optional

+ + +

string

+

{% verbatim %}The label of the axis.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.xAxis.scale

+

Optional

+ + +

string

+

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.yAxis

+

Optional

+ + +

object

+

{% verbatim %}The properties applied to the y-axis.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.yAxis.label

+

Optional

+ + +

string

+

{% verbatim %}The label of the axis.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.yAxis.scale

+

Optional

+ + +

string

+

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

+ + + + +
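The remaining `xyChart` fields shape presentation rather than data: `thresholds` draws horizontal reference lines, `timeshiftDuration` overlays the same-length earlier window, and `xAxis`/`yAxis` set labels and scale. A minimal sketch with placeholder labels and values; `LINEAR` is assumed to be an accepted scale value:

```yaml
xyChart:
  # dataSets omitted for brevity
  thresholds:
    - label: "High CPU"       # placeholder label
      value: 0.8              # expressed in the metric's native scale
  timeshiftDuration: 86400s   # compare against the window one day earlier (LINE plots only)
  yAxis:
    label: "CPU utilization"
    scale: LINEAR             # assumed enum value
```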

mosaicLayout

+

Optional

+ + +

object

+

{% verbatim %}The content is arranged as a grid of tiles, with each content widget occupying one or more grid blocks.{% endverbatim %}

+ + + + +

mosaicLayout.columns

+

Optional

+ + +

integer

+

{% verbatim %}The number of columns in the mosaic grid. The number of columns must be between 1 and 12, inclusive.{% endverbatim %}

+ + + + +

mosaicLayout.tiles

+

Optional

+ + +

list (object)

+

{% verbatim %}The tiles to display.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[]

+

Optional

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].height

+

Optional

+ + +

integer

+

{% verbatim %}The height of the tile, measured in grid blocks. Tiles must have a minimum height of 1.{% endverbatim %}

+ + + + +
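`mosaicLayout` places each widget on a grid of at most 12 columns; every tile sizes itself in grid blocks and carries exactly one widget. A sketch restricted to the fields documented here (the tile's other geometry fields are omitted, and the title is a placeholder):

```yaml
mosaicLayout:
  columns: 12          # between 1 and 12, inclusive
  tiles:
    - height: 4        # measured in grid blocks, minimum 1
      widget:
        title: "Example widget"   # placeholder
        # one content field (xyChart, scorecard, logsPanel, ...) goes here
```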

mosaicLayout.tiles[].widget

+

Optional

+ + +

object

+

{% verbatim %}The informational widget contained in the tile. For example an `XyChart`.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.alertChart

+

Optional

+ + +

object

+

{% verbatim %}A chart of alert policy data.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.alertChart.alertPolicyRef

+

Required*

+ + +

object

+

{% verbatim %}Required. A reference to the MonitoringAlertPolicy.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.alertChart.alertPolicyRef.external

+

Optional

+ + +

string

+

{% verbatim %}The MonitoringAlertPolicy link in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", when not managed by KCC.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.alertChart.alertPolicyRef.name

+

Optional

+ + +

string

+

{% verbatim %}The `name` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.alertChart.alertPolicyRef.namespace

+

Optional

+ + +

string

+

{% verbatim %}The `namespace` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

+ + + + +
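`alertChart.alertPolicyRef` follows the usual Config Connector reference convention: either `name` (optionally with `namespace`) points at a `MonitoringAlertPolicy` managed in the cluster, or `external` carries the full policy link for one that is not managed by KCC. A hedged sketch with placeholder names:

```yaml
widget:
  alertChart:
    alertPolicyRef:
      # Reference a MonitoringAlertPolicy managed by Config Connector...
      name: example-alert-policy        # placeholder resource name
      # ...or, for an unmanaged policy, use `external` instead of `name`:
      # external: "projects/example-project/alertPolicies/1234567890"
```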

mosaicLayout.tiles[].widget.blank

+

Optional

+ + +

object

+

{% verbatim %}A blank space.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.collapsibleGroup

Optional

-

integer

-

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+

object

+

{% verbatim %}A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

mosaicLayout.tiles[].widget.collapsibleGroup.collapsed

Optional

-

string

-

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+

boolean

+

{% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

mosaicLayout.tiles[].widget.logsPanel

Optional

object

-

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

+

{% verbatim %}A widget that shows a stream of logs.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.logsPanel.filter

Optional

string

-

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+

{% verbatim %}A filter that chooses which log entries to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.logsPanel.resourceNames

Optional

-

string

-

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+

list (object)

+

{% verbatim %}The names of logging resources to collect logs for. Currently only projects are supported. If empty, the widget will default to the host project.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

mosaicLayout.tiles[].widget.logsPanel.resourceNames[]

Optional

-

list (string)

-

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+

object

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.logsPanel.resourceNames[].external

Optional

string

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}The external name of the referenced resource{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.logsPanel.resourceNames[].kind

Optional

string

-

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+

{% verbatim %}Kind of the referent.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio

+

mosaicLayout.tiles[].widget.logsPanel.resourceNames[].name

Optional

-

object

-

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

+

string

+

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator

+

mosaicLayout.tiles[].widget.logsPanel.resourceNames[].namespace

Optional

-

object

-

{% verbatim %}The denominator of the ratio.{% endverbatim %}

+

string

+

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}
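`logsPanel` embeds a log stream in the dashboard: `filter` uses the Logging query language, and `resourceNames` lists the projects to read from (an empty list falls back to the host project). A hedged sketch; the filter text, project name, and the `projects/...` format of `external` are assumptions:

```yaml
widget:
  logsPanel:
    filter: 'severity>=ERROR AND resource.type="k8s_container"'   # Logging query language
    resourceNames:
      - external: "projects/example-project"   # placeholder; only projects are supported
```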

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

mosaicLayout.tiles[].widget.pieChart

Optional

object

-

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+

{% verbatim %}A widget that displays timeseries data as a pie chart.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

-

Optional

+

mosaicLayout.tiles[].widget.pieChart.chartType

+

Required*

string

-

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+

{% verbatim %}Required. Indicates the visualization type for the PieChart.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

-

Optional

+

mosaicLayout.tiles[].widget.pieChart.dataSets

+

Required*

-

string

-

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+

list (object)

+

{% verbatim %}Required. The queries for the chart's data.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

-

Optional

+

mosaicLayout.tiles[].widget.pieChart.dataSets[]

+

Required*

-

list (string)

-

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+

object

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].minAlignmentPeriod

Optional

string

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].sliceNameTemplate

Optional

string

-

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+

{% verbatim %}Optional. A template for the name of the slice. This name will be displayed in the legend and the tooltip of the pie chart. It replaces the auto-generated names for the slices. For example, if the template is set to `${resource.labels.zone}`, the zone's value will be used for the name instead of the default name.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery

Required*

-

string

-

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+

object

+

{% verbatim %}Required. The query for the PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter

Optional

object

-

{% verbatim %}The numerator of the ratio.{% endverbatim %}

+

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

Optional

@@ -3633,7 +6942,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

@@ -3654,7 +6963,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

@@ -3677,7 +6986,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

Optional

@@ -3687,7 +6996,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

Optional

@@ -3697,7 +7006,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

@@ -3722,7 +7031,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

Required*

@@ -3732,7 +7041,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

Optional

@@ -3742,7 +7051,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

Optional

@@ -3752,7 +7061,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -3762,7 +7071,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

Optional

@@ -3772,17 +7081,17 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

Optional

object

-

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

+

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

@@ -3803,7 +7112,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

@@ -3826,7 +7135,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

Optional

@@ -3836,7 +7145,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

Optional

@@ -3846,7 +7155,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

@@ -3871,287 +7180,305 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesQueryLanguage

-

Optional

- - -

string

-

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

- - - - -

gridLayout.widgets[].scorecard.timeSeriesQuery.unitOverride

-

Optional

- - -

string

-

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

- - - - -

gridLayout.widgets[].sectionHeader

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

Optional

object

-

{% verbatim %}A widget that defines a section header for easier navigation of the dashboard.{% endverbatim %}

- - - - -

gridLayout.widgets[].sectionHeader.dividerBelow

-

Optional

- - -

boolean

-

{% verbatim %}Whether to insert a divider below the section in the table of contents{% endverbatim %}

+

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

-

gridLayout.widgets[].sectionHeader.subtitle

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

Optional

-

string

-

{% verbatim %}The subtitle of the section{% endverbatim %}

+

object

+

{% verbatim %}The denominator of the ratio.{% endverbatim %}

-

gridLayout.widgets[].text

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

Optional

object

-

{% verbatim %}A raw string or markdown displaying textual content.{% endverbatim %}

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

-

gridLayout.widgets[].text.content

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

Optional

string

-

{% verbatim %}The text content to be displayed.{% endverbatim %}

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

gridLayout.widgets[].text.format

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

string

-

{% verbatim %}How the text content is formatted.{% endverbatim %}

- - - - -

gridLayout.widgets[].text.style

-

Optional

- - -

object

-

{% verbatim %}How the text is styled{% endverbatim %}

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

-

gridLayout.widgets[].text.style.backgroundColor

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

Optional

-

string

-

{% verbatim %}The background color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

gridLayout.widgets[].text.style.fontSize

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

Optional

string

-

{% verbatim %}Font sizes for both the title and content. The title will still be larger relative to the content.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].text.style.horizontalAlignment

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

string

-

{% verbatim %}The horizontal alignment of both the title and content{% endverbatim %}

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

-

gridLayout.widgets[].text.style.padding

-

Optional

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

Required*

string

-

{% verbatim %}The amount of padding around the widget{% endverbatim %}

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

-

gridLayout.widgets[].text.style.pointerLocation

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

Optional

-

string

-

{% verbatim %}The pointer location for this widget (also sometimes called a "tail"){% endverbatim %}

+

object

+

{% verbatim %}The numerator of the ratio.{% endverbatim %}

-

gridLayout.widgets[].text.style.textColor

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

Optional

-

string

-

{% verbatim %}The text color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

+

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

-

gridLayout.widgets[].text.style.verticalAlignment

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

string

-

{% verbatim %}The vertical alignment of both the title and content{% endverbatim %}

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

gridLayout.widgets[].title

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

Optional

string

-

{% verbatim %}Optional. The title of the widget.{% endverbatim %}

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

-

gridLayout.widgets[].xyChart

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

Optional

-

object

-

{% verbatim %}A chart of time series data.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

gridLayout.widgets[].xyChart.chartOptions

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

Optional

-

object

-

{% verbatim %}Display options for the chart.{% endverbatim %}

+

string

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].xyChart.chartOptions.mode

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

string

-

{% verbatim %}The chart mode.{% endverbatim %}

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

Required*

-

list (object)

-

{% verbatim %}Required. The data displayed in this chart.{% endverbatim %}

+

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[]

-

Required*

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

Optional

object

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].legendTemplate

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

Optional

string

-

{% verbatim %}A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value.{% endverbatim %}

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].minAlignmentPeriod

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

Optional

-

string

-

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

+

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].plotType

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

Optional

string

-

{% verbatim %}How this data should be plotted on the chart.{% endverbatim %}

- - - - -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery

-

Required*

- - -

object

-

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

- - - - -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter

-

Optional

- - -

object

-

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

Optional

object

-

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

Optional

@@ -4172,7 +7499,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

Optional

@@ -4195,7 +7522,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

Optional

@@ -4205,7 +7532,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

Optional

@@ -4215,7 +7542,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

Optional

@@ -4240,176 +7567,224 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

-

Required*

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

+

Optional

string

-

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.unitOverride

+

Optional


string

+

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}


mosaicLayout.tiles[].widget.pieChart.showLabels

+

Optional


boolean

+

{% verbatim %}Optional. Indicates whether or not the pie chart should show slices' labels{% endverbatim %}
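To show how the pieChart ratio and ranking fields above fit together, here is a minimal sketch of a single `mosaicLayout.tiles[].widget` fragment; the metric filters, `numTimeSeries`, and the enum values are illustrative assumptions, not values taken from this patch.

```yaml
# Hypothetical pieChart widget fragment; filters and enum values are illustrative.
widget:
  title: "5xx ratio by consumer"
  pieChart:
    showLabels: true
    dataSets:
    - timeSeriesQuery:
        unitOverride: "1"
        timeSeriesFilterRatio:
          numerator:
            filter: 'metric.type="serviceruntime.googleapis.com/api/request_count" metric.label.response_code_class="5xx"'
          denominator:
            filter: 'metric.type="serviceruntime.googleapis.com/api/request_count"'
          pickTimeSeriesFilter:
            rankingMethod: METHOD_MEAN   # assumed enum value
            numTimeSeries: 5
            direction: TOP               # assumed enum value
```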


mosaicLayout.tiles[].widget.scorecard

Optional

object

-

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+

{% verbatim %}A scorecard summarizing time series data.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

mosaicLayout.tiles[].widget.scorecard.gaugeView

Optional

-

string

-

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+

object

+

{% verbatim %}Will cause the scorecard to show a gauge chart.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

mosaicLayout.tiles[].widget.scorecard.gaugeView.lowerBound

Optional

-

integer

-

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+

float

+

{% verbatim %}The lower bound for this gauge chart. The value of the chart should always be greater than or equal to this.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

mosaicLayout.tiles[].widget.scorecard.gaugeView.upperBound

+

Optional


float

+

{% verbatim %}The upper bound for this gauge chart. The value of the chart should always be less than or equal to this.{% endverbatim %}


mosaicLayout.tiles[].widget.scorecard.sparkChartView

+

Optional


object

+

{% verbatim %}Will cause the scorecard to show a spark chart.{% endverbatim %}


mosaicLayout.tiles[].widget.scorecard.sparkChartView.minAlignmentPeriod

Optional

string

-

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+

{% verbatim %}The lower bound on data point frequency in the chart implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes it would not make sense to fetch and align data at one minute intervals. This field is optional and exists only as a hint.{% endverbatim %}


mosaicLayout.tiles[].widget.scorecard.sparkChartView.sparkChartType

+

Required*


string

+

{% verbatim %}Required. The type of sparkchart to show in this chartView.{% endverbatim %}


mosaicLayout.tiles[].widget.scorecard.thresholds

+

Optional


list (object)

+

{% verbatim %}The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.)

As an example, consider a scorecard with the following four thresholds:

```
{
  value: 90,
  category: 'DANGER',
  trigger: 'ABOVE',
},
{
  value: 70,
  category: 'WARNING',
  trigger: 'ABOVE',
},
{
  value: 10,
  category: 'DANGER',
  trigger: 'BELOW',
},
{
  value: 20,
  category: 'WARNING',
  trigger: 'BELOW',
}
```

Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.{% endverbatim %}
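As a concrete counterpart to the description above, the fragment below sketches a scorecard tile that combines a gauge view with warning and danger thresholds. The `category`/`trigger` names in the quoted example appear to correspond to the `color` and `direction` fields listed here; the filter, bounds, and enum values in the sketch are illustrative assumptions.

```yaml
# Hypothetical scorecard widget fragment; the filter and numeric values are illustrative.
widget:
  title: "CPU utilization"
  scorecard:
    timeSeriesQuery:
      timeSeriesFilter:
        filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
        aggregation:
          alignmentPeriod: "60s"
          perSeriesAligner: ALIGN_MEAN
          crossSeriesReducer: REDUCE_MEAN
    gaugeView:
      lowerBound: 0.0
      upperBound: 1.0
    thresholds:
    - value: 0.7
      color: YELLOW      # assumed enum value
      direction: ABOVE   # assumed enum value
      label: "warning"
    - value: 0.9
      color: RED         # assumed enum value
      direction: ABOVE   # assumed enum value
      label: "danger"
```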

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

mosaicLayout.tiles[].widget.scorecard.thresholds[]

Optional

object

-

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.scorecard.thresholds[].color

Optional

string

-

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.

The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.

The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.scorecard.thresholds[].direction

Optional

string

-

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.

Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series.

Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}


gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

-

Optional


list (string)

-

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.scorecard.thresholds[].label

Optional

string

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}A label for the threshold.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.scorecard.thresholds[].value

Optional

-

string

-

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period.

Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series.

Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+

float

+

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

-

Optional

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery

+

Required*

object

-

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

+

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter

Optional

object

-

{% verbatim %}The denominator of the ratio.{% endverbatim %}

+

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation

Optional

@@ -4419,7 +7794,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

@@ -4440,7 +7815,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

@@ -4463,7 +7838,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

Optional

@@ -4473,7 +7848,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

Optional

@@ -4483,7 +7858,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

@@ -4508,7 +7883,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.filter

Required*

@@ -4518,27 +7893,57 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

Optional

object

-

{% verbatim %}The numerator of the ratio.{% endverbatim %}

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

Optional


string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}


mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

Optional


integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}


mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

Optional


string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}
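Complementing the gauge sketch earlier, the fragment below sketches how `pickTimeSeriesFilter` can narrow a scorecard to the single highest-ranked series while a spark chart shows its recent history; the filter and enum values are illustrative assumptions.

```yaml
# Hypothetical scorecard widget fragment; filter and enum values are illustrative.
widget:
  title: "Busiest instance"
  scorecard:
    sparkChartView:
      sparkChartType: SPARK_LINE     # assumed enum value
      minAlignmentPeriod: "60s"
    timeSeriesQuery:
      timeSeriesFilter:
        filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
        pickTimeSeriesFilter:
          rankingMethod: METHOD_MAX  # assumed enum value
          numTimeSeries: 1
          direction: TOP             # assumed enum value
```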


mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation

Optional

object

-

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

@@ -4559,7 +7964,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

@@ -4582,7 +7987,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

Optional

@@ -4592,7 +7997,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

Optional

@@ -4602,7 +8007,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

@@ -4627,67 +8032,37 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

-

Required*


string

-

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}


gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio

Optional

object

-

{% verbatim %}Ranking based time series filter.{% endverbatim %}


gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

-

Optional


string

-

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}


gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

-

Optional


integer

-

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator

Optional

-

string

-

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+

object

+

{% verbatim %}The denominator of the ratio.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

Optional

object

-

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

Optional

@@ -4708,7 +8083,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

@@ -4731,7 +8106,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

Optional

@@ -4741,7 +8116,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

Optional

@@ -4751,7 +8126,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

@@ -4771,500 +8146,491 @@ rowLayout:
series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is
- returned.{% endverbatim %}


gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

-

Optional


string

-

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}


gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.unitOverride

-

Optional


string

-

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}


gridLayout.widgets[].xyChart.thresholds

-

Optional


list (object)

-

{% verbatim %}Threshold lines drawn horizontally across the chart.{% endverbatim %}


gridLayout.widgets[].xyChart.thresholds[]

-

Optional


object

-

{% verbatim %}{% endverbatim %}

+ returned.{% endverbatim %}

-

gridLayout.widgets[].xyChart.thresholds[].color

-

Optional

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

Required*

string

-

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

-

gridLayout.widgets[].xyChart.thresholds[].direction

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator

Optional

-

string

-

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

+

object

+

{% verbatim %}The numerator of the ratio.{% endverbatim %}

-

gridLayout.widgets[].xyChart.thresholds[].label

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

Optional

-

string

-

{% verbatim %}A label for the threshold.{% endverbatim %}

+

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

-

gridLayout.widgets[].xyChart.thresholds[].value

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

-

float

-

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

+

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.

The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.

The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

gridLayout.widgets[].xyChart.timeshiftDuration

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

Optional

string

-

{% verbatim %}The duration used to display a comparison chart. A comparison chart simultaneously shows values from two similar-length time periods (e.g., week-over-week metrics). The duration must be positive, and it can only be applied to charts with data sets of LINE plot type.{% endverbatim %}

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.

Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series.

Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

-

gridLayout.widgets[].xyChart.xAxis

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

Optional

-

object

-

{% verbatim %}The properties applied to the x-axis.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

gridLayout.widgets[].xyChart.xAxis.label

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

Optional

string

-

{% verbatim %}The label of the axis.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].xyChart.xAxis.scale

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

string

-

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}


gridLayout.widgets[].xyChart.yAxis

-

Optional


object

-

{% verbatim %}The properties applied to the y-axis.{% endverbatim %}

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period.

Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series.

Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}
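Putting the aligner, reducer, and alignment-period notes above together, the fragment below sketches a ratio query whose numerator and denominator are both aligned and reduced before the ratio is taken; the metric, label, and enum values are illustrative assumptions.

```yaml
# Hypothetical timeSeriesFilterRatio fragment; metric names, labels, and enums are illustrative.
timeSeriesQuery:
  timeSeriesFilterRatio:
    numerator:
      filter: 'metric.type="loadbalancing.googleapis.com/https/request_count" metric.label.response_code_class="500"'
      aggregation:
        alignmentPeriod: "300s"         # must be at least 60s when an aligner other than ALIGN_NONE is used
        perSeriesAligner: ALIGN_RATE    # align each series before reducing
        crossSeriesReducer: REDUCE_SUM  # requires a non-ALIGN_NONE aligner and an alignmentPeriod
        groupByFields:
        - resource.label.backend_target_name
    denominator:
      filter: 'metric.type="loadbalancing.googleapis.com/https/request_count"'
      aggregation:
        alignmentPeriod: "300s"
        perSeriesAligner: ALIGN_RATE
        crossSeriesReducer: REDUCE_SUM
        groupByFields:
        - resource.label.backend_target_name
```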

-

gridLayout.widgets[].xyChart.yAxis.label

-

Optional

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

Required*

string

-

{% verbatim %}The label of the axis.{% endverbatim %}

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

-

gridLayout.widgets[].xyChart.yAxis.scale

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

Optional

-

string

-

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

+

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

-

mosaicLayout

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

Optional

-

object

-

{% verbatim %}The content is arranged as a grid of tiles, with each content widget occupying one or more grid blocks.{% endverbatim %}

+

string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

-

mosaicLayout.columns

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

Optional

integer

-

{% verbatim %}The number of columns in the mosaic grid. The number of columns must be between 1 and 12, inclusive.{% endverbatim %}

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

-

mosaicLayout.tiles

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

Optional

-

list (object)

-

{% verbatim %}The tiles to display.{% endverbatim %}

+

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

-

mosaicLayout.tiles[]

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

Optional

object

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

-

mosaicLayout.tiles[].height

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

Optional

-

integer

-

{% verbatim %}The height of the tile, measured in grid blocks. Tiles must have a minimum height of 1.{% endverbatim %}
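For the mosaic layout fields above, a minimal sketch of a full resource might look like the following; the `apiVersion`/`kind`, the names, and the `width`/`xPos`/`yPos` tile fields (taken from the Monitoring dashboards API rather than this excerpt) are assumptions.

```yaml
# Hypothetical MonitoringDashboard sketch; names and layout values are illustrative,
# and width/xPos/yPos are assumed from the Monitoring dashboards API.
apiVersion: monitoring.cnrm.cloud.google.com/v1beta1
kind: MonitoringDashboard
metadata:
  name: monitoringdashboard-mosaic-sample
spec:
  displayName: "Mosaic layout sample"
  mosaicLayout:
    columns: 12
    tiles:
    - xPos: 0
      yPos: 0
      width: 12
      height: 2
      widget:
        sectionHeader:
          subtitle: "Frontend services"
          dividerBelow: true
    - xPos: 0
      yPos: 2
      width: 6
      height: 4
      widget:
        title: "Notes"
        text:
          content: "Widgets for the frontend fleet live in this section."
          format: MARKDOWN   # assumed enum value
```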

+

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.

The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.

The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

mosaicLayout.tiles[].widget

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

Optional

-

object

-

{% verbatim %}The informational widget contained in the tile. For example an `XyChart`.{% endverbatim %}

+

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.

Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series.

Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

-

mosaicLayout.tiles[].widget.alertChart

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

Optional

-

object

-

{% verbatim %}A chart of alert policy data.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

mosaicLayout.tiles[].widget.alertChart.alertPolicyRef

-

Required*

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

Optional

-

object

-

{% verbatim %}Required. A reference to the MonitoringAlertPolicy.{% endverbatim %}

+

string

+

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].widget.alertChart.alertPolicyRef.external

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

Optional

string

-

{% verbatim %}The MonitoringAlertPolicy link in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", when not managed by KCC.{% endverbatim %}

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period.

Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series.

Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

-

mosaicLayout.tiles[].widget.alertChart.alertPolicyRef.name

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesQueryLanguage

Optional

string

-

{% verbatim %}The `name` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

+

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

-

mosaicLayout.tiles[].widget.alertChart.alertPolicyRef.namespace

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.unitOverride

Optional

string

-

{% verbatim %}The `namespace` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}
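The alert-chart reference fields above resolve either to a KCC-managed `MonitoringAlertPolicy` (via `name`/`namespace`) or to an existing policy (via `external`); the fragment below is a sketch with hypothetical names.

```yaml
# Hypothetical alertChart widget fragment; the referenced policy names are illustrative.
widget:
  alertChart:
    alertPolicyRef:
      name: high-error-rate-policy   # a MonitoringAlertPolicy managed in the same namespace
      # external: "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]"  # use instead of name for policies not managed by KCC
```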

+

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

-

mosaicLayout.tiles[].widget.blank

+

mosaicLayout.tiles[].widget.sectionHeader

Optional

object

-

{% verbatim %}A blank space.{% endverbatim %}

+

{% verbatim %}A widget that defines a section header for easier navigation of the dashboard.{% endverbatim %}

-

mosaicLayout.tiles[].widget.collapsibleGroup

+

mosaicLayout.tiles[].widget.sectionHeader.dividerBelow

Optional

-

object

-

{% verbatim %}A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets.{% endverbatim %}

+

boolean

+

{% verbatim %}Whether to insert a divider below the section in the table of contents{% endverbatim %}

-

mosaicLayout.tiles[].widget.collapsibleGroup.collapsed

+

mosaicLayout.tiles[].widget.sectionHeader.subtitle

Optional

-

boolean

-

{% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}

+

string

+

{% verbatim %}The subtitle of the section{% endverbatim %}

-

mosaicLayout.tiles[].widget.logsPanel

+

mosaicLayout.tiles[].widget.text

Optional

object

-

{% verbatim %}A widget that shows a stream of logs.{% endverbatim %}

+

{% verbatim %}A raw string or markdown displaying textual content.{% endverbatim %}

-

mosaicLayout.tiles[].widget.logsPanel.filter

+

mosaicLayout.tiles[].widget.text.content

Optional

string

-

{% verbatim %}A filter that chooses which log entries to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries.{% endverbatim %}

+

{% verbatim %}The text content to be displayed.{% endverbatim %}

-

mosaicLayout.tiles[].widget.logsPanel.resourceNames

+

mosaicLayout.tiles[].widget.text.format

Optional

-

list (object)

-

{% verbatim %}The names of logging resources to collect logs for. Currently only projects are supported. If empty, the widget will default to the host project.{% endverbatim %}

+

string

+

{% verbatim %}How the text content is formatted.{% endverbatim %}

-

mosaicLayout.tiles[].widget.logsPanel.resourceNames[]

+

mosaicLayout.tiles[].widget.text.style

Optional

object

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}How the text is styled{% endverbatim %}

-

mosaicLayout.tiles[].widget.logsPanel.resourceNames[].external

+

mosaicLayout.tiles[].widget.text.style.backgroundColor

Optional

string

-

{% verbatim %}The external name of the referenced resource{% endverbatim %}

+

{% verbatim %}The background color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

-

mosaicLayout.tiles[].widget.logsPanel.resourceNames[].kind

+

mosaicLayout.tiles[].widget.text.style.fontSize

Optional

string

-

{% verbatim %}Kind of the referent.{% endverbatim %}

+

{% verbatim %}Font sizes for both the title and content. The title will still be larger relative to the content.{% endverbatim %}

-

mosaicLayout.tiles[].widget.logsPanel.resourceNames[].name

+

mosaicLayout.tiles[].widget.text.style.horizontalAlignment

Optional

string

-

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

+

{% verbatim %}The horizontal alignment of both the title and content{% endverbatim %}

-

mosaicLayout.tiles[].widget.logsPanel.resourceNames[].namespace

+

mosaicLayout.tiles[].widget.text.style.padding

Optional

string

-

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}
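For the logs panel fields above, the fragment below sketches a panel that streams error-level container logs from one project; the filter and the project reference are illustrative assumptions.

```yaml
# Hypothetical logsPanel widget fragment; the filter and project reference are illustrative.
widget:
  logsPanel:
    filter: 'resource.type="k8s_container" severity>=ERROR'
    resourceNames:
    - external: "projects/my-project-id"   # or reference a KCC-managed Project via name/namespace
```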

+

{% verbatim %}The amount of padding around the widget{% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard

+

mosaicLayout.tiles[].widget.text.style.pointerLocation

Optional


object

-

{% verbatim %}A scorecard summarizing time series data.{% endverbatim %}


string

+

{% verbatim %}The pointer location for this widget (also sometimes called a "tail"){% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard.gaugeView

+

mosaicLayout.tiles[].widget.text.style.textColor

Optional

-

object

-

{% verbatim %}Will cause the scorecard to show a gauge chart.{% endverbatim %}

+

string

+

{% verbatim %}The text color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard.gaugeView.lowerBound

+

mosaicLayout.tiles[].widget.text.style.verticalAlignment

Optional

-

float

-

{% verbatim %}The lower bound for this gauge chart. The value of the chart should always be greater than or equal to this.{% endverbatim %}

+

string

+

{% verbatim %}The vertical alignment of both the title and content{% endverbatim %}
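The text-style fields above map onto a fragment like the one below; the colors and the enum values for format, font size, alignment, and padding are illustrative assumptions rather than values confirmed by this patch.

```yaml
# Hypothetical text widget fragment; colors and enum values are illustrative assumptions.
widget:
  title: "Runbook"
  text:
    content: "Escalation steps are documented in the team runbook."
    format: MARKDOWN                # assumed enum value
    style:
      backgroundColor: "#FFFFFF"
      textColor: "#212121"
      fontSize: FS_LARGE            # assumed enum value
      horizontalAlignment: H_LEFT   # assumed enum value
      verticalAlignment: V_TOP      # assumed enum value
      padding: P_EXTRA_SMALL        # assumed enum value
```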

-

mosaicLayout.tiles[].widget.scorecard.gaugeView.upperBound

+

mosaicLayout.tiles[].widget.title

Optional

-

float

-

{% verbatim %}The upper bound for this gauge chart. The value of the chart should always be less than or equal to this.{% endverbatim %}

+

string

+

{% verbatim %}Optional. The title of the widget.{% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard.sparkChartView

+

mosaicLayout.tiles[].widget.xyChart

Optional

object

-

{% verbatim %}Will cause the scorecard to show a spark chart.{% endverbatim %}

+

{% verbatim %}A chart of time series data.{% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard.sparkChartView.minAlignmentPeriod

+

mosaicLayout.tiles[].widget.xyChart.chartOptions

Optional

-

string

-

{% verbatim %}The lower bound on data point frequency in the chart implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes it would not make sense to fetch and align data at one minute intervals. This field is optional and exists only as a hint.{% endverbatim %}

+

object

+

{% verbatim %}Display options for the chart.{% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard.sparkChartView.sparkChartType

-

Required*

+

mosaicLayout.tiles[].widget.xyChart.chartOptions.mode

+

Optional

string

-

{% verbatim %}Required. The type of sparkchart to show in this chartView.{% endverbatim %}

+

{% verbatim %}The chart mode.{% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard.thresholds

-

Optional

+

mosaicLayout.tiles[].widget.xyChart.dataSets

+

Required*

list (object)

-

{% verbatim %}The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.)

As an example, consider a scorecard with the following four thresholds:

```
{
  value: 90,
  category: 'DANGER',
  trigger: 'ABOVE',
},
{
  value: 70,
  category: 'WARNING',
  trigger: 'ABOVE',
},
{
  value: 10,
  category: 'DANGER',
  trigger: 'BELOW',
},
{
  value: 20,
  category: 'WARNING',
  trigger: 'BELOW',
}
```

Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.{% endverbatim %}

+

{% verbatim %}Required. The data displayed in this chart.{% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard.thresholds[]

-

Optional

+

mosaicLayout.tiles[].widget.xyChart.dataSets[]

+

Required*

object

@@ -5273,47 +8639,37 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.thresholds[].color

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].legendTemplate

Optional

string

-

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

+

{% verbatim %}A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value.{% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard.thresholds[].direction

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].minAlignmentPeriod

Optional

string

-

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

+

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard.thresholds[].label

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].plotType

Optional

string

-

{% verbatim %}A label for the threshold.{% endverbatim %}


mosaicLayout.tiles[].widget.scorecard.thresholds[].value

-

Optional


float

-

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

+

{% verbatim %}How this data should be plotted on the chart.{% endverbatim %}
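The data-set fields above (legend template, minimum alignment period, plot type) come together in a fragment like the following sketch; the metric, the periods, and the template string are illustrative assumptions.

```yaml
# Hypothetical xyChart data set fragment; metric, periods, and template are illustrative.
dataSets:
- plotType: LINE                      # assumed enum value
  minAlignmentPeriod: "600s"          # data assumed to be published every 10 minutes
  legendTemplate: "${resource.labels.zone}"
  timeSeriesQuery:
    unitOverride: "By"
    timeSeriesFilter:
      filter: 'metric.type="compute.googleapis.com/instance/network/received_bytes_count" resource.type="gce_instance"'
      aggregation:
        alignmentPeriod: "600s"
        perSeriesAligner: ALIGN_RATE
```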

-

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery

Required*

@@ -5323,7 +8679,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter

Optional

@@ -5333,7 +8689,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

Optional

@@ -5343,7 +8699,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

@@ -5364,7 +8720,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

@@ -5387,7 +8743,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

Optional

@@ -5397,7 +8753,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

Optional

@@ -5407,7 +8763,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

@@ -5432,7 +8788,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.filter

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

Required*

@@ -5442,7 +8798,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

Optional

@@ -5452,7 +8808,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

Optional

@@ -5462,7 +8818,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -5472,7 +8828,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

Optional

@@ -5482,7 +8838,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

Optional

@@ -5492,7 +8848,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

@@ -5513,7 +8869,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

@@ -5536,7 +8892,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

Optional

@@ -5546,7 +8902,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

Optional

@@ -5556,7 +8912,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

@@ -5581,7 +8937,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

Optional

@@ -5591,7 +8947,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

Optional

@@ -5601,7 +8957,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

Optional

@@ -5611,7 +8967,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

Optional

@@ -5632,7 +8988,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

@@ -5655,7 +9011,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

Optional

@@ -5665,7 +9021,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

Optional

@@ -5675,7 +9031,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

@@ -5700,7 +9056,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

Required*

@@ -5710,7 +9066,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

Optional

@@ -5720,7 +9076,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

Optional

@@ -5730,7 +9086,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

@@ -5751,7 +9107,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

Optional

@@ -5774,7 +9130,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

Optional

@@ -5784,7 +9140,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

Optional

@@ -5794,7 +9150,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

@@ -5819,7 +9175,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

Required*

@@ -5829,7 +9185,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

Optional

@@ -5839,7 +9195,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

Optional

@@ -5849,7 +9205,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -5859,7 +9215,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

Optional

@@ -5869,7 +9225,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

Optional

@@ -5879,7 +9235,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

Optional

@@ -5900,7 +9256,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

Optional

@@ -5923,7 +9279,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

Optional

@@ -5933,7 +9289,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

Optional

@@ -5943,7 +9299,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

Optional

@@ -5968,7 +9324,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesQueryLanguage

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

Optional

@@ -5978,7 +9334,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.unitOverride

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.unitOverride

Optional

@@ -5988,644 +9344,527 @@ rowLayout: -

mosaicLayout.tiles[].widget.sectionHeader

+

mosaicLayout.tiles[].widget.xyChart.thresholds

Optional

-

object

-

{% verbatim %}A widget that defines a section header for easier navigation of the dashboard.{% endverbatim %}

+

list (object)

+

{% verbatim %}Threshold lines drawn horizontally across the chart.{% endverbatim %}

-

mosaicLayout.tiles[].widget.sectionHeader.dividerBelow

+

mosaicLayout.tiles[].widget.xyChart.thresholds[]

Optional

-

boolean

-

{% verbatim %}Whether to insert a divider below the section in the table of contents{% endverbatim %}

+

object

+

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].widget.sectionHeader.subtitle

+

mosaicLayout.tiles[].widget.xyChart.thresholds[].color

Optional

string

-

{% verbatim %}The subtitle of the section{% endverbatim %}

+

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text

+

mosaicLayout.tiles[].widget.xyChart.thresholds[].direction

Optional

-

object

-

{% verbatim %}A raw string or markdown displaying textual content.{% endverbatim %}

+

string

+

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.content

+

mosaicLayout.tiles[].widget.xyChart.thresholds[].label

Optional

string

-

{% verbatim %}The text content to be displayed.{% endverbatim %}

+

{% verbatim %}A label for the threshold.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.format

+

mosaicLayout.tiles[].widget.xyChart.thresholds[].value

Optional

-

string

-

{% verbatim %}How the text content is formatted.{% endverbatim %}

+

float

+

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.style

+

mosaicLayout.tiles[].widget.xyChart.timeshiftDuration

Optional

-

object

-

{% verbatim %}How the text is styled{% endverbatim %}

+

string

+

{% verbatim %}The duration used to display a comparison chart. A comparison chart simultaneously shows values from two similar-length time periods (e.g., week-over-week metrics). The duration must be positive, and it can only be applied to charts with data sets of LINE plot type.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.style.backgroundColor

+

mosaicLayout.tiles[].widget.xyChart.xAxis

Optional

-

string

-

{% verbatim %}The background color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

+

object

+

{% verbatim %}The properties applied to the x-axis.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.style.fontSize

+

mosaicLayout.tiles[].widget.xyChart.xAxis.label

Optional

string

-

{% verbatim %}Font sizes for both the title and content. The title will still be larger relative to the content.{% endverbatim %}

+

{% verbatim %}The label of the axis.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.style.horizontalAlignment

+

mosaicLayout.tiles[].widget.xyChart.xAxis.scale

Optional

string

-

{% verbatim %}The horizontal alignment of both the title and content{% endverbatim %}

+

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.style.padding

+

mosaicLayout.tiles[].widget.xyChart.yAxis

Optional

-

string

-

{% verbatim %}The amount of padding around the widget{% endverbatim %}

+

object

+

{% verbatim %}The properties applied to the y-axis.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.style.pointerLocation

+

mosaicLayout.tiles[].widget.xyChart.yAxis.label

Optional

string

-

{% verbatim %}The pointer location for this widget (also sometimes called a "tail"){% endverbatim %}

+

{% verbatim %}The label of the axis.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.style.textColor

+

mosaicLayout.tiles[].widget.xyChart.yAxis.scale

Optional

string

-

{% verbatim %}The text color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

+

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.style.verticalAlignment

+

mosaicLayout.tiles[].width

Optional

-

string

-

{% verbatim %}The vertical alignment of both the title and content{% endverbatim %}

+

integer

+

{% verbatim %}The width of the tile, measured in grid blocks. Tiles must have a minimum width of 1.{% endverbatim %}

-

mosaicLayout.tiles[].widget.title

+

mosaicLayout.tiles[].xPos

Optional

-

string

-

{% verbatim %}Optional. The title of the widget.{% endverbatim %}

+

integer

+

{% verbatim %}The zero-indexed position of the tile in grid blocks relative to the left edge of the grid. Tiles must be contained within the specified number of columns. `x_pos` cannot be negative.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart

+

mosaicLayout.tiles[].yPos

Optional

-

object

-

{% verbatim %}A chart of time series data.{% endverbatim %}

+

integer

+

{% verbatim %}The zero-indexed position of the tile in grid blocks relative to the top edge of the grid. `y_pos` cannot be negative.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.chartOptions

-

Optional

+

projectRef

+

Required

object

-

{% verbatim %}Display options for the chart.{% endverbatim %}

+

{% verbatim %}Immutable. The Project that this resource belongs to.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.chartOptions.mode

+

projectRef.external

Optional

string

-

{% verbatim %}The chart mode.{% endverbatim %}

+

{% verbatim %}The `projectID` field of a project, when not managed by KCC.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets

-

Required*

+

projectRef.kind

+

Optional

-

list (object)

-

{% verbatim %}Required. The data displayed in this chart.{% endverbatim %}

+

string

+

{% verbatim %}The kind of the Project resource; optional but must be `Project` if provided.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[]

-

Required*

+

projectRef.name

+

Optional

-

object

-

{% verbatim %}{% endverbatim %}

+

string

+

{% verbatim %}The `name` field of a `Project` resource.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].legendTemplate

+

projectRef.namespace

Optional

string

-

{% verbatim %}A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value.{% endverbatim %}

+

{% verbatim %}The `namespace` field of a `Project` resource.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].minAlignmentPeriod

+

resourceID

Optional

string

-

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

+

{% verbatim %}Immutable. Optional. The name of the resource. Used for creation and acquisition. When unset, the value of `metadata.name` is used as the default.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].plotType

+

rowLayout

Optional

-

string

-

{% verbatim %}How this data should be plotted on the chart.{% endverbatim %}

+

object

+

{% verbatim %}The content is divided into equally spaced rows and the widgets are arranged horizontally.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery

-

Required*

+

rowLayout.rows

+

Optional

-

object

-

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

+

list (object)

+

{% verbatim %}The rows of content to display.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter

+

rowLayout.rows[]

Optional

object

-

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

+

rowLayout.rows[].weight

Optional

-

object

-

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+

integer

+

{% verbatim %}The relative weight of this row. The row weight is used to adjust the height of rows on the screen (relative to peers). Greater the weight, greater the height of the row on the screen. If omitted, a value of 1 is used while rendering.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

rowLayout.rows[].widgets

Optional

-

string

-

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+

list (object)

+

{% verbatim %}The display widgets arranged horizontally in this row.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[]

Optional

-

string

-

{% verbatim %}The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned.{% endverbatim %}

+

object

+

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

rowLayout.rows[].widgets[].alertChart

Optional

-

list (string)

-

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+

object

+

{% verbatim %}A chart of alert policy data.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

-

Optional

+

rowLayout.rows[].widgets[].alertChart.alertPolicyRef

+

Required*

-

string

-

{% verbatim %}{% endverbatim %}

+

object

+

{% verbatim %}Required. A reference to the MonitoringAlertPolicy.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].alertChart.alertPolicyRef.external

Optional

string

-

{% verbatim %}An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned.{% endverbatim %}

+

{% verbatim %}The MonitoringAlertPolicy link in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", when not managed by KCC.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

-

Required*

+

rowLayout.rows[].widgets[].alertChart.alertPolicyRef.name

+

Optional

string

-

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+

{% verbatim %}The `name` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

rowLayout.rows[].widgets[].alertChart.alertPolicyRef.namespace

Optional

-

object

-

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+

string

+

{% verbatim %}The `namespace` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

rowLayout.rows[].widgets[].blank

Optional

-

string

-

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+

object

+

{% verbatim %}A blank space.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

rowLayout.rows[].widgets[].collapsibleGroup

Optional

-

integer

-

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+

object

+

{% verbatim %}A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

rowLayout.rows[].widgets[].collapsibleGroup.collapsed

Optional

-

string

-

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+

boolean

+

{% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

rowLayout.rows[].widgets[].logsPanel

Optional

object

-

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

+

{% verbatim %}A widget that shows a stream of logs.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].logsPanel.filter

Optional

string

-

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+

{% verbatim %}A filter that chooses which log entries to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[].logsPanel.resourceNames

Optional

-

string

-

{% verbatim %}The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned.{% endverbatim %}

+

list (object)

+

{% verbatim %}The names of logging resources to collect logs for. Currently only projects are supported. If empty, the widget will default to the host project.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

rowLayout.rows[].widgets[].logsPanel.resourceNames[]

Optional

-

list (string)

-

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+

object

+

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

rowLayout.rows[].widgets[].logsPanel.resourceNames[].external

Optional

string

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}The external name of the referenced resource{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].logsPanel.resourceNames[].kind

Optional

- -

string

-

{% verbatim %}An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned.{% endverbatim %}

- + +

string

+

{% verbatim %}Kind of the referent.{% endverbatim %}

+ -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

+

rowLayout.rows[].widgets[].logsPanel.resourceNames[].name

Optional

-

object

-

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

+

string

+

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

+

rowLayout.rows[].widgets[].logsPanel.resourceNames[].namespace

Optional

-

object

-

{% verbatim %}The denominator of the ratio.{% endverbatim %}

+

string

+

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

rowLayout.rows[].widgets[].pieChart

Optional

object

-

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+

{% verbatim %}A widget that displays timeseries data as a pie chart.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

-

Optional

+

rowLayout.rows[].widgets[].pieChart.chartType

+

Required*

string

-

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+

{% verbatim %}Required. Indicates the visualization type for the PieChart.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

-

Optional

+

rowLayout.rows[].widgets[].pieChart.dataSets

+

Required*

-

string

-

{% verbatim %}The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned.{% endverbatim %}

+

list (object)

+

{% verbatim %}Required. The queries for the chart's data.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

-

Optional

+

rowLayout.rows[].widgets[].pieChart.dataSets[]

+

Required*

-

list (string)

-

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+

object

+

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

rowLayout.rows[].widgets[].pieChart.dataSets[].minAlignmentPeriod

Optional

string

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].pieChart.dataSets[].sliceNameTemplate

Optional

string

-

{% verbatim %}An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned.{% endverbatim %}

+

{% verbatim %}Optional. A template for the name of the slice. This name will be displayed in the legend and the tooltip of the pie chart. It replaces the auto-generated names for the slices. For example, if the template is set to `${resource.labels.zone}`, the zone's value will be used for the name instead of the default name.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery

Required*

-

string

-

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+

object

+

{% verbatim %}Required. The query for the PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter

Optional

object

-

{% verbatim %}The numerator of the ratio.{% endverbatim %}

+

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

Optional

@@ -6635,7 +9874,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

@@ -6656,7 +9895,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

@@ -6679,7 +9918,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

Optional

@@ -6689,7 +9928,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

Optional

@@ -6699,7 +9938,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

@@ -6724,7 +9963,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

Required*

@@ -6734,7 +9973,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

Optional

@@ -6744,7 +9983,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

Optional

@@ -6754,7 +9993,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -6764,7 +10003,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

Optional

@@ -6774,17 +10013,17 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

Optional

object

-

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

+

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

@@ -6805,7 +10044,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

@@ -6828,7 +10067,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

Optional

@@ -6838,7 +10077,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

Optional

@@ -6848,7 +10087,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

@@ -6867,468 +10106,425 @@ rowLayout: Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

-

Optional

- - -

string

-

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.unitOverride

-

Optional

- - -

string

-

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.thresholds

-

Optional

- - -

list (object)

-

{% verbatim %}Threshold lines drawn horizontally across the chart.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.thresholds[]

-

Optional

- - -

object

-

{% verbatim %}{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.thresholds[].color

-

Optional

- - -

string

-

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.thresholds[].direction

-

Optional

- - -

string

-

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.thresholds[].label

-

Optional

- - -

string

-

{% verbatim %}A label for the threshold.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.thresholds[].value

-

Optional

- - -

float

-

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.timeshiftDuration

-

Optional

- - -

string

-

{% verbatim %}The duration used to display a comparison chart. A comparison chart simultaneously shows values from two similar-length time periods (e.g., week-over-week metrics). The duration must be positive, and it can only be applied to charts with data sets of LINE plot type.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.xAxis

-

Optional

- - -

object

-

{% verbatim %}The properties applied to the x-axis.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.xAxis.label

-

Optional

- - -

string

-

{% verbatim %}The label of the axis.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.xAxis.scale

-

Optional

- - -

string

-

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.yAxis

-

Optional

- - -

object

-

{% verbatim %}The properties applied to the y-axis.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.yAxis.label

-

Optional

- - -

string

-

{% verbatim %}The label of the axis.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.yAxis.scale

-

Optional

- - -

string

-

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

+ and `alignment_period` must be specified; otherwise, an error is + returned.{% endverbatim %}

-

mosaicLayout.tiles[].width

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

Optional

-

integer

-

{% verbatim %}The width of the tile, measured in grid blocks. Tiles must have a minimum width of 1.{% endverbatim %}

+

object

+

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

-

mosaicLayout.tiles[].xPos

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

Optional

-

integer

-

{% verbatim %}The zero-indexed position of the tile in grid blocks relative to the left edge of the grid. Tiles must be contained within the specified number of columns. `x_pos` cannot be negative.{% endverbatim %}

+

object

+

{% verbatim %}The denominator of the ratio.{% endverbatim %}

-

mosaicLayout.tiles[].yPos

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

Optional

-

integer

-

{% verbatim %}The zero-indexed position of the tile in grid blocks relative to the top edge of the grid. `y_pos` cannot be negative.{% endverbatim %}

+

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

-

projectRef

-

Required

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

Optional

-

object

-

{% verbatim %}Immutable. The Project that this resource belongs to.{% endverbatim %}

+

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

projectRef.external

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

string

-

{% verbatim %}The `projectID` field of a project, when not managed by KCC.{% endverbatim %}

+

{% verbatim %}The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned.{% endverbatim %}

-

projectRef.kind

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

Optional

-

string

-

{% verbatim %}The kind of the Project resource; optional but must be `Project` if provided.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

projectRef.name

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

Optional

string

-

{% verbatim %}The `name` field of a `Project` resource.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

projectRef.namespace

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

string

-

{% verbatim %}The `namespace` field of a `Project` resource.{% endverbatim %}

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned.{% endverbatim %}

-

resourceID

-

Optional

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

Required*

string

-

{% verbatim %}Immutable. Optional. The name of the resource. Used for creation and acquisition. When unset, the value of `metadata.name` is used as the default.{% endverbatim %}

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

-

rowLayout

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

Optional

object

-

{% verbatim %}The content is divided into equally spaced rows and the widgets are arranged horizontally.{% endverbatim %}

+

{% verbatim %}The numerator of the ratio.{% endverbatim %}

-

rowLayout.rows

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

Optional

-

list (object)

-

{% verbatim %}The rows of content to display.{% endverbatim %}

+

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

-

rowLayout.rows[]

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

-

object

-

{% verbatim %}{% endverbatim %}

+

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

rowLayout.rows[].weight

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

Optional

-

integer

-

{% verbatim %}The relative weight of this row. The row weight is used to adjust the height of rows on the screen (relative to peers). Greater the weight, greater the height of the row on the screen. If omitted, a value of 1 is used while rendering.{% endverbatim %}

+

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned.{% endverbatim %}

-

rowLayout.rows[].widgets

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

Optional

-

list (object)

-

{% verbatim %}The display widgets arranged horizontally in this row.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

rowLayout.rows[].widgets[]

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

Optional

-

object

+

string

{% verbatim %}{% endverbatim %}

-

rowLayout.rows[].widgets[].alertChart

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

-

object

-

{% verbatim %}A chart of alert policy data.{% endverbatim %}

+

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned.{% endverbatim %}

-

rowLayout.rows[].widgets[].alertChart.alertPolicyRef

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

Required*

- -

object

-

{% verbatim %}Required. A reference to the MonitoringAlertPolicy.{% endverbatim %}

- - - - -

rowLayout.rows[].widgets[].alertChart.alertPolicyRef.external

-

Optional

-

string

-

{% verbatim %}The MonitoringAlertPolicy link in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", when not managed by KCC.{% endverbatim %}

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

-

rowLayout.rows[].widgets[].alertChart.alertPolicyRef.name

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

Optional

-

string

-

{% verbatim %}The `name` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

+

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

-

rowLayout.rows[].widgets[].alertChart.alertPolicyRef.namespace

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

Optional

string

-

{% verbatim %}The `namespace` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

-

rowLayout.rows[].widgets[].blank

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

Optional

-

object

-

{% verbatim %}A blank space.{% endverbatim %}

+

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

-

rowLayout.rows[].widgets[].collapsibleGroup

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

Optional

-

object

-

{% verbatim %}A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets.{% endverbatim %}

+

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

-

rowLayout.rows[].widgets[].collapsibleGroup.collapsed

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

Optional

-

boolean

-

{% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}

+

object

+

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

-

rowLayout.rows[].widgets[].logsPanel

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

Optional

-

object

-

{% verbatim %}A widget that shows a stream of logs.{% endverbatim %}

+

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

rowLayout.rows[].widgets[].logsPanel.filter

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

Optional

string

-

{% verbatim %}A filter that chooses which log entries to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries.{% endverbatim %}

+

{% verbatim %}The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned.{% endverbatim %}

-

rowLayout.rows[].widgets[].logsPanel.resourceNames

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

Optional

-

list (object)

-

{% verbatim %}The names of logging resources to collect logs for. Currently only projects are supported. If empty, the widget will default to the host project.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

rowLayout.rows[].widgets[].logsPanel.resourceNames[]

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

Optional

-

object

+

string

{% verbatim %}{% endverbatim %}

-

rowLayout.rows[].widgets[].logsPanel.resourceNames[].external

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

Optional

string

-

{% verbatim %}The external name of the referenced resource{% endverbatim %}

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned.{% endverbatim %}

-

rowLayout.rows[].widgets[].logsPanel.resourceNames[].kind

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

Optional

string

-

{% verbatim %}Kind of the referent.{% endverbatim %}

+

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

-

rowLayout.rows[].widgets[].logsPanel.resourceNames[].name

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.unitOverride

Optional

string

-

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

+

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

-

rowLayout.rows[].widgets[].logsPanel.resourceNames[].namespace

+

rowLayout.rows[].widgets[].pieChart.showLabels

Optional

-

string

-

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}

+

boolean

+

{% verbatim %}Optional. Indicates whether or not the pie chart should show slices' labels{% endverbatim %}
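To make the aggregation fields above concrete, here is a minimal sketch of a `MonitoringDashboard` manifest that sets `secondaryAggregation.groupByFields` and `secondaryAggregation.perSeriesAligner` on a `pieChart` data set. It is illustrative only: the metric filters, the `chartType` and `crossSeriesReducer` values, and all names are placeholders rather than values taken from this patch.

```yaml
# Hypothetical sketch; metric filters and names are placeholders.
apiVersion: monitoring.cnrm.cloud.google.com/v1beta1
kind: MonitoringDashboard
metadata:
  name: monitoringdashboard-piechart-example
spec:
  displayName: piechart-aggregation-example
  rowLayout:
    rows:
    - widgets:
      - title: PieChart Widget
        pieChart:
          chartType: PIE        # assumed enum value; check the generated CRD for the full list
          showLabels: true
          dataSets:
          - timeSeriesQuery:
              unitOverride: "1"
              timeSeriesFilterRatio:
                numerator:
                  filter: 'metric.type="serviceruntime.googleapis.com/api/request_count"'
                denominator:
                  filter: 'metric.type="serviceruntime.googleapis.com/api/request_count"'
                secondaryAggregation:
                  alignmentPeriod: "60s"
                  perSeriesAligner: ALIGN_MEAN
                  # Cross-series reduction requires an aligner other than ALIGN_NONE
                  # and an alignmentPeriod, per the field description above.
                  crossSeriesReducer: REDUCE_SUM
                  groupByFields:
                  - resource.label.project_id
```

Per the field descriptions, because `crossSeriesReducer` is set, each series is first aligned with `perSeriesAligner` over every `alignmentPeriod`, then reduced within each partition defined by `groupByFields`.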

From 3e7163cdee47a95592641c053539335af6e6b508 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Thu, 27 Jun 2024 21:42:00 +0000 Subject: [PATCH 077/101] docs: mention resource promotions Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- docs/releasenotes/release-1.120.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/releasenotes/release-1.120.md b/docs/releasenotes/release-1.120.md index 8b4ca1db49..712c33feed 100644 --- a/docs/releasenotes/release-1.120.md +++ b/docs/releasenotes/release-1.120.md @@ -15,7 +15,8 @@ TODO: list contributors with `git log v1.120.0... | grep Merge | grep from | awk *When resources are promoted from alpha to beta, we (generally) ensure they follow our best practices: use of refs on fields where appropriate, output fields from GCP APIs are in `status.observedState.*` -* `PlaceholderKind` +* `CloudIDSEndpoint` +* `ComputeMangedSSLCertificate` ## New Resources: From 50f0920af85a5fc5ad4feb530d544b083d23333a Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Thu, 27 Jun 2024 21:42:27 +0000 Subject: [PATCH 078/101] docs: stage 1.122 docs Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- docs/releasenotes/release-1.122.md | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 docs/releasenotes/release-1.122.md diff --git a/docs/releasenotes/release-1.122.md b/docs/releasenotes/release-1.122.md new file mode 100644 index 0000000000..f45620f18e --- /dev/null +++ b/docs/releasenotes/release-1.122.md @@ -0,0 +1,26 @@ +# v1.122.0 + +** This version is not yet released; this document is gathering release notes for the future release ** + +* ... + +* Special shout-outs to ... for their + contributions to this release. +TODO: list contributors with `git log v1.121.0... | grep Merge | grep from | awk '{print $6}' | cut -d '/' -f 1 | sort | uniq` + +## Resources promoted from alpha to beta: + +*When resources are promoted from alpha to beta, we (generally) ensure they follow our best practices: use of refs on fields where appropriate, +output fields from GCP APIs are in `status.observedState.*` + +* `PlaceholderKind` + +## New Resources: + +* Added support for `PlaceholderKind` (v1beta1) resource. + +## New Fields: + +* PlaceholderKind + * Added `spec.placeholder` field. 
+ From 3043a5712fcd0ac48bf9ff6b5c69155bedef8446 Mon Sep 17 00:00:00 2001 From: justinsb Date: Wed, 26 Jun 2024 17:10:38 -0400 Subject: [PATCH 079/101] monitoringdashboard: add support for errorReportingPanel widget --- .../v1beta1/monitoringdashboard_types.go | 15 +- .../v1beta1/zz_generated.deepcopy.go | 12 +- ...ards.monitoring.cnrm.cloud.google.com.yaml | 268 ++++++++++ .../v1beta1/monitoringdashboard_types.go | 46 ++ .../v1beta1/zz_generated.deepcopy.go | 79 +++ .../dashboard_generated.mappings.go | 27 +- .../direct/monitoring/dashboard_mappings.go | 29 + pkg/controller/direct/monitoring/refs.go | 23 + ...ated_export_monitoringdashboardfull.golden | 11 + ...object_monitoringdashboardfull.golden.yaml | 11 + .../monitoringdashboardfull/_http.log | 51 ++ .../monitoringdashboardfull/create.yaml | 11 + .../monitoring/monitoringdashboard.md | 504 ++++++++++++++++++ 13 files changed, 1051 insertions(+), 36 deletions(-) diff --git a/apis/monitoring/v1beta1/monitoringdashboard_types.go b/apis/monitoring/v1beta1/monitoringdashboard_types.go index 3e6ca426e4..7ed3e1f2a5 100644 --- a/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -297,13 +297,8 @@ type CollapsibleGroup struct { // +kcc:proto=google.monitoring.dashboard.v1.ErrorReportingPanel type ErrorReportingPanel struct { - // The resource name of the Google Cloud Platform project. Written - // as `projects/{projectID}` or `projects/{projectNumber}`, where - // `{projectID}` and `{projectNumber}` can be found in the - // [Google Cloud console](https://support.google.com/cloud/answer/6158840). - // - // Examples: `projects/my-project-123`, `projects/5551234`. - ProjectNames []string `json:"projectNames,omitempty"` + // The projects from which to gather errors. + ProjectRefs []refs.ProjectRef `json:"projectRefs,omitempty"` // An identifier of the service, such as the name of the // executable, job, or Google App Engine service name. This field is expected @@ -471,10 +466,8 @@ type Widget struct { // A widget that displays timeseries data as a pie chart. PieChart *PieChart `json:"pieChart,omitempty"` - /* - // A widget that displays a list of error groups. - ErrorReportingPanel *ErrorReportingPanel `json:"errorReportingPanel,omitempty"` - */ + // A widget that displays a list of error groups. + ErrorReportingPanel *ErrorReportingPanel `json:"errorReportingPanel,omitempty"` // A widget that defines a section header for easier navigation of the // dashboard. diff --git a/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/apis/monitoring/v1beta1/zz_generated.deepcopy.go index d99d9b4984..75557f7e57 100644 --- a/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -19,6 +19,7 @@ package v1beta1 import ( + refsv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -228,9 +229,9 @@ func (in *Empty) DeepCopy() *Empty { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ErrorReportingPanel) DeepCopyInto(out *ErrorReportingPanel) { *out = *in - if in.ProjectNames != nil { - in, out := &in.ProjectNames, &out.ProjectNames - *out = make([]string, len(*in)) + if in.ProjectRefs != nil { + in, out := &in.ProjectRefs, &out.ProjectRefs + *out = make([]refsv1beta1.ProjectRef, len(*in)) copy(*out, *in) } if in.Services != nil { @@ -1272,6 +1273,11 @@ func (in *Widget) DeepCopyInto(out *Widget) { *out = new(PieChart) (*in).DeepCopyInto(*out) } + if in.ErrorReportingPanel != nil { + in, out := &in.ErrorReportingPanel, &out.ErrorReportingPanel + *out = new(ErrorReportingPanel) + (*in).DeepCopyInto(*out) + } if in.SectionHeader != nil { in, out := &in.SectionHeader, &out.SectionHeader *out = new(SectionHeader) diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml index e08bbf1faf..d51e4a12de 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml @@ -131,6 +131,74 @@ spec: on first page load. type: boolean type: object + errorReportingPanel: + description: A widget that displays a list of error + groups. + properties: + projectRefs: + description: The projects from which to gather + errors. + items: + description: The Project that this resource + belongs to. + oneOf: + - not: + required: + - external + required: + - name + - kind + - not: + anyOf: + - required: + - name + - required: + - namespace + - required: + - kind + required: + - external + properties: + external: + description: The `projectID` field of a + project, when not managed by KCC. + type: string + kind: + description: The kind of the Project resource; + optional but must be `Project` if provided. + type: string + name: + description: The `name` field of a `Project` + resource. + type: string + namespace: + description: The `namespace` field of a + `Project` resource. + type: string + type: object + type: array + services: + description: |- + An identifier of the service, such as the name of the + executable, job, or Google App Engine service name. This field is expected + to have a low number of values that are relatively stable over time, as + opposed to `version`, which can be changed whenever new code is deployed. + + Contains the service name for error reports extracted from Google + App Engine logs or `default` if the App Engine default service is used. + items: + type: string + type: array + versions: + description: Represents the source code version + that the developer provided, which could represent + a version label or a Git SHA-1 hash, for example. + For App Engine standard environment, the version + is set to the version of the app. + items: + type: string + type: array + type: object logsPanel: description: A widget that shows a stream of logs. properties: @@ -2235,6 +2303,71 @@ spec: page load. type: boolean type: object + errorReportingPanel: + description: A widget that displays a list of error groups. + properties: + projectRefs: + description: The projects from which to gather errors. + items: + description: The Project that this resource belongs + to. 
+ oneOf: + - not: + required: + - external + required: + - name + - kind + - not: + anyOf: + - required: + - name + - required: + - namespace + - required: + - kind + required: + - external + properties: + external: + description: The `projectID` field of a project, + when not managed by KCC. + type: string + kind: + description: The kind of the Project resource; + optional but must be `Project` if provided. + type: string + name: + description: The `name` field of a `Project` resource. + type: string + namespace: + description: The `namespace` field of a `Project` + resource. + type: string + type: object + type: array + services: + description: |- + An identifier of the service, such as the name of the + executable, job, or Google App Engine service name. This field is expected + to have a low number of values that are relatively stable over time, as + opposed to `version`, which can be changed whenever new code is deployed. + + Contains the service name for error reports extracted from Google + App Engine logs or `default` if the App Engine default service is used. + items: + type: string + type: array + versions: + description: Represents the source code version that + the developer provided, which could represent a version + label or a Git SHA-1 hash, for example. For App Engine + standard environment, the version is set to the version + of the app. + items: + type: string + type: array + type: object logsPanel: description: A widget that shows a stream of logs. properties: @@ -4229,6 +4362,73 @@ spec: first page load. type: boolean type: object + errorReportingPanel: + description: A widget that displays a list of error + groups. + properties: + projectRefs: + description: The projects from which to gather errors. + items: + description: The Project that this resource belongs + to. + oneOf: + - not: + required: + - external + required: + - name + - kind + - not: + anyOf: + - required: + - name + - required: + - namespace + - required: + - kind + required: + - external + properties: + external: + description: The `projectID` field of a project, + when not managed by KCC. + type: string + kind: + description: The kind of the Project resource; + optional but must be `Project` if provided. + type: string + name: + description: The `name` field of a `Project` + resource. + type: string + namespace: + description: The `namespace` field of a `Project` + resource. + type: string + type: object + type: array + services: + description: |- + An identifier of the service, such as the name of the + executable, job, or Google App Engine service name. This field is expected + to have a low number of values that are relatively stable over time, as + opposed to `version`, which can be changed whenever new code is deployed. + + Contains the service name for error reports extracted from Google + App Engine logs or `default` if the App Engine default service is used. + items: + type: string + type: array + versions: + description: Represents the source code version + that the developer provided, which could represent + a version label or a Git SHA-1 hash, for example. + For App Engine standard environment, the version + is set to the version of the app. + items: + type: string + type: array + type: object logsPanel: description: A widget that shows a stream of logs. properties: @@ -6348,6 +6548,74 @@ spec: on first page load. type: boolean type: object + errorReportingPanel: + description: A widget that displays a list of error + groups. 
+ properties: + projectRefs: + description: The projects from which to gather + errors. + items: + description: The Project that this resource + belongs to. + oneOf: + - not: + required: + - external + required: + - name + - kind + - not: + anyOf: + - required: + - name + - required: + - namespace + - required: + - kind + required: + - external + properties: + external: + description: The `projectID` field of a + project, when not managed by KCC. + type: string + kind: + description: The kind of the Project resource; + optional but must be `Project` if provided. + type: string + name: + description: The `name` field of a `Project` + resource. + type: string + namespace: + description: The `namespace` field of a + `Project` resource. + type: string + type: object + type: array + services: + description: |- + An identifier of the service, such as the name of the + executable, job, or Google App Engine service name. This field is expected + to have a low number of values that are relatively stable over time, as + opposed to `version`, which can be changed whenever new code is deployed. + + Contains the service name for error reports extracted from Google + App Engine logs or `default` if the App Engine default service is used. + items: + type: string + type: array + versions: + description: Represents the source code version + that the developer provided, which could represent + a version label or a Git SHA-1 hash, for example. + For App Engine standard environment, the version + is set to the version of the app. + items: + type: string + type: array + type: object logsPanel: description: A widget that shows a stream of logs. properties: diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go index 7241225d2e..0c767767f6 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -154,6 +154,26 @@ type DashboardDenominator struct { Filter string `json:"filter"` } +type DashboardErrorReportingPanel struct { + /* The projects from which to gather errors. */ + // +optional + ProjectRefs []DashboardProjectRefs `json:"projectRefs,omitempty"` + + /* An identifier of the service, such as the name of the + executable, job, or Google App Engine service name. This field is expected + to have a low number of values that are relatively stable over time, as + opposed to `version`, which can be changed whenever new code is deployed. + + Contains the service name for error reports extracted from Google + App Engine logs or `default` if the App Engine default service is used. */ + // +optional + Services []string `json:"services,omitempty"` + + /* Represents the source code version that the developer provided, which could represent a version label or a Git SHA-1 hash, for example. For App Engine standard environment, the version is set to the version of the app. */ + // +optional + Versions []string `json:"versions,omitempty"` +} + type DashboardGaugeView struct { /* The lower bound for this gauge chart. The value of the chart should always be greater than or equal to this. */ // +optional @@ -229,6 +249,24 @@ type DashboardPieChart struct { ShowLabels *bool `json:"showLabels,omitempty"` } +type DashboardProjectRefs struct { + /* The `projectID` field of a project, when not managed by KCC. 
*/ + // +optional + External *string `json:"external,omitempty"` + + /* The kind of the Project resource; optional but must be `Project` if provided. */ + // +optional + Kind *string `json:"kind,omitempty"` + + /* The `name` field of a `Project` resource. */ + // +optional + Name *string `json:"name,omitempty"` + + /* The `namespace` field of a `Project` resource. */ + // +optional + Namespace *string `json:"namespace,omitempty"` +} + type DashboardResourceNames struct { /* The external name of the referenced resource */ // +optional @@ -543,6 +581,10 @@ type DashboardWidget struct { // +optional CollapsibleGroup *DashboardCollapsibleGroup `json:"collapsibleGroup,omitempty"` + /* A widget that displays a list of error groups. */ + // +optional + ErrorReportingPanel *DashboardErrorReportingPanel `json:"errorReportingPanel,omitempty"` + /* A widget that shows a stream of logs. */ // +optional LogsPanel *DashboardLogsPanel `json:"logsPanel,omitempty"` @@ -585,6 +627,10 @@ type DashboardWidgets struct { // +optional CollapsibleGroup *DashboardCollapsibleGroup `json:"collapsibleGroup,omitempty"` + /* A widget that displays a list of error groups. */ + // +optional + ErrorReportingPanel *DashboardErrorReportingPanel `json:"errorReportingPanel,omitempty"` + /* A widget that shows a stream of logs. */ // +optional LogsPanel *DashboardLogsPanel `json:"logsPanel,omitempty"` diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go index da7bb970e0..4aa79bfaf0 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -721,6 +721,39 @@ func (in *DashboardDenominator) DeepCopy() *DashboardDenominator { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardErrorReportingPanel) DeepCopyInto(out *DashboardErrorReportingPanel) { + *out = *in + if in.ProjectRefs != nil { + in, out := &in.ProjectRefs, &out.ProjectRefs + *out = make([]DashboardProjectRefs, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardErrorReportingPanel. +func (in *DashboardErrorReportingPanel) DeepCopy() *DashboardErrorReportingPanel { + if in == nil { + return nil + } + out := new(DashboardErrorReportingPanel) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DashboardGaugeView) DeepCopyInto(out *DashboardGaugeView) { *out = *in @@ -911,6 +944,42 @@ func (in *DashboardPieChart) DeepCopy() *DashboardPieChart { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DashboardProjectRefs) DeepCopyInto(out *DashboardProjectRefs) { + *out = *in + if in.External != nil { + in, out := &in.External, &out.External + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardProjectRefs. +func (in *DashboardProjectRefs) DeepCopy() *DashboardProjectRefs { + if in == nil { + return nil + } + out := new(DashboardProjectRefs) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DashboardResourceNames) DeepCopyInto(out *DashboardResourceNames) { *out = *in @@ -1395,6 +1464,11 @@ func (in *DashboardWidget) DeepCopyInto(out *DashboardWidget) { *out = new(DashboardCollapsibleGroup) (*in).DeepCopyInto(*out) } + if in.ErrorReportingPanel != nil { + in, out := &in.ErrorReportingPanel, &out.ErrorReportingPanel + *out = new(DashboardErrorReportingPanel) + (*in).DeepCopyInto(*out) + } if in.LogsPanel != nil { in, out := &in.LogsPanel, &out.LogsPanel *out = new(DashboardLogsPanel) @@ -1461,6 +1535,11 @@ func (in *DashboardWidgets) DeepCopyInto(out *DashboardWidgets) { *out = new(DashboardCollapsibleGroup) (*in).DeepCopyInto(*out) } + if in.ErrorReportingPanel != nil { + in, out := &in.ErrorReportingPanel, &out.ErrorReportingPanel + *out = new(DashboardErrorReportingPanel) + (*in).DeepCopyInto(*out) + } if in.LogsPanel != nil { in, out := &in.LogsPanel, &out.LogsPanel *out = new(DashboardLogsPanel) diff --git a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go index 93309d4def..da7dfbd0c8 100644 --- a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go @@ -136,26 +136,6 @@ func ColumnLayout_Column_ToProto(mapCtx *MapContext, in *krm.ColumnLayout_Column // return out // } -func ErrorReportingPanel_FromProto(mapCtx *MapContext, in *pb.ErrorReportingPanel) *krm.ErrorReportingPanel { - if in == nil { - return nil - } - out := &krm.ErrorReportingPanel{} - out.ProjectNames = in.ProjectNames - out.Services = in.Services - out.Versions = in.Versions - return out -} -func ErrorReportingPanel_ToProto(mapCtx *MapContext, in *krm.ErrorReportingPanel) *pb.ErrorReportingPanel { - if in == nil { - return nil - } - out := &pb.ErrorReportingPanel{} - out.ProjectNames = in.ProjectNames - out.Services = in.Services - out.Versions = in.Versions - return out -} func GridLayout_FromProto(mapCtx *MapContext, in *pb.GridLayout) *krm.GridLayout { if in == nil { return nil @@ -805,7 +785,7 @@ func Widget_FromProto(mapCtx *MapContext, in *pb.Widget) *krm.Widget { out.LogsPanel = LogsPanel_FromProto(mapCtx, in.GetLogsPanel()) // MISSING: IncidentList out.PieChart = PieChart_FromProto(mapCtx, in.GetPieChart()) - // MISSING: ErrorReportingPanel + out.ErrorReportingPanel = ErrorReportingPanel_FromProto(mapCtx, in.GetErrorReportingPanel()) out.SectionHeader = SectionHeader_FromProto(mapCtx, in.GetSectionHeader()) // MISSING: SingleViewGroup // MISSING: Id @@ -843,7 +823,9 @@ func Widget_ToProto(mapCtx *MapContext, in *krm.Widget) 
*pb.Widget { if oneof := PieChart_ToProto(mapCtx, in.PieChart); oneof != nil { out.Content = &pb.Widget_PieChart{PieChart: oneof} } - // MISSING: ErrorReportingPanel + if oneof := ErrorReportingPanel_ToProto(mapCtx, in.ErrorReportingPanel); oneof != nil { + out.Content = &pb.Widget_ErrorReportingPanel{ErrorReportingPanel: oneof} + } if oneof := SectionHeader_ToProto(mapCtx, in.SectionHeader); oneof != nil { out.Content = &pb.Widget_SectionHeader{SectionHeader: oneof} } @@ -851,6 +833,7 @@ func Widget_ToProto(mapCtx *MapContext, in *krm.Widget) *pb.Widget { // MISSING: Id return out } + func XyChart_FromProto(mapCtx *MapContext, in *pb.XyChart) *krm.XyChart { if in == nil { return nil diff --git a/pkg/controller/direct/monitoring/dashboard_mappings.go b/pkg/controller/direct/monitoring/dashboard_mappings.go index 7622cf277e..677a0dcfd4 100644 --- a/pkg/controller/direct/monitoring/dashboard_mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_mappings.go @@ -22,6 +22,7 @@ import ( pb "cloud.google.com/go/monitoring/dashboard/apiv1/dashboardpb" krm "github.com/GoogleCloudPlatform/k8s-config-connector/apis/monitoring/v1beta1" + refs "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" ) @@ -168,3 +169,31 @@ func DashboardTimeSeriesQuery_TimeSeriesQueryLanguage_ToProto(mapCtx *MapContext TimeSeriesQueryLanguage: *in, } } + +func ErrorReportingPanel_FromProto(mapCtx *MapContext, in *pb.ErrorReportingPanel) *krm.ErrorReportingPanel { + if in == nil { + return nil + } + out := &krm.ErrorReportingPanel{} + for _, projectName := range in.ProjectNames { + out.ProjectRefs = append(out.ProjectRefs, refs.ProjectRef{ + External: projectName, + }) + } + out.Services = in.Services + out.Versions = in.Versions + return out +} + +func ErrorReportingPanel_ToProto(mapCtx *MapContext, in *krm.ErrorReportingPanel) *pb.ErrorReportingPanel { + if in == nil { + return nil + } + out := &pb.ErrorReportingPanel{} + for _, projectRef := range in.ProjectRefs { + out.ProjectNames = append(out.ProjectNames, projectRef.External) + } + out.Services = in.Services + out.Versions = in.Versions + return out +} diff --git a/pkg/controller/direct/monitoring/refs.go b/pkg/controller/direct/monitoring/refs.go index 663dc92bc6..e137a634d7 100644 --- a/pkg/controller/direct/monitoring/refs.go +++ b/pkg/controller/direct/monitoring/refs.go @@ -154,6 +154,21 @@ func normalizeMonitoringAlertPolicyRef(ctx context.Context, reader client.Reader return ref, nil } +func normalizeProjectRef(ctx context.Context, reader client.Reader, src client.Object, ref *refs.ProjectRef) (*refs.ProjectRef, error) { + if ref == nil { + return nil, nil + } + + project, err := references.ResolveProject(ctx, reader, src, ref) + if err != nil { + return nil, err + } + + return &refs.ProjectRef{ + External: "projects/" + project.ProjectID, + }, nil +} + type refNormalizer struct { ctx context.Context kube client.Reader @@ -179,5 +194,13 @@ func (r *refNormalizer) VisitField(path string, v any) error { } } + if projectRef, ok := v.(*refs.ProjectRef); ok { + if ref, err := normalizeProjectRef(r.ctx, r.kube, r.src, projectRef); err != nil { + return err + } else if ref != nil { + *projectRef = *ref + } + } + return nil } diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden 
b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden index b1a580b9ef..9da16417c8 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden @@ -79,6 +79,17 @@ spec: perSeriesAligner: ALIGN_MEAN showLabels: true title: PieChart Widget + - errorReportingPanel: + projectRefs: + - external: projects/project1 + - external: projects/project2 + services: + - foo + - bar + versions: + - v1 + - v2 + title: ErrorReporting Widget displayName: monitoringdashboard-full projectRef: external: ${projectId} \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml index fefd47a1a0..3ceef5fe18 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml @@ -87,6 +87,17 @@ spec: perSeriesAligner: ALIGN_MEAN showLabels: true title: PieChart Widget + - errorReportingPanel: + projectRefs: + - external: projects/project1 + - external: projects/project2 + services: + - foo + - bar + versions: + - v1 + - v2 + title: ErrorReporting Widget displayName: monitoringdashboard-full projectRef: external: ${projectId} diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log index 21c276c9c6..65eb332895 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log @@ -312,6 +312,23 @@ x-goog-request-params: parent=projects%2F${projectId} "showLabels": true }, "title": "PieChart Widget" + }, + { + "errorReportingPanel": { + "projectNames": [ + "projects/project1", + "projects/project2" + ], + "services": [ + "foo", + "bar" + ], + "versions": [ + "v1", + "v2" + ] + }, + "title": "ErrorReporting Widget" } ] } @@ -455,6 +472,23 @@ X-Xss-Protection: 0 "showLabels": true }, "title": "PieChart Widget" + }, + { + "errorReportingPanel": { + "projectNames": [ + "projects/project1", + "projects/project2" + ], + "services": [ + "foo", + "bar" + ], + "versions": [ + "v1", + "v2" + ] + }, + "title": "ErrorReporting Widget" } ] } @@ -606,6 +640,23 @@ X-Xss-Protection: 0 "showLabels": true }, "title": "PieChart Widget" + }, + { + "errorReportingPanel": { + "projectNames": [ + "projects/project1", + "projects/project2" + ], + "services": [ + "foo", + "bar" + ], + "versions": [ + "v1", + "v2" + ] + }, + "title": "ErrorReporting Widget" } ] } diff --git 
a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml index de83262134..cda6f28ed7 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml @@ -102,3 +102,14 @@ spec: secondaryAggregation: alignmentPeriod: "60s" perSeriesAligner: "ALIGN_MEAN" + - title: "ErrorReporting Widget" + errorReportingPanel: + projectRefs: + - external: projects/project1 + - external: projects/project2 + services: + - foo + - bar + versions: + - v1 + - v2 diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md index 6e53ab0f2e..76c792ff59 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md @@ -89,6 +89,16 @@ columnLayout: blank: {} collapsibleGroup: collapsed: boolean + errorReportingPanel: + projectRefs: + - external: string + kind: string + name: string + namespace: string + services: + - string + versions: + - string logsPanel: filter: string resourceNames: @@ -304,6 +314,16 @@ gridLayout: blank: {} collapsibleGroup: collapsed: boolean + errorReportingPanel: + projectRefs: + - external: string + kind: string + name: string + namespace: string + services: + - string + versions: + - string logsPanel: filter: string resourceNames: @@ -520,6 +540,16 @@ mosaicLayout: blank: {} collapsibleGroup: collapsed: boolean + errorReportingPanel: + projectRefs: + - external: string + kind: string + name: string + namespace: string + services: + - string + versions: + - string logsPanel: filter: string resourceNames: @@ -744,6 +774,16 @@ rowLayout: blank: {} collapsibleGroup: collapsed: boolean + errorReportingPanel: + projectRefs: + - external: string + kind: string + name: string + namespace: string + services: + - string + versions: + - string logsPanel: filter: string resourceNames: @@ -1096,6 +1136,122 @@ rowLayout:

{% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}

+ + +

columnLayout.columns[].widgets[].errorReportingPanel

+

Optional

+ + +

object

+

{% verbatim %}A widget that displays a list of error groups.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].errorReportingPanel.projectRefs

+

Optional

+ + +

list (object)

+

{% verbatim %}The projects from which to gather errors.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].errorReportingPanel.projectRefs[]

+

Optional

+ + +

object

+

{% verbatim %}The Project that this resource belongs to.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].errorReportingPanel.projectRefs[].external

+

Optional

+ + +

string

+

{% verbatim %}The `projectID` field of a project, when not managed by KCC.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].errorReportingPanel.projectRefs[].kind

+

Optional

+ + +

string

+

{% verbatim %}The kind of the Project resource; optional but must be `Project` if provided.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].errorReportingPanel.projectRefs[].name

+

Optional

+ + +

string

+

{% verbatim %}The `name` field of a `Project` resource.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].errorReportingPanel.projectRefs[].namespace

+

Optional

+ + +

string

+

{% verbatim %}The `namespace` field of a `Project` resource.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].errorReportingPanel.services

+

Optional

+ + +

list (string)

+

{% verbatim %}An identifier of the service, such as the name of the executable, job, or Google App Engine service name. This field is expected to have a low number of values that are relatively stable over time, as opposed to `version`, which can be changed whenever new code is deployed.

Contains the service name for error reports extracted from Google App Engine logs or `default` if the App Engine default service is used.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].errorReportingPanel.services[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].errorReportingPanel.versions

+

Optional

+ + +

list (string)

+

{% verbatim %}Represents the source code version that the developer provided, which could represent a version label or a Git SHA-1 hash, for example. For App Engine standard environment, the version is set to the version of the app.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].errorReportingPanel.versions[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ +
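As a reader aid for the new `errorReportingPanel` fields documented above, the following is a minimal sketch of a dashboard manifest using a `columnLayout`. It mirrors the test fixture added in this patch; the project IDs, service, and version values are placeholders.

```yaml
# Hypothetical sketch; project IDs, services, and versions are placeholders.
apiVersion: monitoring.cnrm.cloud.google.com/v1beta1
kind: MonitoringDashboard
metadata:
  name: monitoringdashboard-errorreporting-example
spec:
  displayName: errorreporting-example
  columnLayout:
    columns:
    - widgets:
      - title: ErrorReporting Widget
        errorReportingPanel:
          projectRefs:
          # A Project resource managed by Config Connector, referenced by name.
          - name: my-project
          # An existing project not managed by Config Connector.
          - external: projects/other-project-id
          services:
          - default
          versions:
          - v1
```

When a ref uses `name`/`namespace`, the controller resolves it and sends the resulting `projects/{projectID}` string in the API's `projectNames` field, as implemented by `normalizeProjectRef` in this patch.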

columnLayout.columns[].widgets[].logsPanel

@@ -3928,6 +4084,122 @@ rowLayout:

{% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}

+ + +

gridLayout.widgets[].errorReportingPanel

+

Optional

+ + +

object

+

{% verbatim %}A widget that displays a list of error groups.{% endverbatim %}

+ + + + +

gridLayout.widgets[].errorReportingPanel.projectRefs

+

Optional

+ + +

list (object)

+

{% verbatim %}The projects from which to gather errors.{% endverbatim %}

+ + + + +

gridLayout.widgets[].errorReportingPanel.projectRefs[]

+

Optional

+ + +

object

+

{% verbatim %}The Project that this resource belongs to.{% endverbatim %}

+ + + + +

gridLayout.widgets[].errorReportingPanel.projectRefs[].external

+

Optional

+ + +

string

+

{% verbatim %}The `projectID` field of a project, when not managed by KCC.{% endverbatim %}

+ + + + +

gridLayout.widgets[].errorReportingPanel.projectRefs[].kind

+

Optional

+ + +

string

+

{% verbatim %}The kind of the Project resource; optional but must be `Project` if provided.{% endverbatim %}

+ + + + +

gridLayout.widgets[].errorReportingPanel.projectRefs[].name

+

Optional

+ + +

string

+

{% verbatim %}The `name` field of a `Project` resource.{% endverbatim %}

+ + + + +

gridLayout.widgets[].errorReportingPanel.projectRefs[].namespace

+

Optional

+ + +

string

+

{% verbatim %}The `namespace` field of a `Project` resource.{% endverbatim %}

+ + + + +

gridLayout.widgets[].errorReportingPanel.services

+

Optional

+ + +

list (string)

+

{% verbatim %}An identifier of the service, such as the name of the executable, job, or Google App Engine service name. This field is expected to have a low number of values that are relatively stable over time, as opposed to `version`, which can be changed whenever new code is deployed.

Contains the service name for error reports extracted from Google App Engine logs or `default` if the App Engine default service is used.{% endverbatim %}

+ + + + +

gridLayout.widgets[].errorReportingPanel.services[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].errorReportingPanel.versions

+

Optional

+ + +

list (string)

+

{% verbatim %}Represents the source code version that the developer provided, which could represent a version label or a Git SHA-1 hash, for example. For App Engine standard environment, the version is set to the version of the app.{% endverbatim %}

+ + + + +

gridLayout.widgets[].errorReportingPanel.versions[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ +

gridLayout.widgets[].logsPanel

@@ -6770,6 +7042,122 @@ rowLayout:

{% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}

+ + +

mosaicLayout.tiles[].widget.errorReportingPanel

+

Optional

+ + +

object

+

{% verbatim %}A widget that displays a list of error groups.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs

+

Optional

+ + +

list (object)

+

{% verbatim %}The projects from which to gather errors.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs[]

+

Optional

+ + +

object

+

{% verbatim %}The Project that this resource belongs to.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs[].external

+

Optional

+ + +

string

+

{% verbatim %}The `projectID` field of a project, when not managed by KCC.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs[].kind

+

Optional

+ + +

string

+

{% verbatim %}The kind of the Project resource; optional but must be `Project` if provided.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs[].name

+

Optional

+ + +

string

+

{% verbatim %}The `name` field of a `Project` resource.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs[].namespace

+

Optional

+ + +

string

+

{% verbatim %}The `namespace` field of a `Project` resource.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.services

+

Optional

+ + +

list (string)

+

{% verbatim %}An identifier of the service, such as the name of the executable, job, or Google App Engine service name. This field is expected to have a low number of values that are relatively stable over time, as opposed to `version`, which can be changed whenever new code is deployed.

Contains the service name for error reports extracted from Google App Engine logs or `default` if the App Engine default service is used.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.services[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.versions

+

Optional

+ + +

list (string)

+

{% verbatim %}Represents the source code version that the developer provided, which could represent a version label or a Git SHA-1 hash, for example. For App Engine standard environment, the version is set to the version of the app.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.versions[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ +

mosaicLayout.tiles[].widget.logsPanel

@@ -9702,6 +10090,122 @@ rowLayout:

{% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}

+ + +

rowLayout.rows[].widgets[].errorReportingPanel

+

Optional

+ + +

object

+

{% verbatim %}A widget that displays a list of error groups.{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs

+

Optional

+ + +

list (object)

+

{% verbatim %}The projects from which to gather errors.{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs[]

+

Optional

+ + +

object

+

{% verbatim %}The Project that this resource belongs to.{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs[].external

+

Optional

+ + +

string

+

{% verbatim %}The `projectID` field of a project, when not managed by KCC.{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs[].kind

+

Optional

+ + +

string

+

{% verbatim %}The kind of the Project resource; optional but must be `Project` if provided.{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs[].name

+

Optional

+ + +

string

+

{% verbatim %}The `name` field of a `Project` resource.{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs[].namespace

+

Optional

+ + +

string

+

{% verbatim %}The `namespace` field of a `Project` resource.{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].errorReportingPanel.services

+

Optional

+ + +

list (string)

+

{% verbatim %}An identifier of the service, such as the name of the executable, job, or Google App Engine service name. This field is expected to have a low number of values that are relatively stable over time, as opposed to `version`, which can be changed whenever new code is deployed.

Contains the service name for error reports extracted from Google App Engine logs or `default` if the App Engine default service is used.{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].errorReportingPanel.services[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].errorReportingPanel.versions

+

Optional

+ + +

list (string)

+

{% verbatim %}Represents the source code version that the developer provided, which could represent a version label or a Git SHA-1 hash, for example. For App Engine standard environment, the version is set to the version of the app.{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].errorReportingPanel.versions[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ +

rowLayout.rows[].widgets[].logsPanel

From f449be4023fe8c60f27e60d9705fd01a52b41d57 Mon Sep 17 00:00:00 2001 From: Yuwen Ma Date: Thu, 27 Jun 2024 22:09:27 +0000 Subject: [PATCH 080/101] feat: use resource mapper to generate g3doc for direct resource" " --- pkg/apis/core/v1alpha1/servicemapping_types.go | 5 +++++ scripts/generate-crds/main.go | 9 +++++++++ 2 files changed, 14 insertions(+) diff --git a/pkg/apis/core/v1alpha1/servicemapping_types.go b/pkg/apis/core/v1alpha1/servicemapping_types.go index 51bf4a097d..fc6af2b26b 100644 --- a/pkg/apis/core/v1alpha1/servicemapping_types.go +++ b/pkg/apis/core/v1alpha1/servicemapping_types.go @@ -54,6 +54,11 @@ type ResourceConfig struct { // If unset, the default API version of the service mapping will be used. Version *string `json:"version"` + // Direct tells if the ResourceConfig is for ConfigConnector directly managed resources. + // Directly managed resource does not use Terraform or DCL controller, and do not rely on any TF specified fields like `SkipImport` + // A direct ResourceConfig is used to generate g3doc. + Direct bool `json:"direct"` + // SkipImport skips the import step when fetching the live state of the underlying // resource. If specified, IDTemplate must also be specified, and its expanded // form will be used as the TF resource's `id` field. diff --git a/scripts/generate-crds/main.go b/scripts/generate-crds/main.go index f007b65f52..a36c88b9fd 100644 --- a/scripts/generate-crds/main.go +++ b/scripts/generate-crds/main.go @@ -122,13 +122,22 @@ func generateTFBasedCRDs() []*apiextensions.CustomResourceDefinition { log.Fatal(err) } crds := make([]*apiextensions.CustomResourceDefinition, 0) + directCount := 0 for _, rc := range rcs { + if rc.Direct { + fmt.Printf("skip generate TF-based CRD for direct resource %s\n", rc.Kind) + directCount += 1 + continue + } crd, err := crdgeneration.GenerateTF2CRD(&sm, rc) if err != nil { log.Fatalf("error generating CRD for %v: %v", rc.Name, err) } crds = append(crds, crd) } + if directCount == len(rcs) { + continue + } crd, err := mergeCRDs(crds) if err != nil { log.Fatalf("error merging CRDs for kind %v: %v", kind, err) From f44f6b6a60200853c9bd393881819bdd88cee2fb Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Thu, 27 Jun 2024 20:55:27 +0000 Subject: [PATCH 081/101] chore: add overrides for cloudids Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- pkg/resourceoverrides/cloudids_overrides.go | 72 +++++++++++++++++++++ pkg/resourceoverrides/overrides.go | 2 + 2 files changed, 74 insertions(+) create mode 100644 pkg/resourceoverrides/cloudids_overrides.go diff --git a/pkg/resourceoverrides/cloudids_overrides.go b/pkg/resourceoverrides/cloudids_overrides.go new file mode 100644 index 0000000000..3f83b89d9d --- /dev/null +++ b/pkg/resourceoverrides/cloudids_overrides.go @@ -0,0 +1,72 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resourceoverrides + +import ( + "fmt" + + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/k8s" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +func GetCloudIDSEndpointResourceOverrides() ResourceOverrides { + return ResourceOverrides{ + Kind: "CloudIDSEndpoint", + Overrides: []ResourceOverride{ + mapEndpointIpToEndpointIP(), + }, + } +} + +func mapEndpointIpToEndpointIP() ResourceOverride { + return ResourceOverride{ + CRDDecorate: func(crd *apiextensions.CustomResourceDefinition) error { + for _, version := range crd.Spec.Versions { + schema := version.Schema.OpenAPIV3Schema + status, ok := schema.Properties["status"] + if !ok { + return fmt.Errorf("status field not found for version %s", version.Name) + } + observedState, ok := status.Properties["observedState"] + if !ok { + return fmt.Errorf("observedState field not found for version %s", version.Name) + } + endpointIp, ok := observedState.Properties["endpointIp"] + if !ok { + return fmt.Errorf("endpointIp field not found for version %s", version.Name) + } + + // Rename endpointIp to endpointIP + observedState.Properties["endpointIP"] = endpointIp + delete(observedState.Properties, "endpointIp") + } + + return nil + }, + PostActuationTransform: func(original, reconciled *k8s.Resource, tfState *terraform.InstanceState, dclState *unstructured.Unstructured) error { + endpointIp, ok := reconciled.Status["endpointIp"] + if !ok { + return fmt.Errorf("endpointIp field was not populated") + } + reconciled.Status["endpointIP"] = endpointIp + delete(reconciled.Status, "endpointIp") + + return nil + }, + } +} diff --git a/pkg/resourceoverrides/overrides.go b/pkg/resourceoverrides/overrides.go index 729628477e..8df5f9f1db 100644 --- a/pkg/resourceoverrides/overrides.go +++ b/pkg/resourceoverrides/overrides.go @@ -236,4 +236,6 @@ func init() { // IAM Handler.Register(GetIAMCustomRoleResourceOverrides()) + + Handler.Register(GetCloudIDSEndpointResourceOverrides()) } From 50f0e94fdbdd6be9b129becca8d6d912cebafa63 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Thu, 27 Jun 2024 21:04:02 +0000 Subject: [PATCH 082/101] chore: make ready-pr Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- ...tion_cloudidsendpoints.cloudids.cnrm.cloud.google.com.yaml | 4 ++-- .../generated/apis/cloudids/v1beta1/cloudidsendpoint_types.go | 2 +- .../generated/apis/cloudids/v1beta1/zz_generated.deepcopy.go | 4 ++-- .../generated/resource-docs/cloudids/cloudidsendpoint.md | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudidsendpoints.cloudids.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudidsendpoints.cloudids.cnrm.cloud.google.com.yaml index 8c18da9c9b..1bfc853cb8 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudidsendpoints.cloudids.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_cloudidsendpoints.cloudids.cnrm.cloud.google.com.yaml @@ -189,7 +189,7 @@ spec: description: URL of the endpoint's network address to which traffic is to be sent by Packet Mirroring. type: string - endpointIp: + endpointIP: description: Internal IP address of the endpoint's network entry point. 
type: string @@ -370,7 +370,7 @@ spec: description: URL of the endpoint's network address to which traffic is to be sent by Packet Mirroring. type: string - endpointIp: + endpointIP: description: Internal IP address of the endpoint's network entry point. type: string diff --git a/pkg/clients/generated/apis/cloudids/v1beta1/cloudidsendpoint_types.go b/pkg/clients/generated/apis/cloudids/v1beta1/cloudidsendpoint_types.go index 5be2e9d83e..79a198ca01 100644 --- a/pkg/clients/generated/apis/cloudids/v1beta1/cloudidsendpoint_types.go +++ b/pkg/clients/generated/apis/cloudids/v1beta1/cloudidsendpoint_types.go @@ -74,7 +74,7 @@ type EndpointObservedStateStatus struct { /* Internal IP address of the endpoint's network entry point. */ // +optional - EndpointIp *string `json:"endpointIp,omitempty"` + EndpointIP *string `json:"endpointIP,omitempty"` /* Last update timestamp in RFC 3339 text format. */ // +optional diff --git a/pkg/clients/generated/apis/cloudids/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/cloudids/v1beta1/zz_generated.deepcopy.go index 37865236f2..8242b7dfaa 100644 --- a/pkg/clients/generated/apis/cloudids/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/cloudids/v1beta1/zz_generated.deepcopy.go @@ -167,8 +167,8 @@ func (in *EndpointObservedStateStatus) DeepCopyInto(out *EndpointObservedStateSt *out = new(string) **out = **in } - if in.EndpointIp != nil { - in, out := &in.EndpointIp, &out.EndpointIp + if in.EndpointIP != nil { + in, out := &in.EndpointIP, &out.EndpointIP *out = new(string) **out = **in } diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md index 064240f3a4..200439580a 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudids/cloudidsendpoint.md @@ -254,7 +254,7 @@ observedGeneration: integer observedState: createTime: string endpointForwardingRule: string - endpointIp: string + endpointIP: string updateTime: string ``` @@ -343,7 +343,7 @@ observedState: - observedState.endpointIp + observedState.endpointIP

string

{% verbatim %}Internal IP address of the endpoint's network entry point.{% endverbatim %}

From d10a7f21082a7fb375797c4b44b21c1c8eb75f65 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Thu, 27 Jun 2024 23:01:53 +0000 Subject: [PATCH 083/101] chore: update acronym extensions Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- tests/apichecks/testdata/exceptions/acronyms.txt | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/apichecks/testdata/exceptions/acronyms.txt b/tests/apichecks/testdata/exceptions/acronyms.txt index 84649cd7a1..9b2846daf0 100644 --- a/tests/apichecks/testdata/exceptions/acronyms.txt +++ b/tests/apichecks/testdata/exceptions/acronyms.txt @@ -77,8 +77,6 @@ [acronyms] crd=cloudfunctionsfunctions.cloudfunctions.cnrm.cloud.google.com version=v1beta1: field ".spec.sourceArchiveUrl" should be ".spec.sourceArchiveURL" [acronyms] crd=cloudfunctionsfunctions.cloudfunctions.cnrm.cloud.google.com version=v1beta1: field ".status.sourceRepository.deployedUrl" should be ".status.sourceRepository.deployedURL" [acronyms] crd=cloudfunctionsfunctions.cloudfunctions.cnrm.cloud.google.com version=v1beta1: field ".status.versionId" should be ".status.versionID" -[acronyms] crd=cloudidsendpoints.cloudids.cnrm.cloud.google.com version=v1alpha1: field ".status.observedState.endpointIp" should be ".status.observedState.endpointIP" -[acronyms] crd=cloudidsendpoints.cloudids.cnrm.cloud.google.com version=v1beta1: field ".status.observedState.endpointIp" should be ".status.observedState.endpointIP" [acronyms] crd=cloudiotdevices.cloudiot.cnrm.cloud.google.com version=v1alpha1: field ".spec.gatewayConfig.lastAccessedGatewayId" should be ".spec.gatewayConfig.lastAccessedGatewayID" [acronyms] crd=cloudiotdevices.cloudiot.cnrm.cloud.google.com version=v1alpha1: field ".status.numId" should be ".status.numID" [acronyms] crd=cloudschedulerjobs.cloudscheduler.cnrm.cloud.google.com version=v1beta1: field ".spec.appEngineHttpTarget" should be ".spec.appEngineHTTPTarget" From 47ec3c7d13528091d35926d406706c215c1d7b14 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Thu, 27 Jun 2024 23:06:59 +0000 Subject: [PATCH 084/101] chore: update acronym extensions Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- tests/apichecks/testdata/exceptions/acronyms.txt | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/apichecks/testdata/exceptions/acronyms.txt b/tests/apichecks/testdata/exceptions/acronyms.txt index 7edd4580ed..19b7e74b65 100644 --- a/tests/apichecks/testdata/exceptions/acronyms.txt +++ b/tests/apichecks/testdata/exceptions/acronyms.txt @@ -160,8 +160,6 @@ [acronyms] crd=computeinterconnectattachments.compute.cnrm.cloud.google.com version=v1beta1: field ".status.cloudRouterIpAddress" should be ".status.cloudRouterIPAddress" [acronyms] crd=computeinterconnectattachments.compute.cnrm.cloud.google.com version=v1beta1: field ".status.customerRouterIpAddress" should be ".status.customerRouterIPAddress" [acronyms] crd=computeinterconnectattachments.compute.cnrm.cloud.google.com version=v1beta1: field ".status.googleReferenceId" should be ".status.googleReferenceID" -[acronyms] crd=computemanagedsslcertificates.compute.cnrm.cloud.google.com version=v1alpha1: field ".status.observedState.certificateId" should be ".status.observedState.certificateID" -[acronyms] crd=computemanagedsslcertificates.compute.cnrm.cloud.google.com version=v1beta1: field ".status.observedState.certificateId" should be ".status.observedState.certificateID" [acronyms] 
crd=computenetworkfirewallpolicies.compute.cnrm.cloud.google.com version=v1beta1: field ".status.networkFirewallPolicyId" should be ".status.networkFirewallPolicyID" [acronyms] crd=computenetworkfirewallpolicies.compute.cnrm.cloud.google.com version=v1beta1: field ".status.selfLinkWithId" should be ".status.selfLinkWithID" [acronyms] crd=computenetworkfirewallpolicyrules.compute.cnrm.cloud.google.com version=v1alpha1: field ".spec.match.destIpRanges" should be ".spec.match.destIPRanges" From 65d5a73a03208fd76367e03eabb55eb6c28acd41 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Thu, 27 Jun 2024 23:34:59 +0000 Subject: [PATCH 085/101] fixup! chore: add overrides for cloudids --- pkg/resourceoverrides/cloudids_overrides.go | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/pkg/resourceoverrides/cloudids_overrides.go b/pkg/resourceoverrides/cloudids_overrides.go index 3f83b89d9d..9ed46f3fa2 100644 --- a/pkg/resourceoverrides/cloudids_overrides.go +++ b/pkg/resourceoverrides/cloudids_overrides.go @@ -59,12 +59,25 @@ func mapEndpointIpToEndpointIP() ResourceOverride { return nil }, PostActuationTransform: func(original, reconciled *k8s.Resource, tfState *terraform.InstanceState, dclState *unstructured.Unstructured) error { - endpointIp, ok := reconciled.Status["endpointIp"] + observedState, found := reconciled.Status["observedState"] + if !found { + // if there is no observedState there is nothing to do! + return nil + } + observedStateM, ok := observedState.(map[string]interface{}) if !ok { - return fmt.Errorf("endpointIp field was not populated") + return fmt.Errorf("cannot parse observedState map") + } + endpointIp, found := observedStateM["endpointIp"] + if !found { + // field endpointIp not populated + return nil } - reconciled.Status["endpointIP"] = endpointIp - delete(reconciled.Status, "endpointIp") + + observedStateM["endpointIP"] = endpointIp + delete(observedStateM, "endpointIp") + + reconciled.Status["observedState"] = observedStateM return nil }, From fa81fc9e4c11a5644f28715393d75e314a50efd7 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Thu, 27 Jun 2024 23:40:07 +0000 Subject: [PATCH 086/101] chore: add cmsllcer overrides Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- pkg/resourceoverrides/compute_overrides.go | 85 ++++++++++++++++++++++ pkg/resourceoverrides/overrides.go | 1 + 2 files changed, 86 insertions(+) create mode 100644 pkg/resourceoverrides/compute_overrides.go diff --git a/pkg/resourceoverrides/compute_overrides.go b/pkg/resourceoverrides/compute_overrides.go new file mode 100644 index 0000000000..2ea4ab4be8 --- /dev/null +++ b/pkg/resourceoverrides/compute_overrides.go @@ -0,0 +1,85 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resourceoverrides + +import ( + "fmt" + + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/k8s" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +func GetComputeMangedSSLCertificateResourceOverrides() ResourceOverrides { + return ResourceOverrides{ + Kind: "ComputeManagedSSLCertificate", + Overrides: []ResourceOverride{ + mapCertificateIdToCertificateID(), + }, + } +} + +func mapCertificateIdToCertificateID() ResourceOverride { + return ResourceOverride{ + CRDDecorate: func(crd *apiextensions.CustomResourceDefinition) error { + for _, version := range crd.Spec.Versions { + schema := version.Schema.OpenAPIV3Schema + status, ok := schema.Properties["status"] + if !ok { + return fmt.Errorf("status field not found for version %s", version.Name) + } + observedState, ok := status.Properties["observedState"] + if !ok { + return fmt.Errorf("observedState field not found for version %s", version.Name) + } + certificateId, ok := observedState.Properties["certificateId"] + if !ok { + return fmt.Errorf("certificateId field not found for version %s", version.Name) + } + + // Rename certificateId to certificateID + observedState.Properties["certificateID"] = certificateId + delete(observedState.Properties, "certificateId") + } + + return nil + }, + PostActuationTransform: func(original, reconciled *k8s.Resource, tfState *terraform.InstanceState, dclState *unstructured.Unstructured) error { + observedState, found := reconciled.Status["observedState"] + if !found { + // if there is no observedState there is nothing to do! + return nil + } + observedStateM, ok := observedState.(map[string]interface{}) + if !ok { + return fmt.Errorf("cannot parse observedState map") + } + certificateId, found := observedStateM["certificateId"] + if !found { + // field certificateId not populated + return nil + } + + observedStateM["certificateID"] = certificateId + delete(observedStateM, "certificateId") + + reconciled.Status["observedState"] = observedStateM + + return nil + }, + } +} diff --git a/pkg/resourceoverrides/overrides.go b/pkg/resourceoverrides/overrides.go index 729628477e..53c1182dca 100644 --- a/pkg/resourceoverrides/overrides.go +++ b/pkg/resourceoverrides/overrides.go @@ -233,6 +233,7 @@ func init() { Handler.Register(GetRedisInstanceResourceOverrides()) Handler.Register(GetRunServiceResourceOverrides()) Handler.Register(GetAlloyDBInstanceResourceOverrides()) + Handler.Register(GetComputeMangedSSLCertificateResourceOverrides()) // IAM Handler.Register(GetIAMCustomRoleResourceOverrides()) From 207a4d78b15546840b7a5e75a9357420fdcdee79 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Thu, 27 Jun 2024 23:44:12 +0000 Subject: [PATCH 087/101] refactor: compute_overrides Signed-off-by: Alex Pana <8968914+acpana@users.noreply.github.com> --- .../compute_backendservice.go | 64 ---------- .../compute_forwardingrule.go | 55 --------- pkg/resourceoverrides/compute_instance.go | 68 ----------- pkg/resourceoverrides/compute_overrides.go | 115 ++++++++++++++++++ 4 files changed, 115 insertions(+), 187 deletions(-) delete mode 100644 pkg/resourceoverrides/compute_backendservice.go delete mode 100644 pkg/resourceoverrides/compute_forwardingrule.go delete mode 100644 pkg/resourceoverrides/compute_instance.go diff --git a/pkg/resourceoverrides/compute_backendservice.go b/pkg/resourceoverrides/compute_backendservice.go 
deleted file mode 100644 index c52bf63a33..0000000000 --- a/pkg/resourceoverrides/compute_backendservice.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package resourceoverrides - -import ( - "fmt" - "strings" - - "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/k8s" - - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" -) - -var ( - oauth2ClientIDPath = []string{"iap", "oauth2ClientId"} - oauth2ClientIDRefPath = []string{"iap", "oauth2ClientIdRef"} -) - -func GetComputeBackendServiceResourceOverrides() ResourceOverrides { - ro := ResourceOverrides{ - Kind: "ComputeBackendService", - } - // Preserve the legacy non-reference field 'iap.oauth2ClientId' after it is changed to - // a reference field, 'iap.oauth2ClientIdRef'. - ro.Overrides = append(ro.Overrides, keepOauth2ClientIDField()) - return ro -} - -func keepOauth2ClientIDField() ResourceOverride { - o := ResourceOverride{} - o.CRDDecorate = func(crd *apiextensions.CustomResourceDefinition) error { - return PreserveMutuallyExclusiveNonReferenceField(crd, []string{"iap"}, oauth2ClientIDRefPath[1], oauth2ClientIDPath[1]) - } - o.PreActuationTransform = func(r *k8s.Resource) error { - if err := FavorAuthoritativeFieldOverLegacyField(r, oauth2ClientIDPath, oauth2ClientIDRefPath); err != nil { - return fmt.Errorf("error handling '%v' and '%v' fields in pre-actuation transformation: %w", strings.Join(oauth2ClientIDPath, "."), strings.Join(oauth2ClientIDRefPath, "."), err) - } - return nil - } - o.PostActuationTransform = func(original, reconciled *k8s.Resource, tfState *terraform.InstanceState, dclState *unstructured.Unstructured) error { - if err := PreserveUserSpecifiedLegacyField(original, reconciled, oauth2ClientIDPath...); err != nil { - return fmt.Errorf("error preserving '%v' in post-actuation transformation: %w", strings.Join(oauth2ClientIDPath, "."), err) - } - if err := PruneDefaultedAuthoritativeFieldIfOnlyLegacyFieldSpecified(original, reconciled, oauth2ClientIDPath, oauth2ClientIDRefPath); err != nil { - return fmt.Errorf("error conditionally pruning '%v' in post-actuation transformation: %w", strings.Join(oauth2ClientIDRefPath, "."), err) - } - return nil - } - return o -} diff --git a/pkg/resourceoverrides/compute_forwardingrule.go b/pkg/resourceoverrides/compute_forwardingrule.go deleted file mode 100644 index 0cbefff7a2..0000000000 --- a/pkg/resourceoverrides/compute_forwardingrule.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package resourceoverrides - -import ( - "context" - "strings" - - "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/resourceoverrides/operations" - "k8s.io/klog/v2" -) - -func GetComputeForwardingRuleResourceOverrides() ResourceOverrides { - ro := ResourceOverrides{ - Kind: "ComputeForwardingRule", - } - ro.Overrides = append(ro.Overrides, noLabelsOnCreate()) - return ro -} - -func noLabelsOnCreate() ResourceOverride { - o := ResourceOverride{} - - o.PreTerraformApply = func(ctx context.Context, op *operations.PreTerraformApply) error { - // There's some unexpected validation in forwardingRules, only when targeting serviceAttachments (PSC). - // We can't specify labels in the create operation. Terraform gets this wrong: https://github.com/hashicorp/terraform-provider-google/issues/16255 - // If we want the create to succeed, we cannot pass the labels. - // This does mean that the labels won't be applied on first reconciliation, but we don't have many options here. - // We do expect the labels will be applied next-time round. - // This is a shorter-term fix, we should investigate fixing terraform or possibly replacing terraform with something we can fix directly. - if op.LiveState.Empty() { - target, ok := op.TerraformConfig.Config["target"].(string) - if ok && strings.Contains(target, "/serviceAttachments/") { - klog.Infof("removing labels before creating forwardingRule with serviceAttachment target") - delete(op.TerraformConfig.Config, "labels") - } - } - - return nil - } - - return o -} diff --git a/pkg/resourceoverrides/compute_instance.go b/pkg/resourceoverrides/compute_instance.go deleted file mode 100644 index ac87ea9ac4..0000000000 --- a/pkg/resourceoverrides/compute_instance.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package resourceoverrides - -import ( - "fmt" - "strings" - - "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/k8s" - - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" -) - -var ( - networkInterfacePath = []string{"networkInterface"} - networkIPFieldPath = []string{"networkIp"} - networkIPRefFieldPath = []string{"networkIpRef"} - supportedKinds = []string{"ComputeAddress"} -) - -func GetComputeInstanceResourceOverrides() ResourceOverrides { - ro := ResourceOverrides{ - Kind: "ComputeInstance", - } - ro.Overrides = append(ro.Overrides, addNetworkIPRefField()) - return ro -} - -func addNetworkIPRefField() ResourceOverride { - o := ResourceOverride{} - o.CRDDecorate = func(crd *apiextensions.CustomResourceDefinition) error { - if err := PreserveMutuallyExclusiveNonReferenceField(crd, networkInterfacePath, networkIPRefFieldPath[0], networkIPFieldPath[0]); err != nil { - return err - } - - return EnsureReferenceFieldIsMultiKind(crd, networkInterfacePath, networkIPRefFieldPath[0], supportedKinds) - } - o.PreActuationTransform = func(r *k8s.Resource) error { - if err := FavorReferenceFieldOverNonReferenceFieldUnderSlice(r, networkInterfacePath, networkIPFieldPath, networkIPRefFieldPath); err != nil { - return fmt.Errorf("error handling '%v' and '%v' fields in pre-actuation transformation: %w", strings.Join(networkIPFieldPath, "."), strings.Join(networkIPRefFieldPath, "."), err) - } - return nil - } - o.PostActuationTransform = func(original, reconciled *k8s.Resource, tfState *terraform.InstanceState, dclState *unstructured.Unstructured) error { - if err := PreserveUserSpecifiedLegacyFieldUnderSlice(original, reconciled, networkInterfacePath, networkIPFieldPath); err != nil { - return fmt.Errorf("error preserving '%v' in post-actuation transformation: %w", strings.Join(networkIPFieldPath, "."), err) - } - if err := PruneDefaultedAuthoritativeFieldIfOnlyLegacyFieldSpecifiedUnderSlice(original, reconciled, networkInterfacePath, networkIPFieldPath, networkIPRefFieldPath); err != nil { - return fmt.Errorf("error conditionally pruning '%v' in post-actuation transformation: %w", strings.Join(networkIPRefFieldPath, "."), err) - } - return nil - } - return o -} diff --git a/pkg/resourceoverrides/compute_overrides.go b/pkg/resourceoverrides/compute_overrides.go index 2ea4ab4be8..c3482ec140 100644 --- a/pkg/resourceoverrides/compute_overrides.go +++ b/pkg/resourceoverrides/compute_overrides.go @@ -15,13 +15,28 @@ package resourceoverrides import ( + "context" "fmt" + "strings" "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/k8s" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/resourceoverrides/operations" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/klog/v2" +) + +var ( + networkInterfacePath = []string{"networkInterface"} + networkIPFieldPath = []string{"networkIp"} + + oauth2ClientIDPath = []string{"iap", "oauth2ClientId"} + oauth2ClientIDRefPath = []string{"iap", "oauth2ClientIdRef"} + + networkIPRefFieldPath = []string{"networkIpRef"} + supportedKinds = []string{"ComputeAddress"} ) func GetComputeMangedSSLCertificateResourceOverrides() ResourceOverrides { @@ -83,3 +98,103 @@ func mapCertificateIdToCertificateID() ResourceOverride { }, } } + +func GetComputeInstanceResourceOverrides() 
ResourceOverrides { + ro := ResourceOverrides{ + Kind: "ComputeInstance", + } + ro.Overrides = append(ro.Overrides, addNetworkIPRefField()) + return ro +} + +func addNetworkIPRefField() ResourceOverride { + o := ResourceOverride{} + o.CRDDecorate = func(crd *apiextensions.CustomResourceDefinition) error { + if err := PreserveMutuallyExclusiveNonReferenceField(crd, networkInterfacePath, networkIPRefFieldPath[0], networkIPFieldPath[0]); err != nil { + return err + } + + return EnsureReferenceFieldIsMultiKind(crd, networkInterfacePath, networkIPRefFieldPath[0], supportedKinds) + } + o.PreActuationTransform = func(r *k8s.Resource) error { + if err := FavorReferenceFieldOverNonReferenceFieldUnderSlice(r, networkInterfacePath, networkIPFieldPath, networkIPRefFieldPath); err != nil { + return fmt.Errorf("error handling '%v' and '%v' fields in pre-actuation transformation: %w", strings.Join(networkIPFieldPath, "."), strings.Join(networkIPRefFieldPath, "."), err) + } + return nil + } + o.PostActuationTransform = func(original, reconciled *k8s.Resource, tfState *terraform.InstanceState, dclState *unstructured.Unstructured) error { + if err := PreserveUserSpecifiedLegacyFieldUnderSlice(original, reconciled, networkInterfacePath, networkIPFieldPath); err != nil { + return fmt.Errorf("error preserving '%v' in post-actuation transformation: %w", strings.Join(networkIPFieldPath, "."), err) + } + if err := PruneDefaultedAuthoritativeFieldIfOnlyLegacyFieldSpecifiedUnderSlice(original, reconciled, networkInterfacePath, networkIPFieldPath, networkIPRefFieldPath); err != nil { + return fmt.Errorf("error conditionally pruning '%v' in post-actuation transformation: %w", strings.Join(networkIPRefFieldPath, "."), err) + } + return nil + } + return o +} + +func GetComputeForwardingRuleResourceOverrides() ResourceOverrides { + ro := ResourceOverrides{ + Kind: "ComputeForwardingRule", + } + ro.Overrides = append(ro.Overrides, noLabelsOnCreate()) + return ro +} + +func noLabelsOnCreate() ResourceOverride { + o := ResourceOverride{} + + o.PreTerraformApply = func(ctx context.Context, op *operations.PreTerraformApply) error { + // There's some unexpected validation in forwardingRules, only when targeting serviceAttachments (PSC). + // We can't specify labels in the create operation. Terraform gets this wrong: https://github.com/hashicorp/terraform-provider-google/issues/16255 + // If we want the create to succeed, we cannot pass the labels. + // This does mean that the labels won't be applied on first reconciliation, but we don't have many options here. + // We do expect the labels will be applied next-time round. + // This is a shorter-term fix, we should investigate fixing terraform or possibly replacing terraform with something we can fix directly. + if op.LiveState.Empty() { + target, ok := op.TerraformConfig.Config["target"].(string) + if ok && strings.Contains(target, "/serviceAttachments/") { + klog.Infof("removing labels before creating forwardingRule with serviceAttachment target") + delete(op.TerraformConfig.Config, "labels") + } + } + + return nil + } + + return o +} + +func GetComputeBackendServiceResourceOverrides() ResourceOverrides { + ro := ResourceOverrides{ + Kind: "ComputeBackendService", + } + // Preserve the legacy non-reference field 'iap.oauth2ClientId' after it is changed to + // a reference field, 'iap.oauth2ClientIdRef'. 
+ ro.Overrides = append(ro.Overrides, keepOauth2ClientIDField()) + return ro +} + +func keepOauth2ClientIDField() ResourceOverride { + o := ResourceOverride{} + o.CRDDecorate = func(crd *apiextensions.CustomResourceDefinition) error { + return PreserveMutuallyExclusiveNonReferenceField(crd, []string{"iap"}, oauth2ClientIDRefPath[1], oauth2ClientIDPath[1]) + } + o.PreActuationTransform = func(r *k8s.Resource) error { + if err := FavorAuthoritativeFieldOverLegacyField(r, oauth2ClientIDPath, oauth2ClientIDRefPath); err != nil { + return fmt.Errorf("error handling '%v' and '%v' fields in pre-actuation transformation: %w", strings.Join(oauth2ClientIDPath, "."), strings.Join(oauth2ClientIDRefPath, "."), err) + } + return nil + } + o.PostActuationTransform = func(original, reconciled *k8s.Resource, tfState *terraform.InstanceState, dclState *unstructured.Unstructured) error { + if err := PreserveUserSpecifiedLegacyField(original, reconciled, oauth2ClientIDPath...); err != nil { + return fmt.Errorf("error preserving '%v' in post-actuation transformation: %w", strings.Join(oauth2ClientIDPath, "."), err) + } + if err := PruneDefaultedAuthoritativeFieldIfOnlyLegacyFieldSpecified(original, reconciled, oauth2ClientIDPath, oauth2ClientIDRefPath); err != nil { + return fmt.Errorf("error conditionally pruning '%v' in post-actuation transformation: %w", strings.Join(oauth2ClientIDRefPath, "."), err) + } + return nil + } + return o +} From 29eb12bef4aeb334d79f2a6366fe1307f64d5e80 Mon Sep 17 00:00:00 2001 From: Alex Pana <8968914+acpana@users.noreply.github.com> Date: Thu, 27 Jun 2024 23:56:21 +0000 Subject: [PATCH 088/101] Revert "fix: third_party changes to rename" This reverts commit 6dfddb691f64fc2d1d82e19c149a1fb7adbc0122. --- .../compute/resource_compute_managed_ssl_certificate.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/services/compute/resource_compute_managed_ssl_certificate.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/services/compute/resource_compute_managed_ssl_certificate.go index c00e1e484a..6aeaa228ee 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/services/compute/resource_compute_managed_ssl_certificate.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/services/compute/resource_compute_managed_ssl_certificate.go @@ -100,7 +100,7 @@ These are in the same namespace as the managed SSL certificates.`, which type this is. 
Default value: "MANAGED" Possible values: ["MANAGED"]`, Default: "MANAGED", }, - "certificate_i_d": { + "certificate_id": { Type: schema.TypeInt, Computed: true, Description: `The unique identifier for the resource.`, @@ -271,7 +271,7 @@ func resourceComputeManagedSslCertificateRead(d *schema.ResourceData, meta inter if err := d.Set("description", flattenComputeManagedSslCertificateDescription(res["description"], d, config)); err != nil { return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) } - if err := d.Set("certificate_i_d", flattenComputeManagedSslCertificateCertificateId(res["id"], d, config)); err != nil { + if err := d.Set("certificate_id", flattenComputeManagedSslCertificateCertificateId(res["id"], d, config)); err != nil { return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) } if err := d.Set("name", flattenComputeManagedSslCertificateName(res["name"], d, config)); err != nil { From f609528092eb83ad61e2bad3e6b40bd8d2489009 Mon Sep 17 00:00:00 2001 From: justinsb Date: Thu, 27 Jun 2024 22:05:11 -0400 Subject: [PATCH 089/101] monitoringdashboard: add id, targetAxis, singleViewGroup * Added `singleViewGroup` widgets. * Added `id` field to all widgets. * Added `targetAxis` field to thresholds. --- .../v1beta1/monitoringdashboard_types.go | 5 - .../v1beta1/zz_generated.deepcopy.go | 15 ++ ...ards.monitoring.cnrm.cloud.google.com.yaml | 76 ++++++++ docs/releasenotes/release-1.120.md | 7 +- .../v1beta1/monitoringdashboard_types.go | 23 +++ .../v1beta1/zz_generated.deepcopy.go | 41 ++++ .../dashboard_generated.mappings.go | 15 +- ...ated_export_monitoringdashboardfull.golden | 10 +- ...object_monitoringdashboardfull.golden.yaml | 10 +- .../monitoringdashboardfull/_http.log | 39 ++++ .../monitoringdashboardfull/create.yaml | 8 + .../monitoring/monitoringdashboard.md | 176 ++++++++++++++++++ 12 files changed, 409 insertions(+), 16 deletions(-) diff --git a/apis/monitoring/v1beta1/monitoringdashboard_types.go b/apis/monitoring/v1beta1/monitoringdashboard_types.go index 7ed3e1f2a5..5d90b72a36 100644 --- a/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -473,15 +473,12 @@ type Widget struct { // dashboard. SectionHeader *SectionHeader `json:"sectionHeader,omitempty"` - /*NOTYET // A widget that groups the other widgets by using a dropdown menu. SingleViewGroup *SingleViewGroup `json:"singleViewGroup,omitempty"` - // Optional. The widget id. Ids may be made up of alphanumerics, dashes and // underscores. Widget ids are optional. Id *string `json:"id,omitempty"` - */ // A chart of alert policy data. AlertChart *AlertChart `json:"alertChart,omitempty"` @@ -508,11 +505,9 @@ type Threshold struct { // XyChart. Direction *string `json:"direction,omitempty"` - /*NOTYET // The target axis to use for plotting the threshold. Target axis is not // allowed in a Scorecard. 
TargetAxis *string `json:"targetAxis,omitempty"` - */ } // +kcc:proto=google.monitoring.dashboard.v1.TimeSeriesFilter diff --git a/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/apis/monitoring/v1beta1/zz_generated.deepcopy.go index 75557f7e57..c0c3140fca 100644 --- a/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -991,6 +991,11 @@ func (in *Threshold) DeepCopyInto(out *Threshold) { *out = new(string) **out = **in } + if in.TargetAxis != nil { + in, out := &in.TargetAxis, &out.TargetAxis + *out = new(string) + **out = **in + } return } @@ -1283,6 +1288,16 @@ func (in *Widget) DeepCopyInto(out *Widget) { *out = new(SectionHeader) (*in).DeepCopyInto(*out) } + if in.SingleViewGroup != nil { + in, out := &in.SingleViewGroup, &out.SingleViewGroup + *out = new(SingleViewGroup) + **out = **in + } + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } if in.AlertChart != nil { in, out := &in.AlertChart, &out.AlertChart *out = new(AlertChart) diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml index d51e4a12de..d0245c3cf8 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml @@ -199,6 +199,11 @@ spec: type: string type: array type: object + id: + description: Optional. The widget id. Ids may be made + up of alphanumerics, dashes and underscores. Widget + ids are optional. + type: string logsPanel: description: A widget that shows a stream of logs. properties: @@ -959,6 +964,11 @@ spec: label: description: A label for the threshold. type: string + targetAxis: + description: The target axis to use for + plotting the threshold. Target axis is + not allowed in a Scorecard. + type: string value: description: The value of the threshold. The value should be defined in the native @@ -1515,6 +1525,10 @@ spec: description: The subtitle of the section type: string type: object + singleViewGroup: + description: A widget that groups the other widgets + by using a dropdown menu. + type: object text: description: A raw string or markdown displaying textual content. @@ -2189,6 +2203,11 @@ spec: label: description: A label for the threshold. type: string + targetAxis: + description: The target axis to use for + plotting the threshold. Target axis is + not allowed in a Scorecard. + type: string value: description: The value of the threshold. The value should be defined in the native @@ -2368,6 +2387,11 @@ spec: type: string type: array type: object + id: + description: Optional. The widget id. Ids may be made up + of alphanumerics, dashes and underscores. Widget ids are + optional. + type: string logsPanel: description: A widget that shows a stream of logs. properties: @@ -3088,6 +3112,11 @@ spec: label: description: A label for the threshold. type: string + targetAxis: + description: The target axis to use for plotting + the threshold. Target axis is not allowed in + a Scorecard. + type: string value: description: The value of the threshold. 
The value should be defined in the native scale of the @@ -3613,6 +3642,10 @@ spec: description: The subtitle of the section type: string type: object + singleViewGroup: + description: A widget that groups the other widgets by using + a dropdown menu. + type: object text: description: A raw string or markdown displaying textual content. @@ -4246,6 +4279,11 @@ spec: label: description: A label for the threshold. type: string + targetAxis: + description: The target axis to use for plotting + the threshold. Target axis is not allowed in + a Scorecard. + type: string value: description: The value of the threshold. The value should be defined in the native scale of the @@ -4429,6 +4467,11 @@ spec: type: string type: array type: object + id: + description: Optional. The widget id. Ids may be made + up of alphanumerics, dashes and underscores. Widget + ids are optional. + type: string logsPanel: description: A widget that shows a stream of logs. properties: @@ -5174,6 +5217,11 @@ spec: label: description: A label for the threshold. type: string + targetAxis: + description: The target axis to use for plotting + the threshold. Target axis is not allowed + in a Scorecard. + type: string value: description: The value of the threshold. The value should be defined in the native scale @@ -5718,6 +5766,10 @@ spec: description: The subtitle of the section type: string type: object + singleViewGroup: + description: A widget that groups the other widgets + by using a dropdown menu. + type: object text: description: A raw string or markdown displaying textual content. @@ -6377,6 +6429,11 @@ spec: label: description: A label for the threshold. type: string + targetAxis: + description: The target axis to use for plotting + the threshold. Target axis is not allowed + in a Scorecard. + type: string value: description: The value of the threshold. The value should be defined in the native scale @@ -6616,6 +6673,11 @@ spec: type: string type: array type: object + id: + description: Optional. The widget id. Ids may be made + up of alphanumerics, dashes and underscores. Widget + ids are optional. + type: string logsPanel: description: A widget that shows a stream of logs. properties: @@ -7376,6 +7438,11 @@ spec: label: description: A label for the threshold. type: string + targetAxis: + description: The target axis to use for + plotting the threshold. Target axis is + not allowed in a Scorecard. + type: string value: description: The value of the threshold. The value should be defined in the native @@ -7932,6 +7999,10 @@ spec: description: The subtitle of the section type: string type: object + singleViewGroup: + description: A widget that groups the other widgets + by using a dropdown menu. + type: object text: description: A raw string or markdown displaying textual content. @@ -8606,6 +8677,11 @@ spec: label: description: A label for the threshold. type: string + targetAxis: + description: The target axis to use for + plotting the threshold. Target axis is + not allowed in a Scorecard. + type: string value: description: The value of the threshold. The value should be defined in the native diff --git a/docs/releasenotes/release-1.120.md b/docs/releasenotes/release-1.120.md index 0920e45502..183bfe2ddf 100644 --- a/docs/releasenotes/release-1.120.md +++ b/docs/releasenotes/release-1.120.md @@ -29,9 +29,12 @@ output fields from GCP APIs are in `status.observedState.*` * `MonitoringDashboard` * Added `alertChart` widgets. * Added `collapsibleGroup` widgets. - * Added `style` fields to text widgets. 
- * Added `sectionHeader` widgets. * Added `pieChart` widgets. + * Added `sectionHeader` widgets. + * Added `singleViewGroup` widgets. + * Added `id` field to all widgets. + * Added `style` fields to text widgets. + * Added `targetAxis` field to thresholds. * `StorageBucket` * Added `spec.softDeletePolicy` field. diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go index 0c767767f6..6a0461eb89 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -422,6 +422,9 @@ type DashboardSectionHeader struct { Subtitle *string `json:"subtitle,omitempty"` } +type DashboardSingleViewGroup struct { +} + type DashboardSparkChartView struct { /* The lower bound on data point frequency in the chart implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes it would not make sense to fetch and align data at one minute intervals. This field is optional and exists only as a hint. */ // +optional @@ -488,6 +491,10 @@ type DashboardThresholds struct { // +optional Label *string `json:"label,omitempty"` + /* The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard. */ + // +optional + TargetAxis *string `json:"targetAxis,omitempty"` + /* The value of the threshold. The value should be defined in the native scale of the metric. */ // +optional Value *float64 `json:"value,omitempty"` @@ -585,6 +592,10 @@ type DashboardWidget struct { // +optional ErrorReportingPanel *DashboardErrorReportingPanel `json:"errorReportingPanel,omitempty"` + /* Optional. The widget id. Ids may be made up of alphanumerics, dashes and underscores. Widget ids are optional. */ + // +optional + Id *string `json:"id,omitempty"` + /* A widget that shows a stream of logs. */ // +optional LogsPanel *DashboardLogsPanel `json:"logsPanel,omitempty"` @@ -601,6 +612,10 @@ type DashboardWidget struct { // +optional SectionHeader *DashboardSectionHeader `json:"sectionHeader,omitempty"` + /* A widget that groups the other widgets by using a dropdown menu. */ + // +optional + SingleViewGroup *DashboardSingleViewGroup `json:"singleViewGroup,omitempty"` + /* A raw string or markdown displaying textual content. */ // +optional Text *DashboardText `json:"text,omitempty"` @@ -631,6 +646,10 @@ type DashboardWidgets struct { // +optional ErrorReportingPanel *DashboardErrorReportingPanel `json:"errorReportingPanel,omitempty"` + /* Optional. The widget id. Ids may be made up of alphanumerics, dashes and underscores. Widget ids are optional. */ + // +optional + Id *string `json:"id,omitempty"` + /* A widget that shows a stream of logs. */ // +optional LogsPanel *DashboardLogsPanel `json:"logsPanel,omitempty"` @@ -647,6 +666,10 @@ type DashboardWidgets struct { // +optional SectionHeader *DashboardSectionHeader `json:"sectionHeader,omitempty"` + /* A widget that groups the other widgets by using a dropdown menu. */ + // +optional + SingleViewGroup *DashboardSingleViewGroup `json:"singleViewGroup,omitempty"` + /* A raw string or markdown displaying textual content. 
*/ // +optional Text *DashboardText `json:"text,omitempty"` diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go index 4aa79bfaf0..0a97512a7d 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -1163,6 +1163,22 @@ func (in *DashboardSectionHeader) DeepCopy() *DashboardSectionHeader { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardSingleViewGroup) DeepCopyInto(out *DashboardSingleViewGroup) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardSingleViewGroup. +func (in *DashboardSingleViewGroup) DeepCopy() *DashboardSingleViewGroup { + if in == nil { + return nil + } + out := new(DashboardSingleViewGroup) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DashboardSparkChartView) DeepCopyInto(out *DashboardSparkChartView) { *out = *in @@ -1284,6 +1300,11 @@ func (in *DashboardThresholds) DeepCopyInto(out *DashboardThresholds) { *out = new(string) **out = **in } + if in.TargetAxis != nil { + in, out := &in.TargetAxis, &out.TargetAxis + *out = new(string) + **out = **in + } if in.Value != nil { in, out := &in.Value, &out.Value *out = new(float64) @@ -1469,6 +1490,11 @@ func (in *DashboardWidget) DeepCopyInto(out *DashboardWidget) { *out = new(DashboardErrorReportingPanel) (*in).DeepCopyInto(*out) } + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } if in.LogsPanel != nil { in, out := &in.LogsPanel, &out.LogsPanel *out = new(DashboardLogsPanel) @@ -1489,6 +1515,11 @@ func (in *DashboardWidget) DeepCopyInto(out *DashboardWidget) { *out = new(DashboardSectionHeader) (*in).DeepCopyInto(*out) } + if in.SingleViewGroup != nil { + in, out := &in.SingleViewGroup, &out.SingleViewGroup + *out = new(DashboardSingleViewGroup) + **out = **in + } if in.Text != nil { in, out := &in.Text, &out.Text *out = new(DashboardText) @@ -1540,6 +1571,11 @@ func (in *DashboardWidgets) DeepCopyInto(out *DashboardWidgets) { *out = new(DashboardErrorReportingPanel) (*in).DeepCopyInto(*out) } + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } if in.LogsPanel != nil { in, out := &in.LogsPanel, &out.LogsPanel *out = new(DashboardLogsPanel) @@ -1560,6 +1596,11 @@ func (in *DashboardWidgets) DeepCopyInto(out *DashboardWidgets) { *out = new(DashboardSectionHeader) (*in).DeepCopyInto(*out) } + if in.SingleViewGroup != nil { + in, out := &in.SingleViewGroup, &out.SingleViewGroup + *out = new(DashboardSingleViewGroup) + **out = **in + } if in.Text != nil { in, out := &in.Text, &out.Text *out = new(DashboardText) diff --git a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go index da7dfbd0c8..50352cac49 100644 --- a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go @@ -592,7 +592,7 @@ func Threshold_FromProto(mapCtx *MapContext, in *pb.Threshold) *krm.Threshold { out.Value = LazyPtr(in.GetValue()) out.Color = Enum_FromProto(mapCtx, in.Color) out.Direction = Enum_FromProto(mapCtx, in.Direction) - 
// MISSING: TargetAxis + out.TargetAxis = Enum_FromProto(mapCtx, in.TargetAxis) return out } func Threshold_ToProto(mapCtx *MapContext, in *krm.Threshold) *pb.Threshold { @@ -604,7 +604,7 @@ func Threshold_ToProto(mapCtx *MapContext, in *krm.Threshold) *pb.Threshold { out.Value = ValueOf(in.Value) out.Color = Enum_ToProto[pb.Threshold_Color](mapCtx, in.Color) out.Direction = Enum_ToProto[pb.Threshold_Direction](mapCtx, in.Direction) - // MISSING: TargetAxis + out.TargetAxis = Enum_ToProto[pb.Threshold_TargetAxis](mapCtx, in.TargetAxis) return out } func TimeSeriesFilter_FromProto(mapCtx *MapContext, in *pb.TimeSeriesFilter) *krm.TimeSeriesFilter { @@ -787,8 +787,8 @@ func Widget_FromProto(mapCtx *MapContext, in *pb.Widget) *krm.Widget { out.PieChart = PieChart_FromProto(mapCtx, in.GetPieChart()) out.ErrorReportingPanel = ErrorReportingPanel_FromProto(mapCtx, in.GetErrorReportingPanel()) out.SectionHeader = SectionHeader_FromProto(mapCtx, in.GetSectionHeader()) - // MISSING: SingleViewGroup - // MISSING: Id + out.SingleViewGroup = SingleViewGroup_FromProto(mapCtx, in.GetSingleViewGroup()) + out.Id = LazyPtr(in.GetId()) return out } func Widget_ToProto(mapCtx *MapContext, in *krm.Widget) *pb.Widget { @@ -829,11 +829,12 @@ func Widget_ToProto(mapCtx *MapContext, in *krm.Widget) *pb.Widget { if oneof := SectionHeader_ToProto(mapCtx, in.SectionHeader); oneof != nil { out.Content = &pb.Widget_SectionHeader{SectionHeader: oneof} } - // MISSING: SingleViewGroup - // MISSING: Id + if oneof := SingleViewGroup_ToProto(mapCtx, in.SingleViewGroup); oneof != nil { + out.Content = &pb.Widget_SingleViewGroup{SingleViewGroup: oneof} + } + out.Id = ValueOf(in.Id) return out } - func XyChart_FromProto(mapCtx *MapContext, in *pb.XyChart) *krm.XyChart { if in == nil { return nil diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden index 9da16417c8..66eb0ff6b3 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden @@ -7,6 +7,9 @@ spec: columns: - weight: 2 widgets: + - id: singleViewGroupWidget1 + singleViewGroup: {} + title: SingleViewGroup Widget - title: Widget 1 xyChart: dataSets: @@ -32,7 +35,8 @@ spec: pointerLocation: PL_TOP_LEFT textColor: '#fff' verticalAlignment: V_CENTER - - title: Widget 3 + - id: widget3 + title: Widget 3 xyChart: dataSets: - plotType: STACKED_BAR @@ -42,6 +46,10 @@ spec: perSeriesAligner: ALIGN_RATE filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" unitOverride: "1" + thresholds: + - label: Important + targetAxis: Y1 + value: 1.2 yAxis: label: y1Axis scale: LINEAR diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml index 3ceef5fe18..91069b0c18 100644 --- 
a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml @@ -16,6 +16,9 @@ spec: columns: - weight: 2 widgets: + - id: singleViewGroupWidget1 + singleViewGroup: {} + title: SingleViewGroup Widget - title: Widget 1 xyChart: dataSets: @@ -41,7 +44,8 @@ spec: pointerLocation: PL_TOP_LEFT textColor: '#fff' verticalAlignment: V_CENTER - - title: Widget 3 + - id: widget3 + title: Widget 3 xyChart: dataSets: - plotType: STACKED_BAR @@ -51,6 +55,10 @@ spec: perSeriesAligner: ALIGN_RATE filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count" unitOverride: "1" + thresholds: + - label: Important + targetAxis: Y1 + value: 1.2 yAxis: label: y1Axis scale: LINEAR diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log index 65eb332895..13cbd5835b 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log @@ -197,6 +197,11 @@ x-goog-request-params: parent=projects%2F${projectId} { "weight": "2", "widgets": [ + { + "id": "singleViewGroupWidget1", + "singleViewGroup": {}, + "title": "SingleViewGroup Widget" + }, { "title": "Widget 1", "xyChart": { @@ -237,6 +242,7 @@ x-goog-request-params: parent=projects%2F${projectId} } }, { + "id": "widget3", "title": "Widget 3", "xyChart": { "dataSets": [ @@ -253,6 +259,13 @@ x-goog-request-params: parent=projects%2F${projectId} } } ], + "thresholds": [ + { + "label": "Important", + "targetAxis": 1, + "value": 1.2 + } + ], "yAxis": { "label": "y1Axis", "scale": 1 @@ -355,6 +368,11 @@ X-Xss-Protection: 0 { "weight": "2", "widgets": [ + { + "id": "singleViewGroupWidget1", + "singleViewGroup": {}, + "title": "SingleViewGroup Widget" + }, { "title": "Widget 1", "xyChart": { @@ -396,6 +414,7 @@ X-Xss-Protection: 0 } }, { + "id": "widget3", "title": "Widget 3", "xyChart": { "dataSets": [ @@ -413,6 +432,13 @@ X-Xss-Protection: 0 } } ], + "thresholds": [ + { + "label": "Important", + "targetAxis": "Y1", + "value": 1.2 + } + ], "yAxis": { "label": "y1Axis", "scale": "LINEAR" @@ -523,6 +549,11 @@ X-Xss-Protection: 0 { "weight": "2", "widgets": [ + { + "id": "singleViewGroupWidget1", + "singleViewGroup": {}, + "title": "SingleViewGroup Widget" + }, { "title": "Widget 1", "xyChart": { @@ -564,6 +595,7 @@ X-Xss-Protection: 0 } }, { + "id": "widget3", "title": "Widget 3", "xyChart": { "dataSets": [ @@ -581,6 +613,13 @@ X-Xss-Protection: 0 } } ], + "thresholds": [ + { + "label": "Important", + "targetAxis": "Y1", + "value": 1.2 + } + ], "yAxis": { "label": "y1Axis", "scale": "LINEAR" diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml index cda6f28ed7..2c4693e849 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml +++ 
b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml @@ -22,6 +22,9 @@ spec: columns: - weight: 2 widgets: + - title: "SingleViewGroup Widget" + id: singleViewGroupWidget1 + singleViewGroup: {} - title: "Widget 1" xyChart: dataSets: @@ -48,6 +51,7 @@ spec: padding: P_MEDIUM pointerLocation: PL_TOP_LEFT - title: "Widget 3" + id: widget3 xyChart: dataSets: - timeSeriesQuery: @@ -57,6 +61,10 @@ spec: perSeriesAligner: ALIGN_RATE unitOverride: "1" plotType: "STACKED_BAR" + thresholds: + - label: "Important" + targetAxis: Y1 + value: 1.2 yAxis: label: y1Axis scale: LINEAR diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md index 76c792ff59..71d1510baf 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md @@ -99,6 +99,7 @@ columnLayout: - string versions: - string + id: string logsPanel: filter: string resourceNames: @@ -171,6 +172,7 @@ columnLayout: - color: string direction: string label: string + targetAxis: string value: float timeSeriesQuery: timeSeriesFilter: @@ -223,6 +225,7 @@ columnLayout: sectionHeader: dividerBelow: boolean subtitle: string + singleViewGroup: {} text: content: string format: string @@ -294,6 +297,7 @@ columnLayout: - color: string direction: string label: string + targetAxis: string value: float timeshiftDuration: string xAxis: @@ -324,6 +328,7 @@ gridLayout: - string versions: - string + id: string logsPanel: filter: string resourceNames: @@ -396,6 +401,7 @@ gridLayout: - color: string direction: string label: string + targetAxis: string value: float timeSeriesQuery: timeSeriesFilter: @@ -448,6 +454,7 @@ gridLayout: sectionHeader: dividerBelow: boolean subtitle: string + singleViewGroup: {} text: content: string format: string @@ -519,6 +526,7 @@ gridLayout: - color: string direction: string label: string + targetAxis: string value: float timeshiftDuration: string xAxis: @@ -550,6 +558,7 @@ mosaicLayout: - string versions: - string + id: string logsPanel: filter: string resourceNames: @@ -622,6 +631,7 @@ mosaicLayout: - color: string direction: string label: string + targetAxis: string value: float timeSeriesQuery: timeSeriesFilter: @@ -674,6 +684,7 @@ mosaicLayout: sectionHeader: dividerBelow: boolean subtitle: string + singleViewGroup: {} text: content: string format: string @@ -745,6 +756,7 @@ mosaicLayout: - color: string direction: string label: string + targetAxis: string value: float timeshiftDuration: string xAxis: @@ -784,6 +796,7 @@ rowLayout: - string versions: - string + id: string logsPanel: filter: string resourceNames: @@ -856,6 +869,7 @@ rowLayout: - color: string direction: string label: string + targetAxis: string value: float timeSeriesQuery: timeSeriesFilter: @@ -908,6 +922,7 @@ rowLayout: sectionHeader: dividerBelow: boolean subtitle: string + singleViewGroup: {} text: content: string format: string @@ -979,6 +994,7 @@ rowLayout: - color: string direction: string label: string + targetAxis: string value: float timeshiftDuration: string xAxis: @@ -1252,6 +1268,16 @@ rowLayout:

 {% verbatim %}{% endverbatim %}
+columnLayout.columns[].widgets[].id (Optional, string): {% verbatim %}Optional. The widget id. Ids may be made up of alphanumerics, dashes and underscores. Widget ids are optional.{% endverbatim %}
 columnLayout.columns[].widgets[].logsPanel
@@ -2234,6 +2260,16 @@ rowLayout:
 {% verbatim %}A label for the threshold.{% endverbatim %}
+columnLayout.columns[].widgets[].scorecard.thresholds[].targetAxis (Optional, string): {% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}
 columnLayout.columns[].widgets[].scorecard.thresholds[].value
@@ -2949,6 +2985,16 @@ rowLayout:
 {% verbatim %}The subtitle of the section{% endverbatim %}
+columnLayout.columns[].widgets[].singleViewGroup (Optional, object): {% verbatim %}A widget that groups the other widgets by using a dropdown menu.{% endverbatim %}
 columnLayout.columns[].widgets[].text
@@ -3874,6 +3920,16 @@ rowLayout:
 {% verbatim %}A label for the threshold.{% endverbatim %}
+columnLayout.columns[].widgets[].xyChart.thresholds[].targetAxis (Optional, string): {% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}
 columnLayout.columns[].widgets[].xyChart.thresholds[].value
@@ -4200,6 +4256,16 @@ rowLayout:
 {% verbatim %}{% endverbatim %}
+gridLayout.widgets[].id (Optional, string): {% verbatim %}Optional. The widget id. Ids may be made up of alphanumerics, dashes and underscores. Widget ids are optional.{% endverbatim %}
 gridLayout.widgets[].logsPanel
@@ -5182,6 +5248,16 @@ rowLayout:
 {% verbatim %}A label for the threshold.{% endverbatim %}
+gridLayout.widgets[].scorecard.thresholds[].targetAxis (Optional, string): {% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}
 gridLayout.widgets[].scorecard.thresholds[].value
@@ -5897,6 +5973,16 @@ rowLayout:
 {% verbatim %}The subtitle of the section{% endverbatim %}
+gridLayout.widgets[].singleViewGroup (Optional, object): {% verbatim %}A widget that groups the other widgets by using a dropdown menu.{% endverbatim %}
 gridLayout.widgets[].text
@@ -6822,6 +6908,16 @@ rowLayout:
 {% verbatim %}A label for the threshold.{% endverbatim %}
+gridLayout.widgets[].xyChart.thresholds[].targetAxis (Optional, string): {% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}
 gridLayout.widgets[].xyChart.thresholds[].value
@@ -7158,6 +7254,16 @@ rowLayout:
 {% verbatim %}{% endverbatim %}
+mosaicLayout.tiles[].widget.id (Optional, string): {% verbatim %}Optional. The widget id. Ids may be made up of alphanumerics, dashes and underscores. Widget ids are optional.{% endverbatim %}
 mosaicLayout.tiles[].widget.logsPanel
@@ -8140,6 +8246,16 @@ rowLayout:
 {% verbatim %}A label for the threshold.{% endverbatim %}
+mosaicLayout.tiles[].widget.scorecard.thresholds[].targetAxis (Optional, string): {% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}
 mosaicLayout.tiles[].widget.scorecard.thresholds[].value
@@ -8855,6 +8971,16 @@ rowLayout:
 {% verbatim %}The subtitle of the section{% endverbatim %}
+mosaicLayout.tiles[].widget.singleViewGroup (Optional, object): {% verbatim %}A widget that groups the other widgets by using a dropdown menu.{% endverbatim %}
 mosaicLayout.tiles[].widget.text
@@ -9780,6 +9906,16 @@ rowLayout:
 {% verbatim %}A label for the threshold.{% endverbatim %}
+mosaicLayout.tiles[].widget.xyChart.thresholds[].targetAxis (Optional, string): {% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}
 mosaicLayout.tiles[].widget.xyChart.thresholds[].value
@@ -10206,6 +10342,16 @@ rowLayout:
 {% verbatim %}{% endverbatim %}
+rowLayout.rows[].widgets[].id (Optional, string): {% verbatim %}Optional. The widget id. Ids may be made up of alphanumerics, dashes and underscores. Widget ids are optional.{% endverbatim %}
 rowLayout.rows[].widgets[].logsPanel
@@ -11188,6 +11334,16 @@ rowLayout:
 {% verbatim %}A label for the threshold.{% endverbatim %}
+rowLayout.rows[].widgets[].scorecard.thresholds[].targetAxis (Optional, string): {% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}
 rowLayout.rows[].widgets[].scorecard.thresholds[].value
@@ -11903,6 +12059,16 @@ rowLayout:
 {% verbatim %}The subtitle of the section{% endverbatim %}
+rowLayout.rows[].widgets[].singleViewGroup (Optional, object): {% verbatim %}A widget that groups the other widgets by using a dropdown menu.{% endverbatim %}
 rowLayout.rows[].widgets[].text
@@ -12828,6 +12994,16 @@ rowLayout:
 {% verbatim %}A label for the threshold.{% endverbatim %}
+rowLayout.rows[].widgets[].xyChart.thresholds[].targetAxis (Optional, string): {% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}
 rowLayout.rows[].widgets[].xyChart.thresholds[].value
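As a quick orientation for the three fields added by this patch (widget `id`, `singleViewGroup`, and threshold `targetAxis`), a minimal MonitoringDashboard manifest combining them could look like the sketch below. It is adapted from the monitoringdashboardfull fixture above; the resource name, widget titles, and metric filter are illustrative placeholders rather than anything defined by this patch series.

apiVersion: monitoring.cnrm.cloud.google.com/v1beta1
kind: MonitoringDashboard
metadata:
  name: monitoringdashboard-sample  # illustrative name, not part of this series
spec:
  columnLayout:
    columns:
    - weight: 2
      widgets:
      - id: group1                 # new: optional widget id
        singleViewGroup: {}        # new: groups the other widgets via a dropdown menu
        title: "Grouped widgets"
      - id: chart1
        title: "Accepted connections"
        xyChart:
          dataSets:
          - plotType: STACKED_BAR
            timeSeriesQuery:
              unitOverride: "1"
              timeSeriesFilter:
                filter: metric.type="agent.googleapis.com/nginx/connections/accepted_count"
                aggregation:
                  perSeriesAligner: ALIGN_RATE
          thresholds:
          - label: "Important"
            value: 1.2
            targetAxis: Y1         # new: thresholds can target an axis (not allowed in scorecards)
          yAxis:
            label: y1Axis
            scale: LINEAR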

From 75e7135694cc37f46131188ce70825705f5e1c6a Mon Sep 17 00:00:00 2001 From: Yuwen Ma Date: Thu, 27 Jun 2024 20:32:36 +0000 Subject: [PATCH 090/101] doc: Add CBWP g3doc --- ...oudbuild_v1beta1_cloudbuildworkerpool.yaml | 27 + ...oudbuild_v1beta1_cloudbuildworkerpool.yaml | 32 + .../compute_v1beta1_computeaddress.yaml | 27 + .../compute_v1beta1_computenetwork.yaml | 22 + ...g_v1beta1_servicenetworkingconnection.yaml | 26 + .../serviceusage_v1beta1_service.yaml | 22 + config/servicemappings/cloudbuild.yaml | 4 + .../cloudbuild/cloudbuildworkerpool.md | 592 ++++++++++++++++++ .../cloudbuild_cloudbuildworkerpool.tmpl | 50 ++ 9 files changed, 802 insertions(+) create mode 100644 config/samples/resources/cloudbuildworkerpool/workerpool-with-default-network/cloudbuild_v1beta1_cloudbuildworkerpool.yaml create mode 100644 config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/cloudbuild_v1beta1_cloudbuildworkerpool.yaml create mode 100644 config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/compute_v1beta1_computeaddress.yaml create mode 100644 config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/compute_v1beta1_computenetwork.yaml create mode 100644 config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/servicenetworking_v1beta1_servicenetworkingconnection.yaml create mode 100644 config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/serviceusage_v1beta1_service.yaml create mode 100644 scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudbuild/cloudbuildworkerpool.md create mode 100644 scripts/generate-google3-docs/resource-reference/templates/cloudbuild_cloudbuildworkerpool.tmpl diff --git a/config/samples/resources/cloudbuildworkerpool/workerpool-with-default-network/cloudbuild_v1beta1_cloudbuildworkerpool.yaml b/config/samples/resources/cloudbuildworkerpool/workerpool-with-default-network/cloudbuild_v1beta1_cloudbuildworkerpool.yaml new file mode 100644 index 0000000000..a0195a0665 --- /dev/null +++ b/config/samples/resources/cloudbuildworkerpool/workerpool-with-default-network/cloudbuild_v1beta1_cloudbuildworkerpool.yaml @@ -0,0 +1,27 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+apiVersion: cloudbuild.cnrm.cloud.google.com/v1beta1 +kind: CloudBuildWorkerPool +metadata: + name: cloudbuildworkerpool-sample +spec: + projectRef: + # Replace ${PROJECT_ID?} with your project ID + external: "projects/${PROJECT_ID?}" + location: us-central1 + displayName: A sample cloud build private pool with VSA Service Control + privatePoolV1Config: + workerConfig: + machineType: e2-medium + diskSizeGb: 100 diff --git a/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/cloudbuild_v1beta1_cloudbuildworkerpool.yaml b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/cloudbuild_v1beta1_cloudbuildworkerpool.yaml new file mode 100644 index 0000000000..35246ec7b2 --- /dev/null +++ b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/cloudbuild_v1beta1_cloudbuildworkerpool.yaml @@ -0,0 +1,32 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: cloudbuild.cnrm.cloud.google.com/v1beta1 +kind: CloudBuildWorkerPool +metadata: + name: cloudbuildworkerpool-${uniqueId} +spec: + projectRef: + # Replace ${PROJECT_ID?} with your project ID + external: projects/${PROJECT_ID?} + location: us-central1 + displayName: A sample cloud build private pool with custom peered network + privatePoolV1Config: + workerConfig: + machineType: e2-medium + diskSizeGb: 100 + networkConfig: + peeredNetworkRef: + name: computenetwork-dep + egressOption: NO_PUBLIC_EGRESS + peeredNetworkIPRange: /29 diff --git a/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/compute_v1beta1_computeaddress.yaml b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/compute_v1beta1_computeaddress.yaml new file mode 100644 index 0000000000..eb893b6576 --- /dev/null +++ b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/compute_v1beta1_computeaddress.yaml @@ -0,0 +1,27 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeAddress +metadata: + name: computenaddress-dep + annotations: + cnrm.cloud.google.com/project-id: ${PROJECT_ID?} +spec: + location: global + purpose: VPC_PEERING + addressType: INTERNAL + prefixLength: 24 + networkRef: + name: computenetwork-dep diff --git a/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/compute_v1beta1_computenetwork.yaml b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/compute_v1beta1_computenetwork.yaml new file mode 100644 index 0000000000..b1b04ed290 --- /dev/null +++ b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/compute_v1beta1_computenetwork.yaml @@ -0,0 +1,22 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeNetwork +metadata: + name: computenetwork-dep + annotations: + cnrm.cloud.google.com/project-id: ${PROJECT_ID?} +spec: + autoCreateSubnetworks: false diff --git a/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/servicenetworking_v1beta1_servicenetworkingconnection.yaml b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/servicenetworking_v1beta1_servicenetworkingconnection.yaml new file mode 100644 index 0000000000..d8c5b67ef3 --- /dev/null +++ b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/servicenetworking_v1beta1_servicenetworkingconnection.yaml @@ -0,0 +1,26 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: servicenetworking.cnrm.cloud.google.com/v1beta1 +kind: ServiceNetworkingConnection +metadata: + name: servicenetworkconn-dep + annotations: + cnrm.cloud.google.com/project-id: ${PROJECT_ID?} +spec: + networkRef: + name: computenetwork-dep + service: servicenetworking.googleapis.com + reservedPeeringRanges: + - name: computenaddress-dep diff --git a/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/serviceusage_v1beta1_service.yaml b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/serviceusage_v1beta1_service.yaml new file mode 100644 index 0000000000..9c1c475a70 --- /dev/null +++ b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/serviceusage_v1beta1_service.yaml @@ -0,0 +1,22 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: serviceusage.cnrm.cloud.google.com/v1beta1 +kind: Service +metadata: + name: service-sample +spec: + resourceID: servicenetworking.googleapis.com + projectRef: + external: projects/${PROJECT_ID?} diff --git a/config/servicemappings/cloudbuild.yaml b/config/servicemappings/cloudbuild.yaml index 38a844d99a..aaf85f856d 100644 --- a/config/servicemappings/cloudbuild.yaml +++ b/config/servicemappings/cloudbuild.yaml @@ -22,6 +22,10 @@ spec: version: v1beta1 serviceHostName: "cloudbuild.googleapis.com" resources: + - name: google_cloudbuild_workerpool + kind: CloudBuildWorkerPool + direct: true + - name: google_cloudbuild_trigger kind: CloudBuildTrigger metadataMapping: diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudbuild/cloudbuildworkerpool.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudbuild/cloudbuildworkerpool.md new file mode 100644 index 0000000000..ac2f73f461 --- /dev/null +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudbuild/cloudbuildworkerpool.md @@ -0,0 +1,592 @@ +{# AUTOGENERATED. DO NOT EDIT. #} + +{% extends "config-connector/_base.html" %} + +{% block page_title %}CloudBuildWorkerPool{% endblock %} +{% block body %} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Property | Value
{{gcp_name_short}} Service Name | Cloud Build
{{gcp_name_short}} Service Documentation | /cloud-build/docs/
{{gcp_name_short}} REST Resource Name | v1.projects.workerpools
{{gcp_name_short}} REST Resource Documentation | /cloud-build/docs/reference/rest/v1/projects.locations.workerPools
{{product_name_short}} Resource Short Names | cloudbuildworkerpool
{{product_name_short}} Service Name | cloudbuild.googleapis.com
{{product_name_short}} Resource Fully Qualified Name | cloudbuildworkerpools.cloudbuild.cnrm.cloud.google.com
Can Be Referenced by IAMPolicy/IAMPolicyMember | No
+ +## Custom Resource Definition Properties + + +### Annotations + + + + + + + + + + + +
Fields
cnrm.cloud.google.com/state-into-spec
+ + +### Spec +#### Schema +```yaml +displayName: string +location: string +privatePoolV1Config: + networkConfig: + egressOption: string + peeredNetworkIPRange: string + peeredNetworkRef: + external: string + name: string + namespace: string + workerConfig: + diskSizeGb: integer + machineType: string +projectRef: + external: string + kind: string + name: string + namespace: string +resourceID: string +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Fields
+

displayName

+

Optional

+
+

string

+

{% verbatim %}A user-specified, human-readable name for the `WorkerPool`. If provided, this value must be 1-63 characters.{% endverbatim %}

+
+

location

+

Required*

+
+

string

+

{% verbatim %}{% endverbatim %}

+
+

privatePoolV1Config

+

Required*

+
+

object

+

{% verbatim %}Legacy Private Pool configuration.{% endverbatim %}

+
+

privatePoolV1Config.networkConfig

+

Optional

+
+

object

+

{% verbatim %}Network configuration for the pool.{% endverbatim %}

+
+

privatePoolV1Config.networkConfig.egressOption

+

Optional

+
+

string

+

{% verbatim %}Option to configure network egress for the workers.{% endverbatim %}

+
+

privatePoolV1Config.networkConfig.peeredNetworkIPRange

+

Optional

+
+

string

+

{% verbatim %}Immutable. Subnet IP range within the peered network. This is specified in CIDR notation with a slash and the subnet prefix size. You can optionally specify an IP address before the subnet prefix value. e.g. `192.168.0.0/29` would specify an IP range starting at 192.168.0.0 with a prefix size of 29 bits. `/16` would specify a prefix size of 16 bits, with an automatically determined IP within the peered VPC. If unspecified, a value of `/24` will be used.{% endverbatim %}

+
+

privatePoolV1Config.networkConfig.peeredNetworkRef

+

Optional

+
+

object

+

{% verbatim %}Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network.{% endverbatim %}

+
+

privatePoolV1Config.networkConfig.peeredNetworkRef.external

+

Optional

+
+

string

+

{% verbatim %}The compute network selflink of form "projects/<project>/global/networks/<network>", when not managed by KCC.{% endverbatim %}

+
+

privatePoolV1Config.networkConfig.peeredNetworkRef.name

+

Optional

+
+

string

+

{% verbatim %}The `name` field of a `ComputeNetwork` resource.{% endverbatim %}

+
+

privatePoolV1Config.networkConfig.peeredNetworkRef.namespace

+

Optional

+
+

string

+

{% verbatim %}The `namespace` field of a `ComputeNetwork` resource.{% endverbatim %}

+
+

privatePoolV1Config.workerConfig

+

Required*

+
+

object

+

{% verbatim %}Machine configuration for the workers in the pool.{% endverbatim %}

+
+

privatePoolV1Config.workerConfig.diskSizeGb

+

Optional

+
+

integer

+

{% verbatim %}Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). Specify a value of up to 2000. If `0` is specified, Cloud Build will use a standard disk size.{% endverbatim %}

+
+

privatePoolV1Config.workerConfig.machineType

+

Optional

+
+

string

+

{% verbatim %}Machine type of a worker, such as `e2-medium`. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). If left blank, Cloud Build will use a sensible default.{% endverbatim %}

+
+

projectRef

+

Required*

+
+

object

+

{% verbatim %}The Project that this resource belongs to.{% endverbatim %}

+
+

projectRef.external

+

Optional

+
+

string

+

{% verbatim %}The `projectID` field of a project, when not managed by KCC.{% endverbatim %}

+
+

projectRef.kind

+

Optional

+
+

string

+

{% verbatim %}The kind of the Project resource; optional but must be `Project` if provided.{% endverbatim %}

+
+

projectRef.name

+

Optional

+
+

string

+

{% verbatim %}The `name` field of a `Project` resource.{% endverbatim %}

+
+

projectRef.namespace

+

Optional

+
+

string

+

{% verbatim %}The `namespace` field of a `Project` resource.{% endverbatim %}

+
+

resourceID

+

Optional

+
+

string

+

{% verbatim %}The `WorkerPool` name. If not given, the metadata.name will be used.{% endverbatim %}

+
+ + +

* Field is required when parent field is specified

+ + +### Status +#### Schema +```yaml +conditions: +- lastTransitionTime: string + message: string + reason: string + status: string + type: string +externalRef: string +observedGeneration: integer +observedState: + createTime: string + etag: string + networkConfig: + egressOption: string + peeredNetwork: string + peeredNetworkIPRange: string + updateTime: string + workerConfig: + diskSizeGb: integer + machineType: string +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Fields
conditions +

list (object)

+

{% verbatim %}Conditions represent the latest available observations of the object's current state.{% endverbatim %}

+
conditions[] +

object

+

{% verbatim %}{% endverbatim %}

+
conditions[].lastTransitionTime +

string

+

{% verbatim %}Last time the condition transitioned from one status to another.{% endverbatim %}

+
conditions[].message +

string

+

{% verbatim %}Human-readable message indicating details about last transition.{% endverbatim %}

+
conditions[].reason +

string

+

{% verbatim %}Unique, one-word, CamelCase reason for the condition's last transition.{% endverbatim %}

+
conditions[].status +

string

+

{% verbatim %}Status is the status of the condition. Can be True, False, Unknown.{% endverbatim %}

+
conditions[].type +

string

+

{% verbatim %}Type is the type of the condition.{% endverbatim %}

+
externalRef +

string

+

{% verbatim %}A unique specifier for the CloudBuild workerpool resource in GCP.{% endverbatim %}

+
observedGeneration +

integer

+

{% verbatim %}ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. If this is equal to metadata.generation, then that means that the current reported status reflects the most recent desired state of the resource.{% endverbatim %}

+
observedState +

object

+

{% verbatim %}ObservedState is the state of the resource as most recently observed in GCP.{% endverbatim %}

+
observedState.createTime +

string

+

{% verbatim %}The creation timestamp of the workerpool.{% endverbatim %}

+
observedState.etag +

string

+

{% verbatim %}The checksum computed by the server, using a weak indicator.{% endverbatim %}

+
observedState.networkConfig +

object

+

{% verbatim %}Network configuration for the pool.{% endverbatim %}

+
observedState.networkConfig.egressOption +

string

+

{% verbatim %}Option to configure network egress for the workers.{% endverbatim %}

+
observedState.networkConfig.peeredNetwork +

string

+

{% verbatim %}Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network.{% endverbatim %}

+
observedState.networkConfig.peeredNetworkIPRange +

string

+

{% verbatim %}Immutable. Subnet IP range within the peered network. This is specified in CIDR notation with a slash and the subnet prefix size. You can optionally specify an IP address before the subnet prefix value. e.g. `192.168.0.0/29` would specify an IP range starting at 192.168.0.0 with a prefix size of 29 bits. `/16` would specify a prefix size of 16 bits, with an automatically determined IP within the peered VPC. If unspecified, a value of `/24` will be used.{% endverbatim %}

+
observedState.updateTime +

string

+

{% verbatim %}The last update timestamp of the workerpool.{% endverbatim %}

+
observedState.workerConfig +

object

+

{% verbatim %}Machine configuration for the workers in the pool.{% endverbatim %}

+
observedState.workerConfig.diskSizeGb +

integer

+

{% verbatim %}Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). Specify a value of up to 2000. If `0` is specified, Cloud Build will use a standard disk size.{% endverbatim %}

+
observedState.workerConfig.machineType +

string

+

{% verbatim %}Machine type of a worker, such as `e2-medium`. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). If left blank, Cloud Build will use a sensible default.{% endverbatim %}

+
+ +## Sample YAML(s) + +### Workerpool With Default Network +```yaml +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: cloudbuild.cnrm.cloud.google.com/v1beta1 +kind: CloudBuildWorkerPool +metadata: + name: cloudbuildworkerpool-sample +spec: + projectRef: + # Replace ${PROJECT_ID?} with your project ID + external: "projects/${PROJECT_ID?}" + location: us-central1 + displayName: A sample cloud build private pool with VSA Service Control + privatePoolV1Config: + workerConfig: + machineType: e2-medium + diskSizeGb: 100 +``` + +### Workerpool With Peered Network +```yaml +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: cloudbuild.cnrm.cloud.google.com/v1beta1 +kind: CloudBuildWorkerPool +metadata: + name: cloudbuildworkerpool-${uniqueId} +spec: + projectRef: + # Replace ${PROJECT_ID?} with your project ID + external: projects/${PROJECT_ID?} + location: us-central1 + displayName: A sample cloud build private pool with custom peered network + privatePoolV1Config: + workerConfig: + machineType: e2-medium + diskSizeGb: 100 + networkConfig: + peeredNetworkRef: + name: computenetwork-dep + egressOption: NO_PUBLIC_EGRESS + peeredNetworkIPRange: /29 +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeAddress +metadata: + name: computenaddress-dep + annotations: + cnrm.cloud.google.com/project-id: ${PROJECT_ID?} +spec: + location: global + purpose: VPC_PEERING + addressType: INTERNAL + prefixLength: 24 + networkRef: + name: computenetwork-dep +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeNetwork +metadata: + name: computenetwork-dep + annotations: + cnrm.cloud.google.com/project-id: ${PROJECT_ID?} +spec: + autoCreateSubnetworks: false +--- +apiVersion: servicenetworking.cnrm.cloud.google.com/v1beta1 +kind: ServiceNetworkingConnection +metadata: + name: servicenetworkconn-dep + annotations: + cnrm.cloud.google.com/project-id: ${PROJECT_ID?} +spec: + networkRef: + name: computenetwork-dep + service: servicenetworking.googleapis.com + reservedPeeringRanges: + - name: computenaddress-dep +--- +apiVersion: serviceusage.cnrm.cloud.google.com/v1beta1 +kind: Service +metadata: + name: service-sample +spec: + resourceID: servicenetworking.googleapis.com + projectRef: + external: projects/${PROJECT_ID?} +``` + + +Note: If you have any trouble with instantiating the resource, refer to Troubleshoot Config Connector. 
+ +{% endblock %} diff --git a/scripts/generate-google3-docs/resource-reference/templates/cloudbuild_cloudbuildworkerpool.tmpl b/scripts/generate-google3-docs/resource-reference/templates/cloudbuild_cloudbuildworkerpool.tmpl new file mode 100644 index 0000000000..a064300a2e --- /dev/null +++ b/scripts/generate-google3-docs/resource-reference/templates/cloudbuild_cloudbuildworkerpool.tmpl @@ -0,0 +1,50 @@ +{{template "headercomment.tmpl" .}} + +{% extends "config-connector/_base.html" %} + +{% block page_title %}{{ .Kind}}{% endblock %} +{% block body %} +{{template "alphadisclaimer.tmpl" .}} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +{{template "iamsupport.tmpl" .}} + +
Property | Value
{{"{{gcp_name_short}}"}} Service Name | Cloud Build
{{"{{gcp_name_short}}"}} Service Documentation | /cloud-build/docs/
{{"{{gcp_name_short}}"}} REST Resource Name | v1.projects.workerpools
{{"{{gcp_name_short}}"}} REST Resource Documentation | /cloud-build/docs/reference/rest/v1/projects.locations.workerPools
{{"{{product_name_short}}"}} Resource Short Names | {{ .ShortNames}}
{{"{{product_name_short}}"}} Service Name | cloudbuild.googleapis.com
{{"{{product_name_short}}"}} Resource Fully Qualified Name | {{ .FullyQualifiedName}}
+ +{{template "resource.tmpl" .}} +{{template "endnote.tmpl" .}} +{% endblock %} From 9db446f3102b3ea1e974cdd73b3e2ee7492f5877 Mon Sep 17 00:00:00 2001 From: Yuwen Ma Date: Fri, 28 Jun 2024 02:16:37 +0000 Subject: [PATCH 091/101] resolve comments --- ...oudbuild_v1beta1_cloudbuildworkerpool.yaml | 2 +- ...oudbuild_v1beta1_cloudbuildworkerpool.yaml | 4 ++-- .../compute_v1beta1_computeaddress.yaml | 4 ++-- .../compute_v1beta1_computenetwork.yaml | 2 +- ...g_v1beta1_servicenetworkingconnection.yaml | 6 ++--- .../serviceusage_v1beta1_service.yaml | 22 ------------------- .../core/v1alpha1/servicemapping_types.go | 2 +- .../cloudbuild_cloudbuildworkerpool.tmpl | 4 ++++ 8 files changed, 14 insertions(+), 32 deletions(-) delete mode 100644 config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/serviceusage_v1beta1_service.yaml diff --git a/config/samples/resources/cloudbuildworkerpool/workerpool-with-default-network/cloudbuild_v1beta1_cloudbuildworkerpool.yaml b/config/samples/resources/cloudbuildworkerpool/workerpool-with-default-network/cloudbuild_v1beta1_cloudbuildworkerpool.yaml index a0195a0665..645e4bf6b4 100644 --- a/config/samples/resources/cloudbuildworkerpool/workerpool-with-default-network/cloudbuild_v1beta1_cloudbuildworkerpool.yaml +++ b/config/samples/resources/cloudbuildworkerpool/workerpool-with-default-network/cloudbuild_v1beta1_cloudbuildworkerpool.yaml @@ -14,7 +14,7 @@ apiVersion: cloudbuild.cnrm.cloud.google.com/v1beta1 kind: CloudBuildWorkerPool metadata: - name: cloudbuildworkerpool-sample + name: cloudbuildworkerpool-sample-default-network spec: projectRef: # Replace ${PROJECT_ID?} with your project ID diff --git a/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/cloudbuild_v1beta1_cloudbuildworkerpool.yaml b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/cloudbuild_v1beta1_cloudbuildworkerpool.yaml index 35246ec7b2..75a8d30a2e 100644 --- a/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/cloudbuild_v1beta1_cloudbuildworkerpool.yaml +++ b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/cloudbuild_v1beta1_cloudbuildworkerpool.yaml @@ -14,7 +14,7 @@ apiVersion: cloudbuild.cnrm.cloud.google.com/v1beta1 kind: CloudBuildWorkerPool metadata: - name: cloudbuildworkerpool-${uniqueId} + name: cloudbuildworkerpool-sample-peered-network spec: projectRef: # Replace ${PROJECT_ID?} with your project ID @@ -27,6 +27,6 @@ spec: diskSizeGb: 100 networkConfig: peeredNetworkRef: - name: computenetwork-dep + name: cloudbuildworkerpool-dep-peered-network egressOption: NO_PUBLIC_EGRESS peeredNetworkIPRange: /29 diff --git a/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/compute_v1beta1_computeaddress.yaml b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/compute_v1beta1_computeaddress.yaml index eb893b6576..3d3ca42eeb 100644 --- a/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/compute_v1beta1_computeaddress.yaml +++ b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/compute_v1beta1_computeaddress.yaml @@ -15,7 +15,7 @@ apiVersion: compute.cnrm.cloud.google.com/v1beta1 kind: ComputeAddress metadata: - name: computenaddress-dep + name: cloudbuildworkerpool-dep-peered-network annotations: cnrm.cloud.google.com/project-id: ${PROJECT_ID?} spec: @@ -24,4 +24,4 @@ spec: addressType: INTERNAL prefixLength: 24 networkRef: - name: computenetwork-dep + 
name: cloudbuildworkerpool-dep-peered-network diff --git a/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/compute_v1beta1_computenetwork.yaml b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/compute_v1beta1_computenetwork.yaml index b1b04ed290..b821dd66eb 100644 --- a/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/compute_v1beta1_computenetwork.yaml +++ b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/compute_v1beta1_computenetwork.yaml @@ -15,7 +15,7 @@ apiVersion: compute.cnrm.cloud.google.com/v1beta1 kind: ComputeNetwork metadata: - name: computenetwork-dep + name: cloudbuildworkerpool-dep-peered-network annotations: cnrm.cloud.google.com/project-id: ${PROJECT_ID?} spec: diff --git a/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/servicenetworking_v1beta1_servicenetworkingconnection.yaml b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/servicenetworking_v1beta1_servicenetworkingconnection.yaml index d8c5b67ef3..64ca6023a8 100644 --- a/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/servicenetworking_v1beta1_servicenetworkingconnection.yaml +++ b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/servicenetworking_v1beta1_servicenetworkingconnection.yaml @@ -15,12 +15,12 @@ apiVersion: servicenetworking.cnrm.cloud.google.com/v1beta1 kind: ServiceNetworkingConnection metadata: - name: servicenetworkconn-dep + name: cloudbuildworkerpool-dep-peered-network annotations: cnrm.cloud.google.com/project-id: ${PROJECT_ID?} spec: networkRef: - name: computenetwork-dep + name: cloudbuildworkerpool-dep-peered-network service: servicenetworking.googleapis.com reservedPeeringRanges: - - name: computenaddress-dep + - name: cloudbuildworkerpool-dep-peered-network diff --git a/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/serviceusage_v1beta1_service.yaml b/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/serviceusage_v1beta1_service.yaml deleted file mode 100644 index 9c1c475a70..0000000000 --- a/config/samples/resources/cloudbuildworkerpool/workerpool-with-peered-network/serviceusage_v1beta1_service.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: serviceusage.cnrm.cloud.google.com/v1beta1 -kind: Service -metadata: - name: service-sample -spec: - resourceID: servicenetworking.googleapis.com - projectRef: - external: projects/${PROJECT_ID?} diff --git a/pkg/apis/core/v1alpha1/servicemapping_types.go b/pkg/apis/core/v1alpha1/servicemapping_types.go index fc6af2b26b..897e9dbf3a 100644 --- a/pkg/apis/core/v1alpha1/servicemapping_types.go +++ b/pkg/apis/core/v1alpha1/servicemapping_types.go @@ -56,7 +56,7 @@ type ResourceConfig struct { // Direct tells if the ResourceConfig is for ConfigConnector directly managed resources. 
// Directly managed resource does not use Terraform or DCL controller, and do not rely on any TF specified fields like `SkipImport` - // A direct ResourceConfig is used to generate g3doc. + // A direct ResourceConfig is used to generate the reference doc. Direct bool `json:"direct"` // SkipImport skips the import step when fetching the live state of the underlying diff --git a/scripts/generate-google3-docs/resource-reference/templates/cloudbuild_cloudbuildworkerpool.tmpl b/scripts/generate-google3-docs/resource-reference/templates/cloudbuild_cloudbuildworkerpool.tmpl index a064300a2e..baa6eb5465 100644 --- a/scripts/generate-google3-docs/resource-reference/templates/cloudbuild_cloudbuildworkerpool.tmpl +++ b/scripts/generate-google3-docs/resource-reference/templates/cloudbuild_cloudbuildworkerpool.tmpl @@ -42,6 +42,10 @@ {{ .FullyQualifiedName}} {{template "iamsupport.tmpl" .}} + +{{"{{product_name_short}}"}} Default Average Reconcile Interval In Seconds +{{ .DefaultReconcileInterval}} + From 90e1659a8ea0e710c9b4bd6873b471f512818c19 Mon Sep 17 00:00:00 2001 From: justinsb Date: Thu, 27 Jun 2024 21:36:02 -0400 Subject: [PATCH 092/101] monitoringdashboard: move to direct actuation So that users can use these new fields. --- apis/monitoring/v1beta1/monitoringdashboard_types.go | 2 +- ...onitoringdashboards.monitoring.cnrm.cloud.google.com.yaml | 1 - .../apis/monitoring/v1beta1/monitoringdashboard_types.go | 2 +- scripts/github-actions/tests-e2e-fixtures | 5 +++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/apis/monitoring/v1beta1/monitoringdashboard_types.go b/apis/monitoring/v1beta1/monitoringdashboard_types.go index b8a1c33005..6890d2dc1c 100644 --- a/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -848,7 +848,7 @@ type MonitoringDashboardStatus struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:resource:categories=gcp,shortName=gcpmonitoringdashboard;gcpmonitoringdashboards // +kubebuilder:subresource:status -// +kubebuilder:metadata:labels="cnrm.cloud.google.com/dcl2crd=true";"cnrm.cloud.google.com/managed-by-kcc=true";"cnrm.cloud.google.com/stability-level=stable";"cnrm.cloud.google.com/system=true" +// +kubebuilder:metadata:labels="cnrm.cloud.google.com/managed-by-kcc=true";"cnrm.cloud.google.com/stability-level=stable";"cnrm.cloud.google.com/system=true" // +kubebuilder:printcolumn:name="Age",JSONPath=".metadata.creationTimestamp",type="date" // +kubebuilder:printcolumn:name="Ready",JSONPath=".status.conditions[?(@.type=='Ready')].status",type="string",description="When 'True', the most recent reconcile of the resource succeeded" // +kubebuilder:printcolumn:name="Status",JSONPath=".status.conditions[?(@.type=='Ready')].reason",type="string",description="The reason for the value in 'Ready'" diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml index d0245c3cf8..a0b7a54413 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml @@ -5,7 +5,6 @@ metadata: cnrm.cloud.google.com/version: 0.0.0-dev creationTimestamp: null labels: 
- cnrm.cloud.google.com/dcl2crd: "true" cnrm.cloud.google.com/managed-by-kcc: "true" cnrm.cloud.google.com/stability-level: stable cnrm.cloud.google.com/system: "true" diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go index 6a0461eb89..d07a5745ed 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -773,7 +773,7 @@ type MonitoringDashboardStatus struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:resource:categories=gcp,shortName=gcpmonitoringdashboard;gcpmonitoringdashboards // +kubebuilder:subresource:status -// +kubebuilder:metadata:labels="cnrm.cloud.google.com/dcl2crd=true";"cnrm.cloud.google.com/managed-by-kcc=true";"cnrm.cloud.google.com/stability-level=stable";"cnrm.cloud.google.com/system=true" +// +kubebuilder:metadata:labels="cnrm.cloud.google.com/managed-by-kcc=true";"cnrm.cloud.google.com/stability-level=stable";"cnrm.cloud.google.com/system=true" // +kubebuilder:printcolumn:name="Age",JSONPath=".metadata.creationTimestamp",type="date" // +kubebuilder:printcolumn:name="Ready",JSONPath=".status.conditions[?(@.type=='Ready')].status",type="string",description="When 'True', the most recent reconcile of the resource succeeded" // +kubebuilder:printcolumn:name="Status",JSONPath=".status.conditions[?(@.type=='Ready')].reason",type="string",description="The reason for the value in 'Ready'" diff --git a/scripts/github-actions/tests-e2e-fixtures b/scripts/github-actions/tests-e2e-fixtures index 1a9e7575d1..32469d584b 100755 --- a/scripts/github-actions/tests-e2e-fixtures +++ b/scripts/github-actions/tests-e2e-fixtures @@ -23,8 +23,9 @@ cd ${REPO_ROOT}/ echo "Downloading envtest assets..." export KUBEBUILDER_ASSETS=$(go run sigs.k8s.io/controller-runtime/tools/setup-envtest@latest use -p path) -# Transient controller can only choose one controller type (Direct vs TF/DCL) to check the golden log. -export KCC_USE_DIRECT_RECONCILERS=MonitoringDashboard +# Transient controller can only choose one controller type (Direct vs TF/DCL) to check the golden log, +# so as we are moving resources to direct actuation, we add them here. +# e.g. export KCC_USE_DIRECT_RECONCILERS=MonitoringDashboard echo "Running fixtures in tests/e2e..." 
From d0a3fe6b9dd10e06a155bf9c3912788497ae156d Mon Sep 17 00:00:00 2001 From: justinsb Date: Thu, 27 Jun 2024 22:09:06 -0400 Subject: [PATCH 093/101] monitoringdashboard: Add `prometheusQuery` and `outputFullDuration` to timeSeriesQuery --- .../v1beta1/monitoringdashboard_types.go | 22 +- .../v1beta1/zz_generated.deepcopy.go | 10 + ...ards.monitoring.cnrm.cloud.google.com.yaml | 168 +++++++++ docs/releasenotes/release-1.120.md | 3 + .../v1beta1/monitoringdashboard_types.go | 14 + .../v1beta1/zz_generated.deepcopy.go | 10 + .../dashboard_generated.mappings.go | 10 +- .../direct/monitoring/dashboard_mappings.go | 9 + .../direct/monitoring/roundtrip_test.go | 2 - ...ated_export_monitoringdashboardfull.golden | 6 + ...object_monitoringdashboardfull.golden.yaml | 6 + .../monitoringdashboardfull/_http.log | 27 ++ .../monitoringdashboardfull/create.yaml | 5 + .../monitoring/monitoringdashboard.md | 336 ++++++++++++++++++ 14 files changed, 608 insertions(+), 20 deletions(-) diff --git a/apis/monitoring/v1beta1/monitoringdashboard_types.go b/apis/monitoring/v1beta1/monitoringdashboard_types.go index b8a1c33005..76082a8b1c 100644 --- a/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -584,10 +584,8 @@ type TimeSeriesQuery struct { // A query used to fetch time series with MQL. TimeSeriesQueryLanguage *string `json:"timeSeriesQueryLanguage,omitempty"` - /*NOTYET // A query used to fetch time series with PromQL. PrometheusQuery *string `json:"prometheusQuery,omitempty"` - */ // The unit of data contained in fetched time series. If non-empty, this // unit will override any unit that accompanies fetched data. The format is @@ -596,18 +594,14 @@ type TimeSeriesQuery struct { // field in `MetricDescriptor`. UnitOverride *string `json:"unitOverride,omitempty"` - /* - NOTYET - - // Optional. If set, Cloud Monitoring will treat the full query duration as - // the alignment period so that there will be only 1 output value. - // - // *Note: This could override the configured alignment period except for - // the cases where a series of data points are expected, like - // - XyChart - // - Scorecard's spark chart - OutputFullDuration *bool `json:"outputFullDuration,omitempty"` - */ + // Optional. If set, Cloud Monitoring will treat the full query duration as + // the alignment period so that there will be only 1 output value. 
+ // + // *Note: This could override the configured alignment period except for + // the cases where a series of data points are expected, like + // - XyChart + // - Scorecard's spark chart + OutputFullDuration *bool `json:"outputFullDuration,omitempty"` } // +kcc:proto=google.monitoring.dashboard.v1.IncidentList diff --git a/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/apis/monitoring/v1beta1/zz_generated.deepcopy.go index 94fd3e50b7..27a55b3231 100644 --- a/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -1109,11 +1109,21 @@ func (in *TimeSeriesQuery) DeepCopyInto(out *TimeSeriesQuery) { *out = new(string) **out = **in } + if in.PrometheusQuery != nil { + in, out := &in.PrometheusQuery, &out.PrometheusQuery + *out = new(string) + **out = **in + } if in.UnitOverride != nil { in, out := &in.UnitOverride, &out.UnitOverride *out = new(string) **out = **in } + if in.OutputFullDuration != nil { + in, out := &in.OutputFullDuration, &out.OutputFullDuration + *out = new(bool) + **out = **in + } return } diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml index d0245c3cf8..6b3094d8b4 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml @@ -293,6 +293,20 @@ spec: description: Required. The query for the PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`. properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time + series with PromQL. + type: string timeSeriesFilter: description: Filter parameters to fetch time series. @@ -981,6 +995,20 @@ spec: description: Required. Fields for querying time series data from the Stackdriver metrics API. properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time series + with PromQL. + type: string timeSeriesFilter: description: Filter parameters to fetch time series. @@ -1618,6 +1646,20 @@ spec: time series data from the Stackdriver metrics API. properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. 
+ + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time + series with PromQL. + type: string timeSeriesFilter: description: Filter parameters to fetch time series. @@ -2478,6 +2520,20 @@ spec: description: Required. The query for the PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`. properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time series + with PromQL. + type: string timeSeriesFilter: description: Filter parameters to fetch time series. @@ -3129,6 +3185,20 @@ spec: description: Required. Fields for querying time series data from the Stackdriver metrics API. properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time series with + PromQL. + type: string timeSeriesFilter: description: Filter parameters to fetch time series. properties: @@ -3730,6 +3800,20 @@ spec: description: Required. Fields for querying time series data from the Stackdriver metrics API. properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time series + with PromQL. + type: string timeSeriesFilter: description: Filter parameters to fetch time series. @@ -4561,6 +4645,20 @@ spec: description: Required. The query for the PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`. properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time + series with PromQL. + type: string timeSeriesFilter: description: Filter parameters to fetch time series. @@ -5234,6 +5332,20 @@ spec: description: Required. Fields for querying time series data from the Stackdriver metrics API. properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. 
+ + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time series + with PromQL. + type: string timeSeriesFilter: description: Filter parameters to fetch time series. @@ -5859,6 +5971,20 @@ spec: time series data from the Stackdriver metrics API. properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time + series with PromQL. + type: string timeSeriesFilter: description: Filter parameters to fetch time series. @@ -6767,6 +6893,20 @@ spec: description: Required. The query for the PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`. properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time + series with PromQL. + type: string timeSeriesFilter: description: Filter parameters to fetch time series. @@ -7455,6 +7595,20 @@ spec: description: Required. Fields for querying time series data from the Stackdriver metrics API. properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time series + with PromQL. + type: string timeSeriesFilter: description: Filter parameters to fetch time series. @@ -8092,6 +8246,20 @@ spec: time series data from the Stackdriver metrics API. properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time + series with PromQL. + type: string timeSeriesFilter: description: Filter parameters to fetch time series. diff --git a/docs/releasenotes/release-1.120.md b/docs/releasenotes/release-1.120.md index 183bfe2ddf..49dc7ea35b 100644 --- a/docs/releasenotes/release-1.120.md +++ b/docs/releasenotes/release-1.120.md @@ -27,12 +27,15 @@ output fields from GCP APIs are in `status.observedState.*` * Added `spec.severity` field. * `MonitoringDashboard` + * Added `alertChart` widgets. * Added `collapsibleGroup` widgets. * Added `pieChart` widgets. * Added `sectionHeader` widgets. * Added `singleViewGroup` widgets. + * Added `id` field to all widgets. 
+ * Added `prometheusQuery` and `outputFullDuration` to timeSeriesQuery. * Added `style` fields to text widgets. * Added `targetAxis` field to thresholds. diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go index 6a0461eb89..bdd9a36f61 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -558,6 +558,20 @@ type DashboardTimeSeriesFilterRatio struct { } type DashboardTimeSeriesQuery struct { + /* Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart */ + // +optional + OutputFullDuration *bool `json:"outputFullDuration,omitempty"` + + /* A query used to fetch time series with PromQL. */ + // +optional + PrometheusQuery *string `json:"prometheusQuery,omitempty"` + /* Filter parameters to fetch time series. */ // +optional TimeSeriesFilter *DashboardTimeSeriesFilter `json:"timeSeriesFilter,omitempty"` diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go index 0a97512a7d..594669b203 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -1434,6 +1434,16 @@ func (in *DashboardTimeSeriesFilterRatio) DeepCopy() *DashboardTimeSeriesFilterR // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DashboardTimeSeriesQuery) DeepCopyInto(out *DashboardTimeSeriesQuery) { *out = *in + if in.OutputFullDuration != nil { + in, out := &in.OutputFullDuration, &out.OutputFullDuration + *out = new(bool) + **out = **in + } + if in.PrometheusQuery != nil { + in, out := &in.PrometheusQuery, &out.PrometheusQuery + *out = new(string) + **out = **in + } if in.TimeSeriesFilter != nil { in, out := &in.TimeSeriesFilter, &out.TimeSeriesFilter *out = new(DashboardTimeSeriesFilter) diff --git a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go index 50352cac49..18d49675f5 100644 --- a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go @@ -685,9 +685,9 @@ func TimeSeriesQuery_FromProto(mapCtx *MapContext, in *pb.TimeSeriesQuery) *krm. 
out.TimeSeriesFilter = TimeSeriesFilter_FromProto(mapCtx, in.GetTimeSeriesFilter()) out.TimeSeriesFilterRatio = TimeSeriesFilterRatio_FromProto(mapCtx, in.GetTimeSeriesFilterRatio()) out.TimeSeriesQueryLanguage = LazyPtr(in.GetTimeSeriesQueryLanguage()) - // MISSING: PrometheusQuery + out.PrometheusQuery = LazyPtr(in.GetPrometheusQuery()) out.UnitOverride = LazyPtr(in.GetUnitOverride()) - // MISSING: OutputFullDuration + out.OutputFullDuration = LazyPtr(in.GetOutputFullDuration()) return out } func TimeSeriesQuery_ToProto(mapCtx *MapContext, in *krm.TimeSeriesQuery) *pb.TimeSeriesQuery { @@ -704,9 +704,11 @@ func TimeSeriesQuery_ToProto(mapCtx *MapContext, in *krm.TimeSeriesQuery) *pb.Ti if oneof := TimeSeriesQuery_TimeSeriesQueryLanguage_ToProto(mapCtx, in.TimeSeriesQueryLanguage); oneof != nil { out.Source = oneof } - // MISSING: PrometheusQuery + if oneof := TimeSeriesQuery_PrometheusQuery_ToProto(mapCtx, in.PrometheusQuery); oneof != nil { + out.Source = oneof + } out.UnitOverride = ValueOf(in.UnitOverride) - // MISSING: OutputFullDuration + out.OutputFullDuration = ValueOf(in.OutputFullDuration) return out } func TimeSeriesTable_FromProto(mapCtx *MapContext, in *pb.TimeSeriesTable) *krm.TimeSeriesTable { diff --git a/pkg/controller/direct/monitoring/dashboard_mappings.go b/pkg/controller/direct/monitoring/dashboard_mappings.go index 85066771d7..ea4174143c 100644 --- a/pkg/controller/direct/monitoring/dashboard_mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_mappings.go @@ -197,3 +197,12 @@ func ErrorReportingPanel_ToProto(mapCtx *MapContext, in *krm.ErrorReportingPanel out.Versions = in.Versions return out } + +func TimeSeriesQuery_PrometheusQuery_ToProto(mapCtx *MapContext, in *string) *pb.TimeSeriesQuery_PrometheusQuery { + if in == nil { + return nil + } + out := &pb.TimeSeriesQuery_PrometheusQuery{} + out.PrometheusQuery = *in + return out +} diff --git a/pkg/controller/direct/monitoring/roundtrip_test.go b/pkg/controller/direct/monitoring/roundtrip_test.go index d1654546d8..623edbcaed 100644 --- a/pkg/controller/direct/monitoring/roundtrip_test.go +++ b/pkg/controller/direct/monitoring/roundtrip_test.go @@ -69,8 +69,6 @@ func FuzzMonitoringDashboardSpec(f *testing.F) { unimplementedFields.Insert(widgetPath + ".scorecard.thresholds[].target_axis") unimplementedFields.Insert(widgetPath + ".scorecard.blank_view") - unimplementedFields.Insert(widgetPath + ".scorecard.time_series_query.prometheus_query") - unimplementedFields.Insert(widgetPath + ".scorecard.time_series_query.output_full_duration") unimplementedFields.Insert(widgetPath + ".alert_chart") diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden index 66eb0ff6b3..dbc496876a 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden @@ -66,6 +66,12 @@ spec: - collapsibleGroup: collapsed: true title: CollapsibleGroup Widget + - scorecard: + timeSeriesQuery: + outputFullDuration: true + prometheusQuery: sum(mysql_global_status_uptime{${Cluster},${Location},${Namespace}} + / 60) + title: Scorecard 
Widget - alertChart: alertPolicyRef: external: projects/${projectId}/alertPolicies/${alertPolicyID} diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml index 91069b0c18..5bbd5d642f 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml @@ -74,6 +74,12 @@ spec: - collapsibleGroup: collapsed: true title: CollapsibleGroup Widget + - scorecard: + timeSeriesQuery: + outputFullDuration: true + prometheusQuery: sum(mysql_global_status_uptime{${Cluster},${Location},${Namespace}} + / 60) + title: Scorecard Widget - alertChart: alertPolicyRef: name: monitoringalertpolicy-${uniqueId} diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log index 13cbd5835b..c24d30bff3 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log @@ -294,6 +294,15 @@ x-goog-request-params: parent=projects%2F${projectId} }, "title": "CollapsibleGroup Widget" }, + { + "scorecard": { + "timeSeriesQuery": { + "outputFullDuration": true, + "prometheusQuery": "sum(mysql_global_status_uptime{${Cluster},${Location},${Namespace}} / 60)" + } + }, + "title": "Scorecard Widget" + }, { "alertChart": { "name": "projects/${projectId}/alertPolicies/${alertPolicyID}" @@ -467,6 +476,15 @@ X-Xss-Protection: 0 }, "title": "CollapsibleGroup Widget" }, + { + "scorecard": { + "timeSeriesQuery": { + "outputFullDuration": true, + "prometheusQuery": "sum(mysql_global_status_uptime{${Cluster},${Location},${Namespace}} / 60)" + } + }, + "title": "Scorecard Widget" + }, { "alertChart": { "name": "projects/${projectId}/alertPolicies/${alertPolicyID}" @@ -648,6 +666,15 @@ X-Xss-Protection: 0 }, "title": "CollapsibleGroup Widget" }, + { + "scorecard": { + "timeSeriesQuery": { + "outputFullDuration": true, + "prometheusQuery": "sum(mysql_global_status_uptime{${Cluster},${Location},${Namespace}} / 60)" + } + }, + "title": "Scorecard Widget" + }, { "alertChart": { "name": "projects/${projectId}/alertPolicies/${alertPolicyID}" diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml index 2c4693e849..5819ee6d14 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml @@ -80,6 +80,11 @@ spec: - title: "CollapsibleGroup Widget" collapsibleGroup: collapsed: true + - title: "Scorecard Widget" + scorecard: + timeSeriesQuery: + outputFullDuration: true + prometheusQuery: 
"sum(mysql_global_status_uptime{${Cluster},${Location},${Namespace}} / 60)" - title: "AlertChart Widget" alertChart: alertPolicyRef: diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md index 71d1510baf..0fddc16e62 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md @@ -113,6 +113,8 @@ columnLayout: - minAlignmentPeriod: string sliceNameTemplate: string timeSeriesQuery: + outputFullDuration: boolean + prometheusQuery: string timeSeriesFilter: aggregation: alignmentPeriod: string @@ -175,6 +177,8 @@ columnLayout: targetAxis: string value: float timeSeriesQuery: + outputFullDuration: boolean + prometheusQuery: string timeSeriesFilter: aggregation: alignmentPeriod: string @@ -246,6 +250,8 @@ columnLayout: minAlignmentPeriod: string plotType: string timeSeriesQuery: + outputFullDuration: boolean + prometheusQuery: string timeSeriesFilter: aggregation: alignmentPeriod: string @@ -342,6 +348,8 @@ gridLayout: - minAlignmentPeriod: string sliceNameTemplate: string timeSeriesQuery: + outputFullDuration: boolean + prometheusQuery: string timeSeriesFilter: aggregation: alignmentPeriod: string @@ -404,6 +412,8 @@ gridLayout: targetAxis: string value: float timeSeriesQuery: + outputFullDuration: boolean + prometheusQuery: string timeSeriesFilter: aggregation: alignmentPeriod: string @@ -475,6 +485,8 @@ gridLayout: minAlignmentPeriod: string plotType: string timeSeriesQuery: + outputFullDuration: boolean + prometheusQuery: string timeSeriesFilter: aggregation: alignmentPeriod: string @@ -572,6 +584,8 @@ mosaicLayout: - minAlignmentPeriod: string sliceNameTemplate: string timeSeriesQuery: + outputFullDuration: boolean + prometheusQuery: string timeSeriesFilter: aggregation: alignmentPeriod: string @@ -634,6 +648,8 @@ mosaicLayout: targetAxis: string value: float timeSeriesQuery: + outputFullDuration: boolean + prometheusQuery: string timeSeriesFilter: aggregation: alignmentPeriod: string @@ -705,6 +721,8 @@ mosaicLayout: minAlignmentPeriod: string plotType: string timeSeriesQuery: + outputFullDuration: boolean + prometheusQuery: string timeSeriesFilter: aggregation: alignmentPeriod: string @@ -810,6 +828,8 @@ rowLayout: - minAlignmentPeriod: string sliceNameTemplate: string timeSeriesQuery: + outputFullDuration: boolean + prometheusQuery: string timeSeriesFilter: aggregation: alignmentPeriod: string @@ -872,6 +892,8 @@ rowLayout: targetAxis: string value: float timeSeriesQuery: + outputFullDuration: boolean + prometheusQuery: string timeSeriesFilter: aggregation: alignmentPeriod: string @@ -943,6 +965,8 @@ rowLayout: minAlignmentPeriod: string plotType: string timeSeriesQuery: + outputFullDuration: boolean + prometheusQuery: string timeSeriesFilter: aggregation: alignmentPeriod: string @@ -1428,6 +1452,32 @@ rowLayout:

{% verbatim %}Required. The query for the PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`.{% endverbatim %}

The remaining hunks of this generated doc add the same pair of reference-table entries under every widget query path, inserted immediately before the corresponding `...timeSeriesQuery.timeSeriesFilter` entry. Paths gaining the new entries, per hunk:

  columnLayout.columns[].widgets[].pieChart.dataSets[].timeSeriesQuery   (hunk @@ -1428,6 +1452,32 @@ above)
  columnLayout.columns[].widgets[].scorecard.timeSeriesQuery             (@@ -2290,6 +2340,32 @@)
  columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery    (@@ -3205,6 +3281,32 @@)
  gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery               (@@ -4416,6 +4518,32 @@)
  gridLayout.widgets[].scorecard.timeSeriesQuery                         (@@ -5278,6 +5406,32 @@)
  gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery                (@@ -6193,6 +6347,32 @@)
  mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery        (@@ -7414,6 +7594,32 @@)
  mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery                  (@@ -8276,6 +8482,32 @@)
  mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery         (@@ -9191,6 +9423,32 @@)
  rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery         (@@ -10502,6 +10760,32 @@)
  rowLayout.rows[].widgets[].scorecard.timeSeriesQuery                   (@@ -11364,6 +11648,32 @@)
  rowLayout.rows[].widgets[].xyChart.dataSets[].timeSeriesQuery          (@@ -12279,6 +12589,32 @@)

Each path gains an `.outputFullDuration` entry (Optional, boolean):

{% verbatim %}Optional. If set, Cloud Monitoring will treat the full query duration as
the alignment period so that there will be only 1 output value.

*Note: This could override the configured alignment period except for
the cases where a series of data points are expected, like
- XyChart
- Scorecard's spark chart{% endverbatim %}

and a `.prometheusQuery` entry (Optional, string):

{% verbatim %}A query used to fetch time series with PromQL.{% endverbatim %}

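For orientation (not part of the patch itself): the two new KRM fields surface at the Monitoring API as members of `TimeSeriesQuery`. Below is a minimal Go sketch of the API-level object that the new "Scorecard Widget" fixture describes, assuming the vendored `dashboardpb` bindings follow the standard protoc-gen-go naming for the `source` oneof (`TimeSeriesQuery_PrometheusQuery`); the query string is a simplified placeholder rather than the fixture's templated PromQL.

```go
package main

import (
	"fmt"

	dashboardpb "cloud.google.com/go/monitoring/dashboard/apiv1/dashboardpb"
)

// newScorecardWidget builds a scorecard widget whose timeSeriesQuery uses
// PromQL as its source and sets outputFullDuration, mirroring the fields
// documented above. Illustrative sketch only.
func newScorecardWidget() *dashboardpb.Widget {
	return &dashboardpb.Widget{
		Title: "Scorecard Widget",
		Content: &dashboardpb.Widget_Scorecard{
			Scorecard: &dashboardpb.Scorecard{
				TimeSeriesQuery: &dashboardpb.TimeSeriesQuery{
					// The KRM field ...timeSeriesQuery.prometheusQuery maps onto
					// this oneof member (placeholder query).
					Source: &dashboardpb.TimeSeriesQuery_PrometheusQuery{
						PrometheusQuery: "sum(mysql_global_status_uptime / 60)",
					},
					// ...timeSeriesQuery.outputFullDuration: treat the full query
					// duration as the alignment period, producing one output value.
					OutputFullDuration: true,
				},
			},
		},
	}
}

func main() {
	fmt.Println(newScorecardWidget())
}
```

As the field description notes, setting `outputFullDuration` collapses the whole query window into a single value, so it can override any configured alignment period except where a series of points is expected (XyChart, a scorecard's spark chart).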
From 7d1cd2076a5417bcaea45403fc94eecd1b751ba1 Mon Sep 17 00:00:00 2001 From: Yuwen Ma Date: Fri, 28 Jun 2024 03:07:37 +0000 Subject: [PATCH 094/101] fix unit-tests due to the new Direct` in servicemapping --- config/servicemappings/cloudbuild.yaml | 8 ++--- .../servicemapping/servicemapping_test.go | 5 ++- .../resourcedescription.go | 3 ++ pkg/resourceskeleton/resourceskeleton_test.go | 6 ++++ .../snippetgeneration/snippetgeneration.go | 1 + .../resourcefixture/resourcefixture_test.go | 3 ++ pkg/test/resourcefixture/sets.go | 1 + .../cloudbuild/cloudbuildworkerpool.md | 31 ++++++++----------- 8 files changed, 35 insertions(+), 23 deletions(-) diff --git a/config/servicemappings/cloudbuild.yaml b/config/servicemappings/cloudbuild.yaml index aaf85f856d..2fc7ca84bf 100644 --- a/config/servicemappings/cloudbuild.yaml +++ b/config/servicemappings/cloudbuild.yaml @@ -22,10 +22,6 @@ spec: version: v1beta1 serviceHostName: "cloudbuild.googleapis.com" resources: - - name: google_cloudbuild_workerpool - kind: CloudBuildWorkerPool - direct: true - - name: google_cloudbuild_trigger kind: CloudBuildTrigger metadataMapping: @@ -231,3 +227,7 @@ spec: targetField: name ignoredFields: - trigger_template.project_id + + - name: google_cloudbuild_workerpool + kind: CloudBuildWorkerPool + direct: true diff --git a/config/tests/servicemapping/servicemapping_test.go b/config/tests/servicemapping/servicemapping_test.go index 82cbea0cac..8afb1c10fe 100644 --- a/config/tests/servicemapping/servicemapping_test.go +++ b/config/tests/servicemapping/servicemapping_test.go @@ -47,6 +47,9 @@ func TestIDTemplateCanBeUsedToMatchResourceNameShouldHaveValue(t *testing.T) { serviceMappings := testservicemappingloader.New(t).GetServiceMappings() for _, sm := range serviceMappings { for _, rc := range sm.Spec.Resources { + if rc.Direct { + continue + } if rc.IDTemplateCanBeUsedToMatchResourceName == nil { t.Fatalf("resource config '%v' is missing required field 'IDTemplateCanBeUsedToMatchResourceName'", rc.Name) @@ -606,7 +609,7 @@ func TestMustHaveIDTemplateOrServerGeneratedId(t *testing.T) { } func assertIDTemplateOrServerGeneratedID(t *testing.T, rc v1alpha1.ResourceConfig) { - if rc.IDTemplate == "" && rc.ServerGeneratedIDField == "" { + if !rc.Direct && rc.IDTemplate == "" && rc.ServerGeneratedIDField == "" { t.Fatalf("resource kind '%v' with name '%v' has neither id template or server generated ID defined: at least one must be present", rc.Kind, rc.Name) } } diff --git a/pkg/cli/cmd/printresources/resourcedescription/resourcedescription.go b/pkg/cli/cmd/printresources/resourcedescription/resourcedescription.go index f02ff1a6ad..19e58c4eee 100644 --- a/pkg/cli/cmd/printresources/resourcedescription/resourcedescription.go +++ b/pkg/cli/cmd/printresources/resourcedescription/resourcedescription.go @@ -103,6 +103,9 @@ func doesResourceSupportExport(tfProvider *tfschema.Provider, sm v1alpha1.Servic func resourceHasTFImporter(rc v1alpha1.ResourceConfig, tfProvider *tfschema.Provider) bool { // every value for rc.Name should be in the ResourcesMap + if rc.Direct { + return false + } resource := tfProvider.ResourcesMap[rc.Name] return resource.Importer != nil } diff --git a/pkg/resourceskeleton/resourceskeleton_test.go b/pkg/resourceskeleton/resourceskeleton_test.go index 8a5ee5c813..7089dae7ad 100644 --- a/pkg/resourceskeleton/resourceskeleton_test.go +++ b/pkg/resourceskeleton/resourceskeleton_test.go @@ -121,6 +121,9 @@ func ensureAssetTCExistsForEachResourceConfig(t *testing.T, smLoader *servicemap } for _, sm := range 
smLoader.GetServiceMappings() { for _, rc := range sm.Spec.Resources { + if rc.Direct { + continue + } if rc.AutoGenerated { continue } @@ -141,6 +144,9 @@ func ensureURITCExistsForEachResourceConfig(t *testing.T, smLoader *servicemappi } for _, sm := range smLoader.GetServiceMappings() { for _, rc := range sm.Spec.Resources { + if rc.Direct { + continue + } if rc.AutoGenerated { continue } diff --git a/pkg/snippet/snippetgeneration/snippetgeneration.go b/pkg/snippet/snippetgeneration/snippetgeneration.go index 95f450a70a..6ac21b2582 100644 --- a/pkg/snippet/snippetgeneration/snippetgeneration.go +++ b/pkg/snippet/snippetgeneration/snippetgeneration.go @@ -42,6 +42,7 @@ var preferredSampleForResource = map[string]string{ "binaryauthorizationpolicy": "cluster-policy", "certificatemanagercertificate": "self-managed-certificate", "cloudbuildtrigger": "build-trigger-for-cloud-source-repo", + "cloudbuildworkerpool": "workerpool-with-peered-network", "cloudfunctionsfunction": "httpstrigger", "cloudidentitymembership": "membership-with-manager-role", "cloudschedulerjob": "scheduler-job-pubsub", diff --git a/pkg/test/resourcefixture/resourcefixture_test.go b/pkg/test/resourcefixture/resourcefixture_test.go index 9912c81b76..e3eb40ef91 100644 --- a/pkg/test/resourcefixture/resourcefixture_test.go +++ b/pkg/test/resourcefixture/resourcefixture_test.go @@ -75,6 +75,9 @@ func getResourceConfigIDSet(t *testing.T, smLoader *servicemappingloader.Service resourceConfigIds := make(map[string]bool, 0) for _, sm := range sms { for _, rc := range sm.Spec.Resources { + if rc.Direct { + continue + } // No need to cover auto-generated v1alpha1 CRDs when calculating // set cover. if sm.GetVersionFor(&rc) == k8s.KCCAPIVersionV1Alpha1 { diff --git a/pkg/test/resourcefixture/sets.go b/pkg/test/resourcefixture/sets.go index 864407ac41..cf904b8dd2 100644 --- a/pkg/test/resourcefixture/sets.go +++ b/pkg/test/resourcefixture/sets.go @@ -108,6 +108,7 @@ func GetUniqueResourceConfigID(rc v1alpha1.ResourceConfig) string { if rc.Name == "google_compute_instance" || rc.Name == "google_compute_instance_from_template" { return fmt.Sprintf("%v:%v", rc.Kind, rc.Name) } + return rc.Kind } diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudbuild/cloudbuildworkerpool.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudbuild/cloudbuildworkerpool.md index ac2f73f461..d14b16a55d 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudbuild/cloudbuildworkerpool.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudbuild/cloudbuildworkerpool.md @@ -49,6 +49,10 @@ + +{{product_name_short}} Default Average Reconcile Interval In Seconds +600 + @@ -492,7 +496,7 @@ observedState: apiVersion: cloudbuild.cnrm.cloud.google.com/v1beta1 kind: CloudBuildWorkerPool metadata: - name: cloudbuildworkerpool-sample + name: cloudbuildworkerpool-sample-default-network spec: projectRef: # Replace ${PROJECT_ID?} with your project ID @@ -523,7 +527,7 @@ spec: apiVersion: cloudbuild.cnrm.cloud.google.com/v1beta1 kind: CloudBuildWorkerPool metadata: - name: cloudbuildworkerpool-${uniqueId} + name: cloudbuildworkerpool-sample-peered-network spec: projectRef: # Replace ${PROJECT_ID?} with your project ID @@ -536,14 +540,14 @@ spec: diskSizeGb: 100 networkConfig: peeredNetworkRef: - name: computenetwork-dep + name: cloudbuildworkerpool-dep-peered-network egressOption: NO_PUBLIC_EGRESS peeredNetworkIPRange: /29 --- apiVersion: 
compute.cnrm.cloud.google.com/v1beta1 kind: ComputeAddress metadata: - name: computenaddress-dep + name: cloudbuildworkerpool-dep-peered-network annotations: cnrm.cloud.google.com/project-id: ${PROJECT_ID?} spec: @@ -552,12 +556,12 @@ spec: addressType: INTERNAL prefixLength: 24 networkRef: - name: computenetwork-dep + name: cloudbuildworkerpool-dep-peered-network --- apiVersion: compute.cnrm.cloud.google.com/v1beta1 kind: ComputeNetwork metadata: - name: computenetwork-dep + name: cloudbuildworkerpool-dep-peered-network annotations: cnrm.cloud.google.com/project-id: ${PROJECT_ID?} spec: @@ -566,24 +570,15 @@ spec: apiVersion: servicenetworking.cnrm.cloud.google.com/v1beta1 kind: ServiceNetworkingConnection metadata: - name: servicenetworkconn-dep + name: cloudbuildworkerpool-dep-peered-network annotations: cnrm.cloud.google.com/project-id: ${PROJECT_ID?} spec: networkRef: - name: computenetwork-dep + name: cloudbuildworkerpool-dep-peered-network service: servicenetworking.googleapis.com reservedPeeringRanges: - - name: computenaddress-dep ---- -apiVersion: serviceusage.cnrm.cloud.google.com/v1beta1 -kind: Service -metadata: - name: service-sample -spec: - resourceID: servicenetworking.googleapis.com - projectRef: - external: projects/${PROJECT_ID?} + - name: cloudbuildworkerpool-dep-peered-network ``` From 07aa5ffeeeb2843e4a552c0ed5df597e47c3ca37 Mon Sep 17 00:00:00 2001 From: Yuwen Ma Date: Fri, 28 Jun 2024 04:20:19 +0000 Subject: [PATCH 095/101] turn on servicemappnig adds the default conflict-management label to cbwp golden object --- .../_generated_object_cloudbuildworkerpool.golden.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1beta1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1beta1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml index b27cfeb085..606646cd8d 100644 --- a/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1beta1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/cloudbuild/v1beta1/cloudbuildworkerpool/_generated_object_cloudbuildworkerpool.golden.yaml @@ -1,6 +1,8 @@ apiVersion: cloudbuild.cnrm.cloud.google.com/v1beta1 kind: CloudBuildWorkerPool metadata: + annotations: + cnrm.cloud.google.com/management-conflict-prevention-policy: none finalizers: - cnrm.cloud.google.com/finalizer - cnrm.cloud.google.com/deletion-defender From c760088463d5b193a8a049372a206da2ffd0bc33 Mon Sep 17 00:00:00 2001 From: Yuwen Ma Date: Fri, 28 Jun 2024 04:44:16 +0000 Subject: [PATCH 096/101] chore: remove state-into-spec from CBWP doc --- .../cloudbuild/cloudbuildworkerpool.md | 14 -------------- .../resource-reference/main.go | 13 ++++++++++++- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudbuild/cloudbuildworkerpool.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudbuild/cloudbuildworkerpool.md index d14b16a55d..5ebb2bed49 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudbuild/cloudbuildworkerpool.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/cloudbuild/cloudbuildworkerpool.md @@ -59,20 +59,6 @@ ## Custom Resource Definition Properties -### Annotations - - - - - - - - - - - -
Fields
cnrm.cloud.google.com/state-into-spec
- ### Spec #### Schema diff --git a/scripts/generate-google3-docs/resource-reference/main.go b/scripts/generate-google3-docs/resource-reference/main.go index a94939cb32..0fb4ca0bbc 100644 --- a/scripts/generate-google3-docs/resource-reference/main.go +++ b/scripts/generate-google3-docs/resource-reference/main.go @@ -344,11 +344,22 @@ func handleAnnotationsAndIAMSettingsForDCLBasedResource(r *resource, gvk schema. func handleAnnotationsAndIAMSettingsForTFBasedResource(r *resource, gvk schema.GroupVersionKind, smLoader *servicemappingloader.ServiceMappingLoader) error { annotationSet := sets.NewString() - annotationSet.Insert(k8s.StateIntoSpecAnnotation) rcs, err := smLoader.GetResourceConfigs(gvk) if err != nil { return fmt.Errorf("error getting resource configs: %w", err) } + // Do not show the "state-into-spec" annotation for the direct resource google3 doc. + direct := false + for _, rc := range rcs { + if rc.Direct { + direct = true + break + } + } + if !direct { + annotationSet.Insert(k8s.StateIntoSpecAnnotation) + } + for _, rc := range rcs { if rc.Directives != nil { for _, d := range rc.Directives { From 1f1ec4f46bbaab692c2d041afc40a213a2c0ae06 Mon Sep 17 00:00:00 2001 From: justinsb Date: Wed, 26 Jun 2024 21:09:49 -0400 Subject: [PATCH 097/101] monitoringdashboard - use etag to detect changes --- .../direct/monitoring/changedetection.go | 113 ++++ .../monitoringdashboard_controller.go | 26 +- .../monitoringdashboardfull/_http.log | 549 ++++++++++++++++++ 3 files changed, 663 insertions(+), 25 deletions(-) create mode 100644 pkg/controller/direct/monitoring/changedetection.go diff --git a/pkg/controller/direct/monitoring/changedetection.go b/pkg/controller/direct/monitoring/changedetection.go new file mode 100644 index 0000000000..bc6fb4b258 --- /dev/null +++ b/pkg/controller/direct/monitoring/changedetection.go @@ -0,0 +1,113 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monitoring + +import ( + "context" + + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/apis/k8s/v1alpha1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" +) + +// objectWithEtag holds the fields that are relevant to an etag-based change detection. +type objectWithEtag struct { + Status objectWithEtagtatus `json:"status"` +} + +type objectWithEtagtatus struct { + Conditions []v1alpha1.Condition `json:"conditions,omitempty"` + + // Used if status.observedState.etag is not set + Etag *string `json:"etag,omitempty"` + + // Compared to the object's generation to detect spec changes + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + + ObservedState objectWithEtagObservedState `json:"observedState,omitempty"` +} + +type objectWithEtagObservedState struct { + // Checked before status.etag + Etag *string `json:"etag,omitempty"` +} + +// ShouldReconcileBasedOnEtag checks if we should reconcile based on the GCP etag matching the KRM etag. 
+// If the etag in KRM status is the same as the GCP etag, we consider the GCP object not to have changed. +// We also consider the object to have changes if the KRM object generation != observedGeneration (spec changes), +// and we also reconcile again if the object is not healthy (based on status.conditions). +// +// A few problems with the approach: +// * We miss changes due to labels or annotations. +// * If there's a change in the GCP object that isn't reflected in etag, we miss that (seems unlikely) +// * Because we set spec.resourceID, we do an extra reconciliation after first creation (because we bump generation). +func ShouldReconcileBasedOnEtag(ctx context.Context, u *unstructured.Unstructured, gcpEtag string) bool { + log := klog.FromContext(ctx) + + obj := &objectWithEtag{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, &obj); err != nil { + log.Error(err, "error converting from unstructured") + return true + } + + if u.GetGeneration() != ValueOf(obj.Status.ObservedGeneration) { + log.V(2).Info("generation does not match", "generation", u.GetGeneration(), "observedGeneration", ValueOf(obj.Status.ObservedGeneration)) + return true + } + + if gcpEtag == "" { + log.V(2).Info("etag not set in GCP") + return true + } + + objectEtag := ValueOf(obj.Status.ObservedState.Etag) + if objectEtag == "" { + objectEtag = ValueOf(obj.Status.Etag) + } + + if objectEtag == "" { + log.V(2).Info("etag not set in KRM object") + return true + } + + if gcpEtag != objectEtag { + log.V(2).Info("object status etag does not match gcp updateTime", "objectEtag", objectEtag, "gcpEtag", gcpEtag) + return true + } + + if obj.Status.Conditions != nil { + // if there was a previously failing update let's make sure we give + // the update a chance to heal or keep marking it as failed + + ready := false + for _, condition := range obj.Status.Conditions { + if condition.Type == v1alpha1.ReadyConditionType { + if condition.Status == corev1.ConditionTrue { + ready = true + } + } + } + + if !ready { + log.V(2).Info("status.conditions indicates object is not ready yet") + return true + } + } + + log.V(2).Info("object etag matches gcp etag", "objectEtag", objectEtag, "gcpEtag", gcpEtag) + return false +} diff --git a/pkg/controller/direct/monitoring/monitoringdashboard_controller.go b/pkg/controller/direct/monitoring/monitoringdashboard_controller.go index 1278fbf005..0c411a8a4b 100644 --- a/pkg/controller/direct/monitoring/monitoringdashboard_controller.go +++ b/pkg/controller/direct/monitoring/monitoringdashboard_controller.go @@ -23,7 +23,6 @@ import ( pb "cloud.google.com/go/monitoring/dashboard/apiv1/dashboardpb" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" @@ -232,10 +231,7 @@ func (a *dashboardAdapter) Update(ctx context.Context, u *unstructured.Unstructu // TODO: Where/how do we want to enforce immutability? - changedFields := ComputeChangedFields(onlySpec(a.desired), onlySpec(a.actual)) - if len(changedFields) != 0 { - log.Info("changed fields", "fields", sets.List(changedFields)) - + if ShouldReconcileBasedOnEtag(ctx, u, a.actual.Etag) { req := &pb.UpdateDashboardRequest{ Dashboard: a.desired, } @@ -288,26 +284,6 @@ func (a *dashboardAdapter) Export(ctx context.Context) (*unstructured.Unstructur return u, nil } -func onlySpec(in *pb.Dashboard) *pb.Dashboard { - // We could also do this "directly" with... 
- // c := proto.Clone(in).(*pb.Dashboard) - // c.Etag = "" - // c.Name = "" - - // Remove unmapped fields by round-tripping through spec - mapCtx := &MapContext{} - spec := MonitoringDashboardSpec_FromProto(mapCtx, in) - if mapCtx.Err() != nil { - klog.Fatalf("error during onlySpec: %v", mapCtx.Err()) - } - - out := MonitoringDashboardSpec_ToProto(mapCtx, spec) - if mapCtx.Err() != nil { - klog.Fatalf("error during onlySpec: %v", mapCtx.Err()) - } - return out -} - func (a *dashboardAdapter) fullyQualifiedName() string { return fmt.Sprintf("projects/%s/dashboards/%s", a.projectID, a.resourceID) } diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log index c24d30bff3..ae681fd46f 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log @@ -735,6 +735,555 @@ X-Xss-Protection: 0 --- +PATCH https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +User-Agent: kcc/controller-manager +x-goog-request-params: dashboard.name=projects%2F${projectId}%2Fdashboards%2Fmonitoringdashboard-${uniqueId} + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "id": "singleViewGroupWidget1", + "singleViewGroup": {}, + "title": "SingleViewGroup Widget" + }, + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": 1, + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": 2 + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "600.500s", + "yAxis": { + "label": "y1Axis", + "scale": 1 + } + } + }, + { + "text": { + "content": "Widget 2", + "format": 1, + "style": { + "backgroundColor": "#000", + "fontSize": 4, + "horizontalAlignment": 2, + "padding": 3, + "pointerLocation": 5, + "textColor": "#fff", + "verticalAlignment": 2 + } + } + }, + { + "id": "widget3", + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": 3, + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": 2 + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "thresholds": [ + { + "label": "Important", + "targetAxis": 1, + "value": 1.2 + } + ], + "yAxis": { + "label": "y1Axis", + "scale": 1 + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + }, + { + "sectionHeader": { + "dividerBelow": true, + "subtitle": "Example SectionHeader" + }, + "title": "SectionHeader Widget" + }, + { + "collapsibleGroup": { + "collapsed": true + }, + "title": "CollapsibleGroup Widget" + }, + { + "scorecard": { + "timeSeriesQuery": { + "outputFullDuration": true, + "prometheusQuery": "sum(mysql_global_status_uptime{${Cluster},${Location},${Namespace}} / 60)" + } + }, + "title": "Scorecard Widget" + }, + { + "alertChart": { + "name": "projects/${projectId}/alertPolicies/${alertPolicyID}" + }, + "title": "AlertChart Widget" + }, + { + "pieChart": { + 
"chartType": 2, + "dataSets": [ + { + "minAlignmentPeriod": "60s", + "sliceNameTemplate": "${resource.labels.zone}", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": 2 + }, + "filter": "metric.type=\"compute.googleapis.com/instance/disk/read_bytes_count\" resource.type=\"gce_instance\"", + "secondaryAggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": 12 + } + } + } + } + ], + "showLabels": true + }, + "title": "PieChart Widget" + }, + { + "errorReportingPanel": { + "projectNames": [ + "projects/project1", + "projects/project2" + ], + "services": [ + "foo", + "bar" + ], + "versions": [ + "v1", + "v2" + ] + }, + "title": "ErrorReporting Widget" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-full", + "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" +} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "id": "singleViewGroupWidget1", + "singleViewGroup": {}, + "title": "SingleViewGroup Widget" + }, + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "600.500s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN", + "style": { + "backgroundColor": "#000", + "fontSize": "FS_LARGE", + "horizontalAlignment": "H_CENTER", + "padding": "P_MEDIUM", + "pointerLocation": "PL_TOP_LEFT", + "textColor": "#fff", + "verticalAlignment": "V_CENTER" + } + } + }, + { + "id": "widget3", + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "thresholds": [ + { + "label": "Important", + "targetAxis": "Y1", + "value": 1.2 + } + ], + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + }, + { + "sectionHeader": { + "dividerBelow": true, + "subtitle": "Example SectionHeader" + }, + "title": "SectionHeader Widget" + }, + { + "collapsibleGroup": { + "collapsed": true + }, + "title": "CollapsibleGroup Widget" + }, + { + "scorecard": { + "timeSeriesQuery": { + "outputFullDuration": true, + "prometheusQuery": "sum(mysql_global_status_uptime{${Cluster},${Location},${Namespace}} / 60)" + } + }, + "title": "Scorecard Widget" + }, + { + "alertChart": { + "name": "projects/${projectId}/alertPolicies/${alertPolicyID}" + }, + "title": "AlertChart Widget" + }, + { + "pieChart": { + "chartType": "DONUT", + "dataSets": [ + { + "minAlignmentPeriod": "60s", + "sliceNameTemplate": "${resource.labels.zone}", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "alignmentPeriod": "60s", + 
"perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"compute.googleapis.com/instance/disk/read_bytes_count\" resource.type=\"gce_instance\"", + "secondaryAggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_MEAN" + } + } + } + } + ], + "showLabels": true + }, + "title": "PieChart Widget" + }, + { + "errorReportingPanel": { + "projectNames": [ + "projects/project1", + "projects/project2" + ], + "services": [ + "foo", + "bar" + ], + "versions": [ + "v1", + "v2" + ] + }, + "title": "ErrorReporting Widget" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-full", + "etag": "abcdef0123A=", + "name": "projects/${projectNumber}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + +GET https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +User-Agent: kcc/controller-manager +x-goog-request-params: name=projects%2F${projectId}%2Fdashboards%2Fmonitoringdashboard-${uniqueId} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "columnLayout": { + "columns": [ + { + "weight": "2", + "widgets": [ + { + "id": "singleViewGroupWidget1", + "singleViewGroup": {}, + "title": "SingleViewGroup Widget" + }, + { + "title": "Widget 1", + "xyChart": { + "dataSets": [ + { + "plotType": "LINE", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "timeshiftDuration": "600.500s", + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "text": { + "content": "Widget 2", + "format": "MARKDOWN", + "style": { + "backgroundColor": "#000", + "fontSize": "FS_LARGE", + "horizontalAlignment": "H_CENTER", + "padding": "P_MEDIUM", + "pointerLocation": "PL_TOP_LEFT", + "textColor": "#fff", + "verticalAlignment": "V_CENTER" + } + } + }, + { + "id": "widget3", + "title": "Widget 3", + "xyChart": { + "dataSets": [ + { + "plotType": "STACKED_BAR", + "targetAxis": "Y1", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"" + }, + "unitOverride": "1" + } + } + ], + "thresholds": [ + { + "label": "Important", + "targetAxis": "Y1", + "value": 1.2 + } + ], + "yAxis": { + "label": "y1Axis", + "scale": "LINEAR" + } + } + }, + { + "logsPanel": { + "filter": "metric.type=\"agent.googleapis.com/nginx/connections/accepted_count\"", + "resourceNames": [ + "projects/${projectId}" + ] + }, + "title": "Widget 4" + }, + { + "sectionHeader": { + "dividerBelow": true, + "subtitle": "Example SectionHeader" + }, + "title": "SectionHeader Widget" + }, + { + "collapsibleGroup": { + "collapsed": true + }, + "title": "CollapsibleGroup Widget" + }, + { + "scorecard": { + "timeSeriesQuery": { + "outputFullDuration": true, + "prometheusQuery": "sum(mysql_global_status_uptime{${Cluster},${Location},${Namespace}} / 60)" + } + }, + "title": "Scorecard Widget" + }, + { + "alertChart": { + "name": "projects/${projectId}/alertPolicies/${alertPolicyID}" + }, + "title": "AlertChart Widget" + }, + { + "pieChart": { + "chartType": "DONUT", + "dataSets": [ + { + "minAlignmentPeriod": "60s", + 
"sliceNameTemplate": "${resource.labels.zone}", + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"compute.googleapis.com/instance/disk/read_bytes_count\" resource.type=\"gce_instance\"", + "secondaryAggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_MEAN" + } + } + } + } + ], + "showLabels": true + }, + "title": "PieChart Widget" + }, + { + "errorReportingPanel": { + "projectNames": [ + "projects/project1", + "projects/project2" + ], + "services": [ + "foo", + "bar" + ], + "versions": [ + "v1", + "v2" + ] + }, + "title": "ErrorReporting Widget" + } + ] + } + ] + }, + "displayName": "monitoringdashboard-full", + "etag": "abcdef0123A=", + "name": "projects/${projectNumber}/dashboards/monitoringdashboard-${uniqueId}" +} + +--- + DELETE https://monitoring.googleapis.com/v1/projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}?%24alt=json%3Benum-encoding%3Dint Content-Type: application/json User-Agent: kcc/controller-manager From 058d7cd7e53d3c3186fe63f1e39c96b098a184ee Mon Sep 17 00:00:00 2001 From: justinsb Date: Wed, 26 Jun 2024 17:22:14 -0400 Subject: [PATCH 098/101] monitoringdashboard: add timeSeriesTable to widget --- .../v1beta1/monitoringdashboard_types.go | 5 - .../v1beta1/zz_generated.deepcopy.go | 10 + ...ards.monitoring.cnrm.cloud.google.com.yaml | 12653 ++++++++++------ .../v1beta1/monitoringdashboard_types.go | 35 + .../v1beta1/zz_generated.deepcopy.go | 80 + .../dashboard_generated.mappings.go | 20 +- .../direct/monitoring/dashboard_mappings.go | 14 + ...ated_export_monitoringdashboardfull.golden | 16 + ...object_monitoringdashboardfull.golden.yaml | 16 + .../monitoringdashboardfull/_http.log | 84 + .../monitoringdashboardfull/create.yaml | 15 + .../monitoring/monitoringdashboard.md | 8240 +++++++--- 12 files changed, 13774 insertions(+), 7414 deletions(-) diff --git a/apis/monitoring/v1beta1/monitoringdashboard_types.go b/apis/monitoring/v1beta1/monitoringdashboard_types.go index 53e69c91fa..89b4259351 100644 --- a/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -206,10 +206,8 @@ type TimeSeriesTable struct { // +required DataSets []TimeSeriesTable_TableDataSet `json:"dataSets,omitempty"` - /*NOTYET // Optional. Store rendering strategy MetricVisualization *string `json:"metricVisualization,omitempty"` - */ // Optional. The list of the persistent column settings for the table. ColumnSettings []TimeSeriesTable_ColumnSettings `json:"columnSettings,omitempty"` @@ -445,11 +443,8 @@ type Widget struct { // A blank space. Blank *Empty `json:"blank,omitempty"` - /*NOTYET - // A widget that displays time series data in a tabular format. TimeSeriesTable *TimeSeriesTable `json:"timeSeriesTable,omitempty"` - */ // A widget that groups the other widgets. All widgets that are within // the area spanned by the grouping widget are considered member widgets. 
diff --git a/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/apis/monitoring/v1beta1/zz_generated.deepcopy.go index 27a55b3231..2ed8807085 100644 --- a/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -1147,6 +1147,11 @@ func (in *TimeSeriesTable) DeepCopyInto(out *TimeSeriesTable) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.MetricVisualization != nil { + in, out := &in.MetricVisualization, &out.MetricVisualization + *out = new(string) + **out = **in + } if in.ColumnSettings != nil { in, out := &in.ColumnSettings, &out.ColumnSettings *out = make([]TimeSeriesTable_ColumnSettings, len(*in)) @@ -1257,6 +1262,11 @@ func (in *Widget) DeepCopyInto(out *Widget) { *out = new(Empty) **out = **in } + if in.TimeSeriesTable != nil { + in, out := &in.TimeSeriesTable, &out.TimeSeriesTable + *out = new(TimeSeriesTable) + (*in).DeepCopyInto(*out) + } if in.CollapsibleGroup != nil { in, out := &in.CollapsibleGroup, &out.CollapsibleGroup *out = new(CollapsibleGroup) diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml index f9e6e68798..97a9224a5a 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml @@ -1600,31 +1600,32 @@ spec: type: string type: object type: object - title: - description: Optional. The title of the widget. - type: string - xyChart: - description: A chart of time series data. + timeSeriesTable: + description: A widget that displays time series data + in a tabular format. properties: - chartOptions: - description: Display options for the chart. - properties: - mode: - description: The chart mode. - type: string - type: object + columnSettings: + description: Optional. The list of the persistent + column settings for the table. + items: + properties: + column: + description: Required. The id of the column. + type: string + visible: + description: Required. Whether the column + should be visible on page load. + type: boolean + required: + - column + - visible + type: object + type: array dataSets: description: Required. The data displayed in this - chart. + table. items: properties: - legendTemplate: - description: A template string for naming - `TimeSeries` in the resulting data set. - This should be a string with interpolations - of the form `${label_name}`, which will - resolve to the label's value. - type: string minAlignmentPeriod: description: Optional. The lower bound on data point frequency for this data set, @@ -1636,9 +1637,24 @@ spec: not make sense to fetch and align data at one minute intervals. type: string - plotType: - description: How this data should be plotted - on the chart. + tableDisplayOptions: + description: Optional. Table display options + for configuring how the table is rendered. + properties: + shownColumns: + description: Optional. This field is + unused and has been replaced by TimeSeriesTable.column_settings + items: + type: string + type: array + type: object + tableTemplate: + description: Optional. A template string + for naming `TimeSeries` in the resulting + data set. 
This should be a string with + interpolations of the form `${label_name}`, + which will resolve to the label's value + i.e. "${resource.labels.project_id}." type: string timeSeriesQuery: description: Required. Fields for querying @@ -2223,1631 +2239,982 @@ spec: field in `MetricDescriptor`. type: string type: object - required: - - timeSeriesQuery - type: object - type: array - thresholds: - description: Threshold lines drawn horizontally - across the chart. - items: - properties: - color: - description: The state color for this threshold. - Color is not allowed in a XyChart. - type: string - direction: - description: The direction for the current - threshold. Direction is not allowed in - a XyChart. - type: string - label: - description: A label for the threshold. - type: string - targetAxis: - description: The target axis to use for - plotting the threshold. Target axis is - not allowed in a Scorecard. - type: string - value: - description: The value of the threshold. - The value should be defined in the native - scale of the metric. - format: double - type: number type: object type: array - timeshiftDuration: - description: The duration used to display a comparison - chart. A comparison chart simultaneously shows - values from two similar-length time periods - (e.g., week-over-week metrics). The duration - must be positive, and it can only be applied - to charts with data sets of LINE plot type. + metricVisualization: + description: Optional. Store rendering strategy type: string - xAxis: - description: The properties applied to the x-axis. - properties: - label: - description: The label of the axis. - type: string - scale: - description: The axis scale. By default, a - linear scale is used. - type: string - type: object - yAxis: - description: The properties applied to the y-axis. - properties: - label: - description: The label of the axis. - type: string - scale: - description: The axis scale. By default, a - linear scale is used. - type: string - type: object required: - dataSets type: object - type: object - type: array - type: object - type: array - type: object - displayName: - description: Required. The mutable, human-readable name. - type: string - gridLayout: - description: Content is arranged with a basic layout that re-flows - a simple list of informational elements like widgets or tiles. - properties: - columns: - description: The number of columns into which the view's width - is divided. If omitted or set to zero, a system default will - be used while rendering. - format: int64 - type: integer - widgets: - description: The informational elements that are arranged into - the columns row-first. - items: - properties: - alertChart: - description: A chart of alert policy data. - properties: - alertPolicyRef: - description: Required. A reference to the MonitoringAlertPolicy. - oneOf: - - not: - required: - - external - required: - - name - - not: - anyOf: - - required: - - name - - required: - - namespace - required: - - external - properties: - external: - description: The MonitoringAlertPolicy link in the - form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", - when not managed by KCC. - type: string - name: - description: The `name` field of a `MonitoringAlertPolicy` - resource. - type: string - namespace: - description: The `namespace` field of a `MonitoringAlertPolicy` - resource. - type: string - type: object - required: - - alertPolicyRef - type: object - blank: - description: A blank space. 
- type: object - collapsibleGroup: - description: A widget that groups the other widgets. All - widgets that are within the area spanned by the grouping - widget are considered member widgets. - properties: - collapsed: - description: The collapsed state of the widget on first - page load. - type: boolean - type: object - errorReportingPanel: - description: A widget that displays a list of error groups. - properties: - projectRefs: - description: The projects from which to gather errors. - items: - description: The Project that this resource belongs - to. - oneOf: - - not: - required: - - external - required: - - name - - kind - - not: - anyOf: - - required: - - name - - required: - - namespace - - required: - - kind - required: - - external - properties: - external: - description: The `projectID` field of a project, - when not managed by KCC. - type: string - kind: - description: The kind of the Project resource; - optional but must be `Project` if provided. - type: string - name: - description: The `name` field of a `Project` resource. - type: string - namespace: - description: The `namespace` field of a `Project` - resource. - type: string - type: object - type: array - services: - description: |- - An identifier of the service, such as the name of the - executable, job, or Google App Engine service name. This field is expected - to have a low number of values that are relatively stable over time, as - opposed to `version`, which can be changed whenever new code is deployed. - - Contains the service name for error reports extracted from Google - App Engine logs or `default` if the App Engine default service is used. - items: - type: string - type: array - versions: - description: Represents the source code version that - the developer provided, which could represent a version - label or a Git SHA-1 hash, for example. For App Engine - standard environment, the version is set to the version - of the app. - items: + title: + description: Optional. The title of the widget. type: string - type: array - type: object - id: - description: Optional. The widget id. Ids may be made up - of alphanumerics, dashes and underscores. Widget ids are - optional. - type: string - logsPanel: - description: A widget that shows a stream of logs. - properties: - filter: - description: A filter that chooses which log entries - to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). - Only log entries that match the filter are returned. An - empty filter matches all log entries. - type: string - resourceNames: - description: The names of logging resources to collect - logs for. Currently only projects are supported. If - empty, the widget will default to the host project. - items: - oneOf: - - not: - required: - - external - required: - - name - - kind - - not: - anyOf: - - required: - - name - - required: - - namespace - - required: - - kind - required: - - external - properties: - external: - description: The external name of the referenced - resource - type: string - kind: - description: Kind of the referent. - type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - namespace: - description: 'Namespace of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: string - type: object - type: array - type: object - pieChart: - description: A widget that displays timeseries data as a - pie chart. 
- properties: - chartType: - description: Required. Indicates the visualization type - for the PieChart. - type: string - dataSets: - description: Required. The queries for the chart's data. - items: + xyChart: + description: A chart of time series data. properties: - minAlignmentPeriod: - description: Optional. The lower bound on data - point frequency for this data set, implemented - by specifying the minimum alignment period to - use in a time series query. For example, if - the data is published once every 10 minutes, - the `min_alignment_period` should be at least - 10 minutes. It would not make sense to fetch - and align data at one minute intervals. - type: string - sliceNameTemplate: - description: Optional. A template for the name - of the slice. This name will be displayed in - the legend and the tooltip of the pie chart. - It replaces the auto-generated names for the - slices. For example, if the template is set - to `${resource.labels.zone}`, the zone's value - will be used for the name instead of the default - name. - type: string - timeSeriesQuery: - description: Required. The query for the PieChart. - See, `google.monitoring.dashboard.v1.TimeSeriesQuery`. + chartOptions: + description: Display options for the chart. properties: - outputFullDuration: - description: |- - Optional. If set, Cloud Monitoring will treat the full query duration as - the alignment period so that there will be only 1 output value. - - *Note: This could override the configured alignment period except for - the cases where a series of data points are expected, like - - XyChart - - Scorecard's spark chart - type: boolean - prometheusQuery: - description: A query used to fetch time series - with PromQL. + mode: + description: The chart mode. type: string - timeSeriesFilter: - description: Filter parameters to fetch time - series. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this field - to combine multiple time series for - different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + type: object + dataSets: + description: Required. The data displayed in this + chart. + items: + properties: + legendTemplate: + description: A template string for naming + `TimeSeries` in the resulting data set. + This should be a string with interpolations + of the form `${label_name}`, which will + resolve to the label's value. + type: string + minAlignmentPeriod: + description: Optional. The lower bound on + data point frequency for this data set, + implemented by specifying the minimum + alignment period to use in a time series + query For example, if the data is published + once every 10 minutes, the `min_alignment_period` + should be at least 10 minutes. It would + not make sense to fetch and align data + at one minute intervals. + type: string + plotType: + description: How this data should be plotted + on the chart. + type: string + timeSeriesQuery: + description: Required. 
Fields for querying + time series data from the Stackdriver + metrics API. + properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time + series with PromQL. + type: string + timeSeriesFilter: + description: Filter parameters to fetch + time series. + properties: + aggregation: + description: By default, the raw + time series data is returned. + Use this field to combine multiple + time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series are - partitioned into subsets prior to - applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, - so this field implicitly contains - `resource.type`. Fields not specified - in `group_by_fields` are aggregated - away. If `group_by_fields` is not - specified and all the time series - have the same resource type, then - the time series are aggregated into - a single output time series. If - `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. 
Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. - type: string - pickTimeSeriesFilter: - description: Ranking based time series - filter. - properties: - direction: - description: How to use the ranking - to select time series that pass - through the filter. - type: string - numTimeSeries: - description: How many time series - to allow to pass through the filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` is - applied to each time series independently - to produce the value which will - be used to compare the time series - to other time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after `aggregation` is applied. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. 
+ type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of + exactly one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is not + possible to reduce across + different resource types, + so this field implicitly contains + `resource.type`. Fields not + specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same + resource type, then the time + series are aggregated into + a single output time series. + If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series are - partitioned into subsets prior to - applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, - so this field implicitly contains - `resource.type`. Fields not specified - in `group_by_fields` are aggregated - away. If `group_by_fields` is not - specified and all the time series - have the same resource type, then - the time series are aggregated into - a single output time series. If - `cross_series_reducer` is not defined, - this field is ignored. - items: + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. 
The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - required: - - filter - type: object - timeSeriesFilterRatio: - description: Parameters to fetch a ratio between - two time series filters. - properties: - denominator: - description: The denominator of the ratio. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + pickTimeSeriesFilter: + description: Ranking based time + series filter. + properties: + direction: + description: How to use the + ranking to select time series + that pass through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to + other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. 
- type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. 
If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - numerator: - description: The numerator of the ratio. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of + exactly one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is not + possible to reduce across + different resource types, + so this field implicitly contains + `resource.type`. Fields not + specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same + resource type, then the time + series are aggregated into + a single output time series. + If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. 
+ type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio + between two time series filters. + properties: + denominator: + description: The denominator of + the ratio. + properties: + aggregation: + description: By default, the + raw time series data is returned. + Use this field to combine + multiple time series for different + views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. 
+ Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - pickTimeSeriesFilter: - description: Ranking based time series - filter. - properties: - direction: - description: How to use the ranking - to select time series that pass - through the filter. - type: string - numTimeSeries: - description: How many time series - to allow to pass through the filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` is - applied to each time series independently - to produce the value which will - be used to compare the time series - to other time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after the ratio is computed. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of + fields to preserve when + `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time + series are partitioned + into subsets prior to + applying the aggregation + operation. Each subset + contains time series that + have the same value for + each of the grouping fields. + Each individual time series + is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is + not possible to reduce + across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the + time series have the same + resource type, then the + time series are aggregated + into a single output time + series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - The value must be at least 60 seconds. 
If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric + types, resources, and projects + to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the + ratio. + properties: + aggregation: + description: By default, the + raw time series data is returned. + Use this field to combine + multiple time series for different + views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series are - partitioned into subsets prior to - applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, - so this field implicitly contains - `resource.type`. Fields not specified - in `group_by_fields` are aggregated - away. 
If `group_by_fields` is not - specified and all the time series - have the same resource type, then - the time series are aggregated into - a single output time series. If - `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - type: object - timeSeriesQueryLanguage: - description: A query used to fetch time series - with MQL. - type: string - unitOverride: - description: The unit of data contained in - fetched time series. If non-empty, this - unit will override any unit that accompanies - fetched data. The format is the same as - the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - type: string - type: object - required: - - timeSeriesQuery - type: object - type: array - showLabels: - description: Optional. Indicates whether or not the - pie chart should show slices' labels - type: boolean - required: - - chartType - - dataSets - type: object - scorecard: - description: A scorecard summarizing time series data. - properties: - gaugeView: - description: Will cause the scorecard to show a gauge - chart. - properties: - lowerBound: - description: The lower bound for this gauge chart. - The value of the chart should always be greater - than or equal to this. - format: double - type: number - upperBound: - description: The upper bound for this gauge chart. - The value of the chart should always be less than - or equal to this. - format: double - type: number - type: object - sparkChartView: - description: Will cause the scorecard to show a spark - chart. - properties: - minAlignmentPeriod: - description: The lower bound on data point frequency - in the chart implemented by specifying the minimum - alignment period to use in a time series query. - For example, if the data is published once every - 10 minutes it would not make sense to fetch and - align data at one minute intervals. This field - is optional and exists only as a hint. 
- type: string - sparkChartType: - description: Required. The type of sparkchart to - show in this chartView. - type: string - required: - - sparkChartType - type: object - thresholds: - description: |- - The thresholds used to determine the state of the scorecard given the - time series' current value. For an actual value x, the scorecard is in a - danger state if x is less than or equal to a danger threshold that triggers - below, or greater than or equal to a danger threshold that triggers above. - Similarly, if x is above/below a warning threshold that triggers - above/below, then the scorecard is in a warning state - unless x also puts - it in a danger state. (Danger trumps warning.) - - As an example, consider a scorecard with the following four thresholds: - - ``` - { - value: 90, - category: 'DANGER', - trigger: 'ABOVE', - }, - { - value: 70, - category: 'WARNING', - trigger: 'ABOVE', - }, - { - value: 10, - category: 'DANGER', - trigger: 'BELOW', - }, - { - value: 20, - category: 'WARNING', - trigger: 'BELOW', - } - ``` + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of + fields to preserve when + `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time + series are partitioned + into subsets prior to + applying the aggregation + operation. Each subset + contains time series that + have the same value for + each of the grouping fields. + Each individual time series + is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is + not possible to reduce + across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the + time series have the same + resource type, then the + time series are aggregated + into a single output time + series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Then: values less than or equal to 10 would put the scorecard in a DANGER - state, values greater than 10 but less than or equal to 20 a WARNING state, - values strictly between 20 and 70 an OK state, values greater than or equal - to 70 but less than 90 a WARNING state, and values greater than or equal to - 90 a DANGER state. - items: - properties: - color: - description: The state color for this threshold. - Color is not allowed in a XyChart. - type: string - direction: - description: The direction for the current threshold. - Direction is not allowed in a XyChart. - type: string - label: - description: A label for the threshold. - type: string - targetAxis: - description: The target axis to use for plotting - the threshold. Target axis is not allowed in - a Scorecard. 
- type: string - value: - description: The value of the threshold. The value - should be defined in the native scale of the - metric. - format: double - type: number - type: object - type: array - timeSeriesQuery: - description: Required. Fields for querying time series - data from the Stackdriver metrics API. - properties: - outputFullDuration: - description: |- - Optional. If set, Cloud Monitoring will treat the full query duration as - the alignment period so that there will be only 1 output value. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - *Note: This could override the configured alignment period except for - the cases where a series of data points are expected, like - - XyChart - - Scorecard's spark chart - type: boolean - prometheusQuery: - description: A query used to fetch time series with - PromQL. - type: string - timeSeriesFilter: - description: Filter parameters to fetch time series. - properties: - aggregation: - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views of - the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric + types, resources, and projects + to query. + type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time + series filter. + properties: + direction: + description: How to use the + ranking to select time series + that pass through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to + other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. 
If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that - have the same value for each of the grouping - fields. Each individual time series is - a member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not - specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same - resource type, then the time series are - aggregated into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. 
+ type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of + exactly one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is not + possible to reduce across + different resource types, + so this field implicitly contains + `resource.type`. Fields not + specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same + resource type, then the time + series are aggregated into + a single output time series. + If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time + series with MQL. + type: string + unitOverride: + description: The unit of data contained + in fetched time series. If non-empty, + this unit will override any unit that + accompanies fetched data. The format + is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object + required: + - timeSeriesQuery type: object - filter: - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. - type: string - pickTimeSeriesFilter: - description: Ranking based time series filter. + type: array + thresholds: + description: Threshold lines drawn horizontally + across the chart. + items: properties: - direction: - description: How to use the ranking to select - time series that pass through the filter. 
- type: string - numTimeSeries: - description: How many time series to allow - to pass through the filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` is applied - to each time series independently to produce - the value which will be used to compare - the time series to other time series.' + color: + description: The state color for this threshold. + Color is not allowed in a XyChart. type: string - type: object - secondaryAggregation: - description: Apply a second aggregation after - `aggregation` is applied. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. + direction: + description: The direction for the current + threshold. Direction is not allowed in + a XyChart. type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. + label: + description: A label for the threshold. type: string - groupByFields: - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that - have the same value for each of the grouping - fields. Each individual time series is - a member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not - specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same - resource type, then the time series are - aggregated into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. 
Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. + targetAxis: + description: The target axis to use for + plotting the threshold. Target axis is + not allowed in a Scorecard. type: string + value: + description: The value of the threshold. + The value should be defined in the native + scale of the metric. + format: double + type: number type: object - required: - - filter - type: object - timeSeriesFilterRatio: - description: Parameters to fetch a ratio between - two time series filters. - properties: - denominator: - description: The denominator of the ratio. - properties: - aggregation: - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views - of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. 
It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. - type: string - required: - - filter - type: object - numerator: - description: The numerator of the ratio. - properties: - aggregation: - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views - of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + type: array + timeshiftDuration: + description: The duration used to display a comparison + chart. A comparison chart simultaneously shows + values from two similar-length time periods + (e.g., week-over-week metrics). The duration + must be positive, and it can only be applied + to charts with data sets of LINE plot type. + type: string + xAxis: + description: The properties applied to the x-axis. + properties: + label: + description: The label of the axis. + type: string + scale: + description: The axis scale. By default, a + linear scale is used. + type: string + type: object + yAxis: + description: The properties applied to the y-axis. + properties: + label: + description: The label of the axis. 
+ type: string + scale: + description: The axis scale. By default, a + linear scale is used. + type: string + type: object + required: + - dataSets + type: object + type: object + type: array + type: object + type: array + type: object + displayName: + description: Required. The mutable, human-readable name. + type: string + gridLayout: + description: Content is arranged with a basic layout that re-flows + a simple list of informational elements like widgets or tiles. + properties: + columns: + description: The number of columns into which the view's width + is divided. If omitted or set to zero, a system default will + be used while rendering. + format: int64 + type: integer + widgets: + description: The informational elements that are arranged into + the columns row-first. + items: + properties: + alertChart: + description: A chart of alert policy data. + properties: + alertPolicyRef: + description: Required. A reference to the MonitoringAlertPolicy. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: The MonitoringAlertPolicy link in the + form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", + when not managed by KCC. + type: string + name: + description: The `name` field of a `MonitoringAlertPolicy` + resource. + type: string + namespace: + description: The `namespace` field of a `MonitoringAlertPolicy` + resource. + type: string + type: object + required: + - alertPolicyRef + type: object + blank: + description: A blank space. + type: object + collapsibleGroup: + description: A widget that groups the other widgets. All + widgets that are within the area spanned by the grouping + widget are considered member widgets. + properties: + collapsed: + description: The collapsed state of the widget on first + page load. + type: boolean + type: object + errorReportingPanel: + description: A widget that displays a list of error groups. + properties: + projectRefs: + description: The projects from which to gather errors. + items: + description: The Project that this resource belongs + to. + oneOf: + - not: + required: + - external + required: + - name + - kind + - not: + anyOf: + - required: + - name + - required: + - namespace + - required: + - kind + required: + - external + properties: + external: + description: The `projectID` field of a project, + when not managed by KCC. + type: string + kind: + description: The kind of the Project resource; + optional but must be `Project` if provided. + type: string + name: + description: The `name` field of a `Project` resource. + type: string + namespace: + description: The `namespace` field of a `Project` + resource. + type: string + type: object + type: array + services: + description: |- + An identifier of the service, such as the name of the + executable, job, or Google App Engine service name. This field is expected + to have a low number of values that are relatively stable over time, as + opposed to `version`, which can be changed whenever new code is deployed. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. 
If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Contains the service name for error reports extracted from Google + App Engine logs or `default` if the App Engine default service is used. + items: + type: string + type: array + versions: + description: Represents the source code version that + the developer provided, which could represent a version + label or a Git SHA-1 hash, for example. For App Engine + standard environment, the version is set to the version + of the app. + items: + type: string + type: array + type: object + id: + description: Optional. The widget id. Ids may be made up + of alphanumerics, dashes and underscores. Widget ids are + optional. + type: string + logsPanel: + description: A widget that shows a stream of logs. + properties: + filter: + description: A filter that chooses which log entries + to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). + Only log entries that match the filter are returned. An + empty filter matches all log entries. + type: string + resourceNames: + description: The names of logging resources to collect + logs for. Currently only projects are supported. If + empty, the widget will default to the host project. + items: + oneOf: + - not: + required: + - external + required: + - name + - kind + - not: + anyOf: + - required: + - name + - required: + - namespace + - required: + - kind + required: + - external + properties: + external: + description: The external name of the referenced + resource + type: string + kind: + description: Kind of the referent. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + type: object + type: array + type: object + pieChart: + description: A widget that displays timeseries data as a + pie chart. 
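The `logsPanel` and `errorReportingPanel` widgets described above can be sketched roughly as follows; the project identifiers, log filter, and service/version values are placeholders, not values implied by this patch.

```yaml
# Illustrative sketch only: widgets using the logsPanel and errorReportingPanel
# schemas shown above.
widgets:
- title: "Recent error logs"
  logsPanel:
    filter: 'severity>=ERROR'            # Advanced Logs Query; empty matches all entries
    resourceNames:                       # currently only projects are supported
    - kind: Project
      name: host-project                 # hypothetical Project resource managed by KCC
- title: "Error Reporting"
  errorReportingPanel:
    projectRefs:
    - external: my-gcp-project-id        # `projectID`, when not managed by KCC
    services:
    - default                            # App Engine default service
    versions:
    - "20240618t120000"                  # a deployed version label (placeholder)
```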
+ properties: + chartType: + description: Required. Indicates the visualization type + for the PieChart. + type: string + dataSets: + description: Required. The queries for the chart's data. + items: + properties: + minAlignmentPeriod: + description: Optional. The lower bound on data + point frequency for this data set, implemented + by specifying the minimum alignment period to + use in a time series query. For example, if + the data is published once every 10 minutes, + the `min_alignment_period` should be at least + 10 minutes. It would not make sense to fetch + and align data at one minute intervals. + type: string + sliceNameTemplate: + description: Optional. A template for the name + of the slice. This name will be displayed in + the legend and the tooltip of the pie chart. + It replaces the auto-generated names for the + slices. For example, if the template is set + to `${resource.labels.zone}`, the zone's value + will be used for the name instead of the default + name. + type: string + timeSeriesQuery: + description: Required. The query for the PieChart. + See, `google.monitoring.dashboard.v1.TimeSeriesQuery`. + properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time series + with PromQL. + type: string + timeSeriesFilter: + description: Filter parameters to fetch time + series. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this field + to combine multiple time series for + different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. - type: string - required: - - filter - type: object - pickTimeSeriesFilter: - description: Ranking based time series filter. - properties: - direction: - description: How to use the ranking to select - time series that pass through the filter. - type: string - numTimeSeries: - description: How many time series to allow - to pass through the filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` is applied - to each time series independently to produce - the value which will be used to compare - the time series to other time series.' 
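A `pieChart` data set built on the `timeSeriesFilter`/`aggregation` fields above might look like the sketch below; the chart type, metric filter, and aggregation enum values are drawn from the Cloud Monitoring dashboards API and should be treated as illustrative.

```yaml
# Illustrative sketch only: a pie chart sliced by zone.
pieChart:
  chartType: PIE                         # required; value assumed from the dashboards API
  dataSets:
  - sliceNameTemplate: "${resource.labels.zone}"
    minAlignmentPeriod: 600s             # data assumed to arrive every 10 minutes
    timeSeriesQuery:
      timeSeriesFilter:
        filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
        aggregation:
          alignmentPeriod: 600s
          perSeriesAligner: ALIGN_MEAN
          crossSeriesReducer: REDUCE_SUM
          groupByFields:
          - resource.labels.zone         # one slice per zone
```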
- type: string - type: object - secondaryAggregation: - description: Apply a second aggregation after - the ratio is computed. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that - have the same value for each of the grouping - fields. Each individual time series is - a member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not - specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same - resource type, then the time series are - aggregated into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. 
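The constraints spelled out above (a cross-series reducer requires a per-series aligner other than `ALIGN_NONE` plus an alignment period) are easiest to read as a concrete `aggregation` block; the enum values are assumptions drawn from the Cloud Monitoring API, not from this patch.

```yaml
# Illustrative sketch only: an aggregation that satisfies the documented
# constraints for cross-series reduction.
aggregation:
  alignmentPeriod: 300s                  # must be at least 60s and at most 104 weeks
  perSeriesAligner: ALIGN_RATE           # required (and not ALIGN_NONE) when reducing
  crossSeriesReducer: REDUCE_PERCENTILE_99
  groupByFields:                         # resource.type is implicitly preserved
  - resource.labels.instance_id
```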
Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - type: object - timeSeriesQueryLanguage: - description: A query used to fetch time series with - MQL. - type: string - unitOverride: - description: The unit of data contained in fetched - time series. If non-empty, this unit will override - any unit that accompanies fetched data. The format - is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - type: string - type: object - required: - - timeSeriesQuery - type: object - sectionHeader: - description: A widget that defines a section header for - easier navigation of the dashboard. - properties: - dividerBelow: - description: Whether to insert a divider below the section - in the table of contents - type: boolean - subtitle: - description: The subtitle of the section - type: string - type: object - singleViewGroup: - description: A widget that groups the other widgets by using - a dropdown menu. - type: object - text: - description: A raw string or markdown displaying textual - content. - properties: - content: - description: The text content to be displayed. - type: string - format: - description: How the text content is formatted. - type: string - style: - description: How the text is styled - properties: - backgroundColor: - description: The background color as a hex string. - "#RRGGBB" or "#RGB" - type: string - fontSize: - description: Font sizes for both the title and content. - The title will still be larger relative to the - content. - type: string - horizontalAlignment: - description: The horizontal alignment of both the - title and content - type: string - padding: - description: The amount of padding around the widget - type: string - pointerLocation: - description: The pointer location for this widget - (also sometimes called a "tail") - type: string - textColor: - description: The text color as a hex string. "#RRGGBB" - or "#RGB" - type: string - verticalAlignment: - description: The vertical alignment of both the - title and content - type: string - type: object - type: object - title: - description: Optional. The title of the widget. - type: string - xyChart: - description: A chart of time series data. - properties: - chartOptions: - description: Display options for the chart. - properties: - mode: - description: The chart mode. - type: string - type: object - dataSets: - description: Required. The data displayed in this chart. - items: - properties: - legendTemplate: - description: A template string for naming `TimeSeries` - in the resulting data set. This should be a - string with interpolations of the form `${label_name}`, - which will resolve to the label's value. - type: string - minAlignmentPeriod: - description: Optional. 
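The `text`, `sectionHeader`, and `singleViewGroup` widget fields re-indented in this hunk compose roughly as below; the `MARKDOWN` format value and the specific colors are assumptions for illustration.

```yaml
# Illustrative sketch only: a section header followed by a styled text widget.
widgets:
- sectionHeader:
    subtitle: "Frontend services"
    dividerBelow: true                   # adds a divider below the entry in the TOC
- title: "About this dashboard"
  text:
    content: "Latency and error-budget charts for the frontend tier."
    format: MARKDOWN                     # assumed format value
    style:
      backgroundColor: "#FFFFFF"         # hex string, "#RRGGBB" or "#RGB"
      textColor: "#202124"
```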
The lower bound on data - point frequency for this data set, implemented - by specifying the minimum alignment period to - use in a time series query For example, if the - data is published once every 10 minutes, the - `min_alignment_period` should be at least 10 - minutes. It would not make sense to fetch and - align data at one minute intervals. - type: string - plotType: - description: How this data should be plotted on - the chart. - type: string - timeSeriesQuery: - description: Required. Fields for querying time - series data from the Stackdriver metrics API. - properties: - outputFullDuration: - description: |- - Optional. If set, Cloud Monitoring will treat the full query duration as - the alignment period so that there will be only 1 output value. - - *Note: This could override the configured alignment period except for - the cases where a series of data points are expected, like - - XyChart - - Scorecard's spark chart - type: boolean - prometheusQuery: - description: A query used to fetch time series - with PromQL. - type: string - timeSeriesFilter: - description: Filter parameters to fetch time - series. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this field - to combine multiple time series for - different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is @@ -4346,1679 +3713,1604 @@ spec: - timeSeriesQuery type: object type: array - thresholds: - description: Threshold lines drawn horizontally across - the chart. - items: - properties: - color: - description: The state color for this threshold. - Color is not allowed in a XyChart. - type: string - direction: - description: The direction for the current threshold. - Direction is not allowed in a XyChart. - type: string - label: - description: A label for the threshold. - type: string - targetAxis: - description: The target axis to use for plotting - the threshold. 
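For comparison, the `xyChart` data-set fields touched by this hunk (`legendTemplate`, `minAlignmentPeriod`, `plotType`, and the per-data-set `timeSeriesQuery`) combine roughly like this; the plot type, axis scale, and metric are illustrative assumptions.

```yaml
# Illustrative sketch only: an xyChart plotting received bytes per zone as lines.
xyChart:
  dataSets:
  - plotType: LINE                       # assumed plot type value
    legendTemplate: "${resource.labels.zone}"
    minAlignmentPeriod: 60s
    timeSeriesQuery:
      timeSeriesFilter:
        filter: 'metric.type="compute.googleapis.com/instance/network/received_bytes_count"'
        aggregation:
          alignmentPeriod: 60s
          perSeriesAligner: ALIGN_RATE
  yAxis:
    label: "bytes/s"
    scale: LINEAR                        # assumed scale value; linear is the default
```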
Target axis is not allowed in - a Scorecard. - type: string - value: - description: The value of the threshold. The value - should be defined in the native scale of the - metric. - format: double - type: number - type: object - type: array - timeshiftDuration: - description: The duration used to display a comparison - chart. A comparison chart simultaneously shows values - from two similar-length time periods (e.g., week-over-week - metrics). The duration must be positive, and it can - only be applied to charts with data sets of LINE plot - type. - type: string - xAxis: - description: The properties applied to the x-axis. - properties: - label: - description: The label of the axis. - type: string - scale: - description: The axis scale. By default, a linear - scale is used. - type: string - type: object - yAxis: - description: The properties applied to the y-axis. - properties: - label: - description: The label of the axis. - type: string - scale: - description: The axis scale. By default, a linear - scale is used. - type: string - type: object + showLabels: + description: Optional. Indicates whether or not the + pie chart should show slices' labels + type: boolean required: + - chartType - dataSets type: object - type: object - type: array - type: object - mosaicLayout: - description: The content is arranged as a grid of tiles, with each - content widget occupying one or more grid blocks. - properties: - columns: - description: The number of columns in the mosaic grid. The number - of columns must be between 1 and 12, inclusive. - format: int32 - type: integer - tiles: - description: The tiles to display. - items: - properties: - height: - description: The height of the tile, measured in grid blocks. - Tiles must have a minimum height of 1. - format: int32 - type: integer - widget: - description: The informational widget contained in the tile. - For example an `XyChart`. + scorecard: + description: A scorecard summarizing time series data. properties: - alertChart: - description: A chart of alert policy data. + gaugeView: + description: Will cause the scorecard to show a gauge + chart. properties: - alertPolicyRef: - description: Required. A reference to the MonitoringAlertPolicy. - oneOf: - - not: - required: - - external - required: - - name - - not: - anyOf: - - required: - - name - - required: - - namespace - required: - - external - properties: - external: - description: The MonitoringAlertPolicy link - in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", - when not managed by KCC. - type: string - name: - description: The `name` field of a `MonitoringAlertPolicy` - resource. - type: string - namespace: - description: The `namespace` field of a `MonitoringAlertPolicy` - resource. - type: string - type: object - required: - - alertPolicyRef - type: object - blank: - description: A blank space. + lowerBound: + description: The lower bound for this gauge chart. + The value of the chart should always be greater + than or equal to this. + format: double + type: number + upperBound: + description: The upper bound for this gauge chart. + The value of the chart should always be less than + or equal to this. + format: double + type: number type: object - collapsibleGroup: - description: A widget that groups the other widgets. - All widgets that are within the area spanned by the - grouping widget are considered member widgets. + sparkChartView: + description: Will cause the scorecard to show a spark + chart. 
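The `scorecard` and `gaugeView` fields introduced above compose roughly as below; the bounds and the utilization metric are placeholders rather than values required by the schema.

```yaml
# Illustrative sketch only: a scorecard rendered as a gauge.
scorecard:
  gaugeView:
    lowerBound: 0.0                      # the charted value should stay within the bounds
    upperBound: 1.0
  timeSeriesQuery:                       # required for a scorecard
    timeSeriesFilter:
      filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization"'
      aggregation:
        alignmentPeriod: 60s
        perSeriesAligner: ALIGN_PERCENTILE_95
```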
properties: - collapsed: - description: The collapsed state of the widget on - first page load. - type: boolean + minAlignmentPeriod: + description: The lower bound on data point frequency + in the chart implemented by specifying the minimum + alignment period to use in a time series query. + For example, if the data is published once every + 10 minutes it would not make sense to fetch and + align data at one minute intervals. This field + is optional and exists only as a hint. + type: string + sparkChartType: + description: Required. The type of sparkchart to + show in this chartView. + type: string + required: + - sparkChartType type: object - errorReportingPanel: - description: A widget that displays a list of error - groups. - properties: - projectRefs: - description: The projects from which to gather errors. - items: - description: The Project that this resource belongs - to. - oneOf: - - not: - required: - - external - required: - - name - - kind - - not: - anyOf: - - required: - - name - - required: - - namespace - - required: - - kind - required: - - external - properties: - external: - description: The `projectID` field of a project, - when not managed by KCC. - type: string - kind: - description: The kind of the Project resource; - optional but must be `Project` if provided. - type: string - name: - description: The `name` field of a `Project` - resource. - type: string - namespace: - description: The `namespace` field of a `Project` - resource. - type: string - type: object - type: array - services: - description: |- - An identifier of the service, such as the name of the - executable, job, or Google App Engine service name. This field is expected - to have a low number of values that are relatively stable over time, as - opposed to `version`, which can be changed whenever new code is deployed. + thresholds: + description: |- + The thresholds used to determine the state of the scorecard given the + time series' current value. For an actual value x, the scorecard is in a + danger state if x is less than or equal to a danger threshold that triggers + below, or greater than or equal to a danger threshold that triggers above. + Similarly, if x is above/below a warning threshold that triggers + above/below, then the scorecard is in a warning state - unless x also puts + it in a danger state. (Danger trumps warning.) - Contains the service name for error reports extracted from Google - App Engine logs or `default` if the App Engine default service is used. - items: + As an example, consider a scorecard with the following four thresholds: + + ``` + { + value: 90, + category: 'DANGER', + trigger: 'ABOVE', + }, + { + value: 70, + category: 'WARNING', + trigger: 'ABOVE', + }, + { + value: 10, + category: 'DANGER', + trigger: 'BELOW', + }, + { + value: 20, + category: 'WARNING', + trigger: 'BELOW', + } + ``` + + Then: values less than or equal to 10 would put the scorecard in a DANGER + state, values greater than 10 but less than or equal to 20 a WARNING state, + values strictly between 20 and 70 an OK state, values greater than or equal + to 70 but less than 90 a WARNING state, and values greater than or equal to + 90 a DANGER state. + items: + properties: + color: + description: The state color for this threshold. + Color is not allowed in a XyChart. type: string - type: array - versions: - description: Represents the source code version - that the developer provided, which could represent - a version label or a Git SHA-1 hash, for example. 
- For App Engine standard environment, the version - is set to the version of the app. - items: + direction: + description: The direction for the current threshold. + Direction is not allowed in a XyChart. type: string - type: array - type: object - id: - description: Optional. The widget id. Ids may be made - up of alphanumerics, dashes and underscores. Widget - ids are optional. - type: string - logsPanel: - description: A widget that shows a stream of logs. - properties: - filter: - description: A filter that chooses which log entries - to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). - Only log entries that match the filter are returned. An - empty filter matches all log entries. - type: string - resourceNames: - description: The names of logging resources to collect - logs for. Currently only projects are supported. - If empty, the widget will default to the host - project. - items: - oneOf: - - not: - required: - - external - required: - - name - - kind - - not: - anyOf: - - required: - - name - - required: - - namespace - - required: - - kind - required: - - external - properties: - external: - description: The external name of the referenced - resource - type: string - kind: - description: Kind of the referent. - type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - namespace: - description: 'Namespace of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: string - type: object - type: array - type: object - pieChart: - description: A widget that displays timeseries data - as a pie chart. + label: + description: A label for the threshold. + type: string + targetAxis: + description: The target axis to use for plotting + the threshold. Target axis is not allowed in + a Scorecard. + type: string + value: + description: The value of the threshold. The value + should be defined in the native scale of the + metric. + format: double + type: number + type: object + type: array + timeSeriesQuery: + description: Required. Fields for querying time series + data from the Stackdriver metrics API. properties: - chartType: - description: Required. Indicates the visualization - type for the PieChart. + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time series with + PromQL. type: string - dataSets: - description: Required. The queries for the chart's - data. - items: - properties: - minAlignmentPeriod: - description: Optional. The lower bound on - data point frequency for this data set, - implemented by specifying the minimum alignment - period to use in a time series query. For - example, if the data is published once every - 10 minutes, the `min_alignment_period` should - be at least 10 minutes. It would not make - sense to fetch and align data at one minute - intervals. - type: string - sliceNameTemplate: - description: Optional. A template for the - name of the slice. This name will be displayed - in the legend and the tooltip of the pie - chart. 
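Putting the spark-chart view and the threshold items together, a scorecard that mirrors the DANGER/WARNING worked example in the description above could be sketched like this; the `SPARK_LINE`, color, and direction values are assumptions based on the Cloud Monitoring dashboards API, and the PromQL string is a placeholder.

```yaml
# Illustrative sketch only: a scorecard with a spark line and warning/danger
# thresholds following the worked example in the field description.
scorecard:
  sparkChartView:
    sparkChartType: SPARK_LINE           # required; assumed enum value
    minAlignmentPeriod: 600s             # optional hint on data-point frequency
  thresholds:
  - { value: 90.0, color: RED,    direction: ABOVE, label: "danger-high" }
  - { value: 70.0, color: YELLOW, direction: ABOVE, label: "warning-high" }
  - { value: 20.0, color: YELLOW, direction: BELOW, label: "warning-low" }
  - { value: 10.0, color: RED,    direction: BELOW, label: "danger-low" }
  timeSeriesQuery:
    prometheusQuery: 'sum(rate(http_requests_total{code=~"5.."}[5m]))'   # PromQL placeholder
```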
It replaces the auto-generated names - for the slices. For example, if the template - is set to `${resource.labels.zone}`, the - zone's value will be used for the name instead - of the default name. - type: string - timeSeriesQuery: - description: Required. The query for the PieChart. - See, `google.monitoring.dashboard.v1.TimeSeriesQuery`. - properties: - outputFullDuration: - description: |- - Optional. If set, Cloud Monitoring will treat the full query duration as - the alignment period so that there will be only 1 output value. + timeSeriesFilter: + description: Filter parameters to fetch time series. + properties: + aggregation: + description: By default, the raw time series + data is returned. Use this field to combine + multiple time series for different views of + the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - *Note: This could override the configured alignment period except for - the cases where a series of data points are expected, like - - XyChart - - Scorecard's spark chart - type: boolean - prometheusQuery: - description: A query used to fetch time - series with PromQL. - type: string - timeSeriesFilter: - description: Filter parameters to fetch - time series. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Not all reducer operations can be applied to all time series. 
The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to preserve + when `cross_series_reducer` is specified. + The `group_by_fields` determine how the + time series are partitioned into subsets + prior to applying the aggregation operation. + Each subset contains time series that + have the same value for each of the grouping + fields. Each individual time series is + a member of exactly one subset. The `cross_series_reducer` + is applied to each subset of time series. + It is not possible to reduce across different + resource types, so this field implicitly + contains `resource.type`. Fields not + specified in `group_by_fields` are aggregated + away. If `group_by_fields` is not specified + and all the time series have the same + resource type, then the time series are + aggregated into a single output time series. + If `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. 
- - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - pickTimeSeriesFilter: - description: Ranking based time series - filter. - properties: - direction: - description: How to use the ranking - to select time series that pass - through the filter. - type: string - numTimeSeries: - description: How many time series - to allow to pass through the - filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` - is applied to each time series - independently to produce the - value which will be used to - compare the time series to other - time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after `aggregation` is applied. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, resources, + and projects to query. + type: string + pickTimeSeriesFilter: + description: Ranking based time series filter. + properties: + direction: + description: How to use the ranking to select + time series that pass through the filter. + type: string + numTimeSeries: + description: How many time series to allow + to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is applied + to each time series independently to produce + the value which will be used to compare + the time series to other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation after + `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. 
If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. 
If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to preserve + when `cross_series_reducer` is specified. + The `group_by_fields` determine how the + time series are partitioned into subsets + prior to applying the aggregation operation. + Each subset contains time series that + have the same value for each of the grouping + fields. Each individual time series is + a member of exactly one subset. The `cross_series_reducer` + is applied to each subset of time series. + It is not possible to reduce across different + resource types, so this field implicitly + contains `resource.type`. Fields not + specified in `group_by_fields` are aggregated + away. If `group_by_fields` is not specified + and all the time series have the same + resource type, then the time series are + aggregated into a single output time series. + If `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - required: - - filter - type: object - timeSeriesFilterRatio: - description: Parameters to fetch a ratio - between two time series filters. - properties: - denominator: - description: The denominator of the - ratio. - properties: - aggregation: - description: By default, the raw - time series data is returned. - Use this field to combine multiple - time series for different views - of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio between + two time series filters. 
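The `timeSeriesFilterRatio` structure introduced here (a numerator and denominator, each with its own required `filter` and optional `aggregation`, plus an optional `secondaryAggregation` applied to the computed ratio) can be sketched as a 5xx-to-total request ratio; the metric type and label are placeholders.

```yaml
# Illustrative sketch only: ratio of 5xx responses to all responses.
timeSeriesFilterRatio:
  numerator:
    filter: 'metric.type="loadbalancing.googleapis.com/https/request_count" metric.labels.response_code_class=500'
    aggregation:
      alignmentPeriod: 60s
      perSeriesAligner: ALIGN_RATE
      crossSeriesReducer: REDUCE_SUM
  denominator:
    filter: 'metric.type="loadbalancing.googleapis.com/https/request_count"'
    aggregation:
      alignmentPeriod: 60s
      perSeriesAligner: ALIGN_RATE
      crossSeriesReducer: REDUCE_SUM
  secondaryAggregation:                  # applied after the ratio is computed
    alignmentPeriod: 60s
    perSeriesAligner: ALIGN_MEAN
```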
+ properties: + denominator: + description: The denominator of the ratio. + properties: + aggregation: + description: By default, the raw time series + data is returned. Use this field to combine + multiple time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the - same value for each of the - grouping fields. Each individual - time series is a member - of exactly one subset. The - `cross_series_reducer` is - applied to each subset of - time series. It is not possible - to reduce across different - resource types, so this - field implicitly contains - `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If - `group_by_fields` is not - specified and all the time - series have the same resource - type, then the time series - are aggregated into a single - output time series. 
If `cross_series_reducer` - is not defined, this field - is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to preserve + when `cross_series_reducer` is specified. + The `group_by_fields` determine how + the time series are partitioned into + subsets prior to applying the aggregation + operation. Each subset contains time + series that have the same value for + each of the grouping fields. Each + individual time series is a member + of exactly one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, so + this field implicitly contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the time + series have the same resource type, + then the time series are aggregated + into a single output time series. + If `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - numerator: - description: The numerator of the - ratio. - properties: - aggregation: - description: By default, the raw - time series data is returned. - Use this field to combine multiple - time series for different views - of the data. 
- properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, resources, + and projects to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the ratio. + properties: + aggregation: + description: By default, the raw time series + data is returned. Use this field to combine + multiple time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. 
- type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the - same value for each of the - grouping fields. Each individual - time series is a member - of exactly one subset. The - `cross_series_reducer` is - applied to each subset of - time series. It is not possible - to reduce across different - resource types, so this - field implicitly contains - `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If - `group_by_fields` is not - specified and all the time - series have the same resource - type, then the time series - are aggregated into a single - output time series. If `cross_series_reducer` - is not defined, this field - is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to preserve + when `cross_series_reducer` is specified. + The `group_by_fields` determine how + the time series are partitioned into + subsets prior to applying the aggregation + operation. Each subset contains time + series that have the same value for + each of the grouping fields. Each + individual time series is a member + of exactly one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, so + this field implicitly contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the time + series have the same resource type, + then the time series are aggregated + into a single output time series. + If `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. 
- Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - pickTimeSeriesFilter: - description: Ranking based time series - filter. - properties: - direction: - description: How to use the ranking - to select time series that pass - through the filter. - type: string - numTimeSeries: - description: How many time series - to allow to pass through the - filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` - is applied to each time series - independently to produce the - value which will be used to - compare the time series to other - time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after the ratio is computed. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, resources, + and projects to query. + type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time series filter. + properties: + direction: + description: How to use the ranking to select + time series that pass through the filter. + type: string + numTimeSeries: + description: How many time series to allow + to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is applied + to each time series independently to produce + the value which will be used to compare + the time series to other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation after + the ratio is computed. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. 
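The `pickTimeSeriesFilter` and `secondaryAggregation` fields described above can be sketched as follows; the ranking enum values and the metric are assumptions for illustration only.

```yaml
# Sketch of a ranking-based filter plus a second aggregation step.
# The ranking enum values and the metric are illustrative.
timeSeriesFilter:
  filter: 'metric.type="compute.googleapis.com/instance/disk/write_bytes_count"'
  aggregation:
    alignmentPeriod: 300s
    perSeriesAligner: ALIGN_RATE
    crossSeriesReducer: REDUCE_SUM
    groupByFields:
    - resource.labels.instance_id
  pickTimeSeriesFilter:
    rankingMethod: METHOD_MEAN   # ranks each series by its mean value
    direction: TOP               # keep the highest-ranked series
    numTimeSeries: 5
  secondaryAggregation:
    alignmentPeriod: 300s
    perSeriesAligner: ALIGN_MEAN
```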
+ If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to preserve + when `cross_series_reducer` is specified. 
+ The `group_by_fields` determine how the + time series are partitioned into subsets + prior to applying the aggregation operation. + Each subset contains time series that + have the same value for each of the grouping + fields. Each individual time series is + a member of exactly one subset. The `cross_series_reducer` + is applied to each subset of time series. + It is not possible to reduce across different + resource types, so this field implicitly + contains `resource.type`. Fields not + specified in `group_by_fields` are aggregated + away. If `group_by_fields` is not specified + and all the time series have the same + resource type, then the time series are + aggregated into a single output time series. + If `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - type: object - timeSeriesQueryLanguage: - description: A query used to fetch time - series with MQL. - type: string - unitOverride: - description: The unit of data contained - in fetched time series. If non-empty, - this unit will override any unit that - accompanies fetched data. The format - is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - type: string - type: object - required: - - timeSeriesQuery - type: object - type: array - showLabels: - description: Optional. Indicates whether or not - the pie chart should show slices' labels - type: boolean - required: - - chartType - - dataSets + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time series with + MQL. + type: string + unitOverride: + description: The unit of data contained in fetched + time series. If non-empty, this unit will override + any unit that accompanies fetched data. The format + is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string type: object - scorecard: - description: A scorecard summarizing time series data. 
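Since the surrounding block documents a pie chart whose data sets are driven by a `timeSeriesFilterRatio`, a hedged sketch of such a widget entry may help; the layout the entry sits under, the metric types, and the label names are assumed.

```yaml
# Sketch of a pie-chart widget entry whose slices come from a ratio of
# two monitoring filters. Metric types and label names are assumed.
- title: 5xx ratio by zone
  pieChart:
    chartType: DONUT        # required; PIE and DONUT are the API's chart types
    showLabels: true
    dataSets:
    - timeSeriesQuery:
        unitOverride: "1"   # present the ratio as a dimensionless value
        timeSeriesFilterRatio:
          numerator:
            filter: 'metric.type="loadbalancing.googleapis.com/https/request_count" metric.labels.response_code_class="500"'
            aggregation:
              alignmentPeriod: 60s
              perSeriesAligner: ALIGN_RATE
              crossSeriesReducer: REDUCE_SUM
          denominator:
            filter: 'metric.type="loadbalancing.googleapis.com/https/request_count"'
            aggregation:
              alignmentPeriod: 60s
              perSeriesAligner: ALIGN_RATE
              crossSeriesReducer: REDUCE_SUM
```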
+ required: + - timeSeriesQuery + type: object + sectionHeader: + description: A widget that defines a section header for + easier navigation of the dashboard. + properties: + dividerBelow: + description: Whether to insert a divider below the section + in the table of contents + type: boolean + subtitle: + description: The subtitle of the section + type: string + type: object + singleViewGroup: + description: A widget that groups the other widgets by using + a dropdown menu. + type: object + text: + description: A raw string or markdown displaying textual + content. + properties: + content: + description: The text content to be displayed. + type: string + format: + description: How the text content is formatted. + type: string + style: + description: How the text is styled properties: - gaugeView: - description: Will cause the scorecard to show a - gauge chart. - properties: - lowerBound: - description: The lower bound for this gauge - chart. The value of the chart should always - be greater than or equal to this. - format: double - type: number - upperBound: - description: The upper bound for this gauge - chart. The value of the chart should always - be less than or equal to this. - format: double - type: number - type: object - sparkChartView: - description: Will cause the scorecard to show a - spark chart. - properties: - minAlignmentPeriod: - description: The lower bound on data point frequency - in the chart implemented by specifying the - minimum alignment period to use in a time - series query. For example, if the data is - published once every 10 minutes it would not - make sense to fetch and align data at one - minute intervals. This field is optional and - exists only as a hint. - type: string - sparkChartType: - description: Required. The type of sparkchart - to show in this chartView. - type: string - required: - - sparkChartType - type: object - thresholds: - description: |- - The thresholds used to determine the state of the scorecard given the - time series' current value. For an actual value x, the scorecard is in a - danger state if x is less than or equal to a danger threshold that triggers - below, or greater than or equal to a danger threshold that triggers above. - Similarly, if x is above/below a warning threshold that triggers - above/below, then the scorecard is in a warning state - unless x also puts - it in a danger state. (Danger trumps warning.) - - As an example, consider a scorecard with the following four thresholds: + backgroundColor: + description: The background color as a hex string. + "#RRGGBB" or "#RGB" + type: string + fontSize: + description: Font sizes for both the title and content. + The title will still be larger relative to the + content. + type: string + horizontalAlignment: + description: The horizontal alignment of both the + title and content + type: string + padding: + description: The amount of padding around the widget + type: string + pointerLocation: + description: The pointer location for this widget + (also sometimes called a "tail") + type: string + textColor: + description: The text color as a hex string. "#RRGGBB" + or "#RGB" + type: string + verticalAlignment: + description: The vertical alignment of both the + title and content + type: string + type: object + type: object + timeSeriesTable: + description: A widget that displays time series data in + a tabular format. + properties: + columnSettings: + description: Optional. The list of the persistent column + settings for the table. 
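A sketch of the presentational widgets documented above (`sectionHeader`, `singleViewGroup`, and `text`); the content, format value, and hex colors are placeholders.

```yaml
# Sketch of the presentational widgets: a section header, a dropdown
# group, and a markdown text block. Content and colors are placeholders.
- sectionHeader:
    subtitle: Frontend services
    dividerBelow: true          # adds a divider in the table of contents
- singleViewGroup: {}           # groups the widgets that follow behind a dropdown
- title: Runbook
  text:
    format: MARKDOWN            # RAW is the plain-text alternative
    content: "Check the [runbook](https://example.com/runbook) before silencing alerts."
    style:
      backgroundColor: "#FFFFFF"   # "#RRGGBB" or "#RGB"
      textColor: "#212121"
```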
+ items: + properties: + column: + description: Required. The id of the column. + type: string + visible: + description: Required. Whether the column should + be visible on page load. + type: boolean + required: + - column + - visible + type: object + type: array + dataSets: + description: Required. The data displayed in this table. + items: + properties: + minAlignmentPeriod: + description: Optional. The lower bound on data + point frequency for this data set, implemented + by specifying the minimum alignment period to + use in a time series query For example, if the + data is published once every 10 minutes, the + `min_alignment_period` should be at least 10 + minutes. It would not make sense to fetch and + align data at one minute intervals. + type: string + tableDisplayOptions: + description: Optional. Table display options for + configuring how the table is rendered. + properties: + shownColumns: + description: Optional. This field is unused + and has been replaced by TimeSeriesTable.column_settings + items: + type: string + type: array + type: object + tableTemplate: + description: Optional. A template string for naming + `TimeSeries` in the resulting data set. This + should be a string with interpolations of the + form `${label_name}`, which will resolve to + the label's value i.e. "${resource.labels.project_id}." + type: string + timeSeriesQuery: + description: Required. Fields for querying time + series data from the Stackdriver metrics API. + properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. - ``` - { - value: 90, - category: 'DANGER', - trigger: 'ABOVE', - }, - { - value: 70, - category: 'WARNING', - trigger: 'ABOVE', - }, - { - value: 10, - category: 'DANGER', - trigger: 'BELOW', - }, - { - value: 20, - category: 'WARNING', - trigger: 'BELOW', - } - ``` - - Then: values less than or equal to 10 would put the scorecard in a DANGER - state, values greater than 10 but less than or equal to 20 a WARNING state, - values strictly between 20 and 70 an OK state, values greater than or equal - to 70 but less than 90 a WARNING state, and values greater than or equal to - 90 a DANGER state. - items: - properties: - color: - description: The state color for this threshold. - Color is not allowed in a XyChart. - type: string - direction: - description: The direction for the current - threshold. Direction is not allowed in a - XyChart. - type: string - label: - description: A label for the threshold. - type: string - targetAxis: - description: The target axis to use for plotting - the threshold. Target axis is not allowed - in a Scorecard. + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time series + with PromQL. type: string - value: - description: The value of the threshold. The - value should be defined in the native scale - of the metric. - format: double - type: number - type: object - type: array - timeSeriesQuery: - description: Required. Fields for querying time - series data from the Stackdriver metrics API. - properties: - outputFullDuration: - description: |- - Optional. If set, Cloud Monitoring will treat the full query duration as - the alignment period so that there will be only 1 output value. 
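The threshold semantics above translate into a scorecard roughly as follows; the metric, the 0.7/0.9 cut-offs, and the spark chart type are illustrative, while `color` and `direction` follow the field descriptions in this schema.

```yaml
# Sketch of a scorecard that applies the threshold semantics described
# above. Metric, cut-off values, and spark chart type are illustrative.
- title: CPU utilization
  scorecard:
    timeSeriesQuery:
      timeSeriesFilter:
        filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization"'
        aggregation:
          alignmentPeriod: 60s
          perSeriesAligner: ALIGN_MEAN
          crossSeriesReducer: REDUCE_MEAN
    sparkChartView:               # gaugeView {lowerBound, upperBound} is the alternative
      sparkChartType: SPARK_LINE  # required when sparkChartView is set
      minAlignmentPeriod: 60s
    thresholds:
    - label: warning-high
      value: 0.7                  # native scale of the metric (utilization fraction)
      color: YELLOW
      direction: ABOVE
    - label: danger-high
      value: 0.9
      color: RED
      direction: ABOVE
```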
- - *Note: This could override the configured alignment period except for - the cases where a series of data points are expected, like - - XyChart - - Scorecard's spark chart - type: boolean - prometheusQuery: - description: A query used to fetch time series - with PromQL. - type: string - timeSeriesFilter: - description: Filter parameters to fetch time - series. - properties: - aggregation: - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views - of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + timeSeriesFilter: + description: Filter parameters to fetch time + series. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this field + to combine multiple time series for + different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. 
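Where a PromQL query is preferred over a monitoring filter, the `prometheusQuery` and `outputFullDuration` fields described here could be combined as sketched below; the query itself is a generic example and assumes Prometheus-style metrics are being ingested.

```yaml
# Sketch of a query that uses PromQL instead of a monitoring filter and
# collapses the window to a single output value. The PromQL text is a
# generic example, not part of this schema.
timeSeriesQuery:
  outputFullDuration: true    # the full query duration becomes the alignment period
  prometheusQuery: |
    sum(rate(http_requests_total{code=~"5.."}[5m]))
      /
    sum(rate(http_requests_total[5m]))
```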
- type: string - groupByFields: - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. - items: + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series are + partitioned into subsets prior to + applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, + so this field implicitly contains + `resource.type`. Fields not specified + in `group_by_fields` are aggregated + away. If `group_by_fields` is not + specified and all the time series + have the same resource type, then + the time series are aggregated into + a single output time series. If + `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must be aligned in order to perform cross-time - series reduction. 
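A hedged sketch of a `timeSeriesTable` widget using the `columnSettings`, `tableTemplate`, and data-set fields above; the column id and the grouping label are assumptions.

```yaml
# Sketch of a time-series table widget. The column id, template, and
# grouping label are assumptions for illustration.
- title: CPU by zone
  timeSeriesTable:
    columnSettings:
    - column: value             # required: id of the column
      visible: true             # required: shown on page load
    dataSets:
    - minAlignmentPeriod: 600s  # data published every 10 minutes
      tableTemplate: "${resource.labels.zone}"
      timeSeriesQuery:
        timeSeriesFilter:
          filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization"'
          aggregation:
            alignmentPeriod: 600s
            perSeriesAligner: ALIGN_MEAN
            crossSeriesReducer: REDUCE_MEAN
            groupByFields:
            - resource.labels.zone
```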
If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. - type: string - pickTimeSeriesFilter: - description: Ranking based time series filter. - properties: - direction: - description: How to use the ranking - to select time series that pass through - the filter. - type: string - numTimeSeries: - description: How many time series to - allow to pass through the filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` is applied - to each time series independently - to produce the value which will be - used to compare the time series to - other time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after `aggregation` is applied. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, resources, + and projects to query. + type: string + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is + applied to each time series independently + to produce the value which will + be used to compare the time series + to other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. 
- type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. - items: + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. 
The `group_by_fields` + determine how the time series are + partitioned into subsets prior to + applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, + so this field implicitly contains + `resource.type`. Fields not specified + in `group_by_fields` are aggregated + away. If `group_by_fields` is not + specified and all the time series + have the same resource type, then + the time series are aggregated into + a single output time series. If + `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - required: - - filter - type: object - timeSeriesFilterRatio: - description: Parameters to fetch a ratio between - two time series filters. - properties: - denominator: - description: The denominator of the ratio. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio between + two time series filters. + properties: + denominator: + description: The denominator of the ratio. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. 
+ properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets prior - to applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to - reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all reducer operations can be applied to all time series. 
The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - numerator: - description: The numerator of the ratio. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. 
- The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the ratio. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets prior - to applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to - reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: + The maximum value of the `alignment_period` is 2 years, or 104 weeks. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. 
Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - pickTimeSeriesFilter: - description: Ranking based time series filter. - properties: - direction: - description: How to use the ranking - to select time series that pass through - the filter. - type: string - numTimeSeries: - description: How many time series to - allow to pass through the filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` is applied - to each time series independently - to produce the value which will be - used to compare the time series to - other time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after the ratio is computed. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. 
If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is + applied to each time series independently + to produce the value which will + be used to compare the time series + to other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. 
+ If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. - items: + The maximum value of the `alignment_period` is 2 years, or 104 weeks. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - type: object - timeSeriesQueryLanguage: - description: A query used to fetch time series - with MQL. - type: string - unitOverride: - description: The unit of data contained in fetched - time series. If non-empty, this unit will - override any unit that accompanies fetched - data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. 
- type: string - type: object - required: - - timeSeriesQuery - type: object - sectionHeader: - description: A widget that defines a section header - for easier navigation of the dashboard. - properties: - dividerBelow: - description: Whether to insert a divider below the - section in the table of contents - type: boolean - subtitle: - description: The subtitle of the section - type: string - type: object - singleViewGroup: - description: A widget that groups the other widgets - by using a dropdown menu. - type: object - text: - description: A raw string or markdown displaying textual - content. - properties: - content: - description: The text content to be displayed. - type: string - format: - description: How the text content is formatted. - type: string - style: - description: How the text is styled - properties: - backgroundColor: - description: The background color as a hex string. - "#RRGGBB" or "#RGB" - type: string - fontSize: - description: Font sizes for both the title and - content. The title will still be larger relative - to the content. - type: string - horizontalAlignment: - description: The horizontal alignment of both - the title and content - type: string - padding: - description: The amount of padding around the - widget - type: string - pointerLocation: - description: The pointer location for this widget - (also sometimes called a "tail") - type: string - textColor: - description: The text color as a hex string. - "#RRGGBB" or "#RGB" - type: string - verticalAlignment: - description: The vertical alignment of both - the title and content - type: string - type: object - type: object - title: - description: Optional. The title of the widget. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series are + partitioned into subsets prior to + applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, + so this field implicitly contains + `resource.type`. Fields not specified + in `group_by_fields` are aggregated + away. If `group_by_fields` is not + specified and all the time series + have the same resource type, then + the time series are aggregated into + a single output time series. If + `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. 
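For the MQL path, `timeSeriesQueryLanguage` and `unitOverride` might be combined as below; the MQL text is an illustrative query, not part of this schema.

```yaml
# Sketch of a data set that fetches series with MQL and overrides the
# unit. The MQL query is illustrative only.
- timeSeriesQuery:
    unitOverride: By            # interpret values as bytes
    timeSeriesQueryLanguage: |
      fetch gce_instance
      | metric 'compute.googleapis.com/instance/network/received_bytes_count'
      | align rate(1m)
      | group_by [resource.zone], sum(val())
```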
Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time series + with MQL. + type: string + unitOverride: + description: The unit of data contained in + fetched time series. If non-empty, this + unit will override any unit that accompanies + fetched data. The format is the same as + the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object + type: object + type: array + metricVisualization: + description: Optional. Store rendering strategy type: string - xyChart: - description: A chart of time series data. + required: + - dataSets + type: object + title: + description: Optional. The title of the widget. + type: string + xyChart: + description: A chart of time series data. + properties: + chartOptions: + description: Display options for the chart. properties: - chartOptions: - description: Display options for the chart. - properties: - mode: - description: The chart mode. - type: string - type: object - dataSets: - description: Required. The data displayed in this - chart. - items: + mode: + description: The chart mode. + type: string + type: object + dataSets: + description: Required. The data displayed in this chart. + items: + properties: + legendTemplate: + description: A template string for naming `TimeSeries` + in the resulting data set. This should be a + string with interpolations of the form `${label_name}`, + which will resolve to the label's value. + type: string + minAlignmentPeriod: + description: Optional. The lower bound on data + point frequency for this data set, implemented + by specifying the minimum alignment period to + use in a time series query For example, if the + data is published once every 10 minutes, the + `min_alignment_period` should be at least 10 + minutes. It would not make sense to fetch and + align data at one minute intervals. + type: string + plotType: + description: How this data should be plotted on + the chart. + type: string + timeSeriesQuery: + description: Required. Fields for querying time + series data from the Stackdriver metrics API. properties: - legendTemplate: - description: A template string for naming - `TimeSeries` in the resulting data set. - This should be a string with interpolations - of the form `${label_name}`, which will - resolve to the label's value. - type: string - minAlignmentPeriod: - description: Optional. The lower bound on - data point frequency for this data set, - implemented by specifying the minimum alignment - period to use in a time series query For - example, if the data is published once every - 10 minutes, the `min_alignment_period` should - be at least 10 minutes. It would not make - sense to fetch and align data at one minute - intervals. - type: string - plotType: - description: How this data should be plotted - on the chart. + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. 
+ + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time series + with PromQL. type: string - timeSeriesQuery: - description: Required. Fields for querying - time series data from the Stackdriver metrics - API. + timeSeriesFilter: + description: Filter parameters to fetch time + series. properties: - outputFullDuration: - description: |- - Optional. If set, Cloud Monitoring will treat the full query duration as - the alignment period so that there will be only 1 output value. - - *Note: This could override the configured alignment period except for - the cases where a series of data points are expected, like - - XyChart - - Scorecard's spark chart - type: boolean - prometheusQuery: - description: A query used to fetch time - series with PromQL. - type: string - timeSeriesFilter: - description: Filter parameters to fetch - time series. + aggregation: + description: By default, the raw time + series data is returned. Use this field + to combine multiple time series for + different views of the data. properties: - aggregation: - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. 
+ Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series are + partitioned into subsets prior to + applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, + so this field implicitly contains + `resource.type`. Fields not specified + in `group_by_fields` are aggregated + away. If `group_by_fields` is not + specified and all the time series + have the same resource type, then + the time series are aggregated into + a single output time series. If + `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, resources, + and projects to query. + type: string + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is + applied to each time series independently + to produce the value which will + be used to compare the time series + to other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. 
+ properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series are + partitioned into subsets prior to + applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, + so this field implicitly contains + `resource.type`. Fields not specified + in `group_by_fields` are aggregated + away. If `group_by_fields` is not + specified and all the time series + have the same resource type, then + the time series are aggregated into + a single output time series. If + `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. 
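The `pickTimeSeriesFilter` and `secondaryAggregation` fields documented in this hunk attach to the same `timeSeriesFilter` block. A sketch, reusing the filter from the earlier example (the choice of five series, `METHOD_MEAN`, and the 300s windows are arbitrary illustrations):

timeSeriesFilter:                  # drop-in for the block in the earlier sketch
  filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
  aggregation:
    alignmentPeriod: "300s"
    perSeriesAligner: ALIGN_MEAN
  pickTimeSeriesFilter:
    rankingMethod: METHOD_MEAN     # rank each series by its mean value
    direction: TOP                 # keep the highest-ranked series
    numTimeSeries: 5
  secondaryAggregation:            # optional second pass, applied after `aggregation`
    alignmentPeriod: "300s"
    perSeriesAligner: ALIGN_MEAN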
+ type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio between + two time series filters. + properties: + denominator: + description: The denominator of the ratio. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is @@ -6082,33 +5374,17 @@ spec: that identifies the metric types, resources, and projects to query. type: string - pickTimeSeriesFilter: - description: Ranking based time series - filter. - properties: - direction: - description: How to use the ranking - to select time series that pass - through the filter. - type: string - numTimeSeries: - description: How many time series - to allow to pass through the - filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` - is applied to each time series - independently to produce the - value which will be used to - compare the time series to other - time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after `aggregation` is applied. + required: + - filter + type: object + numerator: + description: The numerator of the ratio. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. properties: alignmentPeriod: description: |- @@ -6192,1185 +5468,4709 @@ spec: returned. type: string type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string required: - filter type: object - timeSeriesFilterRatio: - description: Parameters to fetch a ratio - between two time series filters. + pickTimeSeriesFilter: + description: Ranking based time series + filter. properties: - denominator: - description: The denominator of the - ratio. - properties: - aggregation: - description: By default, the raw - time series data is returned. 
- Use this field to combine multiple - time series for different views - of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the - same value for each of the - grouping fields. Each individual - time series is a member - of exactly one subset. The - `cross_series_reducer` is - applied to each subset of - time series. It is not possible - to reduce across different - resource types, so this - field implicitly contains - `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If - `group_by_fields` is not - specified and all the time - series have the same resource - type, then the time series - are aggregated into a single - output time series. If `cross_series_reducer` - is not defined, this field - is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is + applied to each time series independently + to produce the value which will + be used to compare the time series + to other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. 
+ properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - numerator: - description: The numerator of the - ratio. - properties: - aggregation: - description: By default, the raw - time series data is returned. - Use this field to combine multiple - time series for different views - of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. 
+ type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series are + partitioned into subsets prior to + applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, + so this field implicitly contains + `resource.type`. Fields not specified + in `group_by_fields` are aggregated + away. If `group_by_fields` is not + specified and all the time series + have the same resource type, then + the time series are aggregated into + a single output time series. If + `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the - same value for each of the - grouping fields. Each individual - time series is a member - of exactly one subset. The - `cross_series_reducer` is - applied to each subset of - time series. It is not possible - to reduce across different - resource types, so this - field implicitly contains - `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If - `group_by_fields` is not - specified and all the time - series have the same resource - type, then the time series - are aggregated into a single - output time series. If `cross_series_reducer` - is not defined, this field - is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. 
- - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - pickTimeSeriesFilter: - description: Ranking based time series - filter. - properties: - direction: - description: How to use the ranking - to select time series that pass - through the filter. - type: string - numTimeSeries: - description: How many time series - to allow to pass through the - filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` - is applied to each time series - independently to produce the - value which will be used to - compare the time series to other - time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after the ratio is computed. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. 
It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string type: object - timeSeriesQueryLanguage: - description: A query used to fetch time - series with MQL. - type: string - unitOverride: - description: The unit of data contained - in fetched time series. If non-empty, - this unit will override any unit that - accompanies fetched data. The format - is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - type: string type: object - required: - - timeSeriesQuery - type: object - type: array - thresholds: - description: Threshold lines drawn horizontally - across the chart. - items: - properties: - color: - description: The state color for this threshold. - Color is not allowed in a XyChart. - type: string - direction: - description: The direction for the current - threshold. Direction is not allowed in a - XyChart. - type: string - label: - description: A label for the threshold. + timeSeriesQueryLanguage: + description: A query used to fetch time series + with MQL. type: string - targetAxis: - description: The target axis to use for plotting - the threshold. Target axis is not allowed - in a Scorecard. + unitOverride: + description: The unit of data contained in + fetched time series. If non-empty, this + unit will override any unit that accompanies + fetched data. The format is the same as + the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. type: string - value: - description: The value of the threshold. The - value should be defined in the native scale - of the metric. - format: double - type: number type: object - type: array - timeshiftDuration: - description: The duration used to display a comparison - chart. 
A comparison chart simultaneously shows - values from two similar-length time periods (e.g., - week-over-week metrics). The duration must be - positive, and it can only be applied to charts - with data sets of LINE plot type. - type: string - xAxis: - description: The properties applied to the x-axis. - properties: - label: - description: The label of the axis. - type: string - scale: - description: The axis scale. By default, a linear - scale is used. - type: string - type: object - yAxis: - description: The properties applied to the y-axis. - properties: - label: - description: The label of the axis. - type: string - scale: - description: The axis scale. By default, a linear - scale is used. - type: string - type: object - required: - - dataSets - type: object - type: object - width: - description: The width of the tile, measured in grid blocks. - Tiles must have a minimum width of 1. - format: int32 - type: integer - xPos: - description: The zero-indexed position of the tile in grid - blocks relative to the left edge of the grid. Tiles must - be contained within the specified number of columns. `x_pos` - cannot be negative. - format: int32 - type: integer - yPos: - description: The zero-indexed position of the tile in grid - blocks relative to the top edge of the grid. `y_pos` cannot - be negative. - format: int32 - type: integer + required: + - timeSeriesQuery + type: object + type: array + thresholds: + description: Threshold lines drawn horizontally across + the chart. + items: + properties: + color: + description: The state color for this threshold. + Color is not allowed in a XyChart. + type: string + direction: + description: The direction for the current threshold. + Direction is not allowed in a XyChart. + type: string + label: + description: A label for the threshold. + type: string + targetAxis: + description: The target axis to use for plotting + the threshold. Target axis is not allowed in + a Scorecard. + type: string + value: + description: The value of the threshold. The value + should be defined in the native scale of the + metric. + format: double + type: number + type: object + type: array + timeshiftDuration: + description: The duration used to display a comparison + chart. A comparison chart simultaneously shows values + from two similar-length time periods (e.g., week-over-week + metrics). The duration must be positive, and it can + only be applied to charts with data sets of LINE plot + type. + type: string + xAxis: + description: The properties applied to the x-axis. + properties: + label: + description: The label of the axis. + type: string + scale: + description: The axis scale. By default, a linear + scale is used. + type: string + type: object + yAxis: + description: The properties applied to the y-axis. + properties: + label: + description: The label of the axis. + type: string + scale: + description: The axis scale. By default, a linear + scale is used. + type: string + type: object + required: + - dataSets + type: object type: object type: array type: object - projectRef: - description: Immutable. The Project that this resource belongs to. - oneOf: - - not: - required: - - external - required: - - name - - not: - anyOf: - - required: - - name - - required: - - namespace - required: - - external - properties: - external: - description: The `projectID` field of a project, when not managed - by KCC. - type: string - kind: - description: The kind of the Project resource; optional but must - be `Project` if provided. 
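The `thresholds`, `xAxis`, and `yAxis` blocks added to `xyChart` in this hunk sit alongside `dataSets`. A small illustrative fragment (the threshold value and axis labels are placeholders):

xyChart:
  dataSets:
  - plotType: LINE
    timeSeriesQuery:
      timeSeriesFilter:
        filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
        aggregation:
          perSeriesAligner: ALIGN_MEAN
  thresholds:
  - value: 0.8                # expressed in the native scale of the metric
    label: "High CPU"
  yAxis:
    label: "CPU utilization"
    scale: LINEAR             # the default; shown here only for clarity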
- type: string - name: - description: The `name` field of a `Project` resource. - type: string - namespace: - description: The `namespace` field of a `Project` resource. - type: string - type: object - resourceID: - description: Immutable. Optional. The name of the resource. Used for - creation and acquisition. When unset, the value of `metadata.name` - is used as the default. - type: string - rowLayout: - description: The content is divided into equally spaced rows and the - widgets are arranged horizontally. + mosaicLayout: + description: The content is arranged as a grid of tiles, with each + content widget occupying one or more grid blocks. properties: - rows: - description: The rows of content to display. + columns: + description: The number of columns in the mosaic grid. The number + of columns must be between 1 and 12, inclusive. + format: int32 + type: integer + tiles: + description: The tiles to display. items: properties: - weight: - description: The relative weight of this row. The row weight - is used to adjust the height of rows on the screen (relative - to peers). Greater the weight, greater the height of the - row on the screen. If omitted, a value of 1 is used while - rendering. - format: int64 + height: + description: The height of the tile, measured in grid blocks. + Tiles must have a minimum height of 1. + format: int32 type: integer - widgets: - description: The display widgets arranged horizontally in - this row. - items: - properties: - alertChart: - description: A chart of alert policy data. - properties: - alertPolicyRef: - description: Required. A reference to the MonitoringAlertPolicy. + widget: + description: The informational widget contained in the tile. + For example an `XyChart`. + properties: + alertChart: + description: A chart of alert policy data. + properties: + alertPolicyRef: + description: Required. A reference to the MonitoringAlertPolicy. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: The MonitoringAlertPolicy link + in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", + when not managed by KCC. + type: string + name: + description: The `name` field of a `MonitoringAlertPolicy` + resource. + type: string + namespace: + description: The `namespace` field of a `MonitoringAlertPolicy` + resource. + type: string + type: object + required: + - alertPolicyRef + type: object + blank: + description: A blank space. + type: object + collapsibleGroup: + description: A widget that groups the other widgets. + All widgets that are within the area spanned by the + grouping widget are considered member widgets. + properties: + collapsed: + description: The collapsed state of the widget on + first page load. + type: boolean + type: object + errorReportingPanel: + description: A widget that displays a list of error + groups. + properties: + projectRefs: + description: The projects from which to gather errors. + items: + description: The Project that this resource belongs + to. oneOf: - not: required: - external required: - name + - kind - not: anyOf: - required: - name - required: - namespace + - required: + - kind required: - external properties: external: - description: The MonitoringAlertPolicy link - in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", + description: The `projectID` field of a project, when not managed by KCC. 
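The `mosaicLayout` introduced in this hunk places each widget on an explicit grid, and `alertChart.alertPolicyRef` follows the usual KCC reference pattern: `name` (plus optional `namespace`) for a MonitoringAlertPolicy managed in the cluster, or `external` for one that is not. A minimal sketch with hypothetical names and grid positions (the `text` widget reuses the Widget schema shown elsewhere in this CRD):

apiVersion: monitoring.cnrm.cloud.google.com/v1beta1
kind: MonitoringDashboard
metadata:
  name: monitoringdashboard-mosaic-sketch        # hypothetical name
spec:
  displayName: "Alerting overview (sketch)"
  mosaicLayout:
    columns: 12
    tiles:
    - xPos: 0
      yPos: 0
      width: 6
      height: 4
      widget:
        title: "High CPU alert"
        alertChart:
          alertPolicyRef:
            name: monitoringalertpolicy-sample   # MonitoringAlertPolicy in the same namespace (hypothetical)
    - xPos: 6
      yPos: 0
      width: 6
      height: 4
      widget:
        title: "Notes"
        text:
          content: "Escalation steps live in the team runbook."
          format: MARKDOWN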
type: string + kind: + description: The kind of the Project resource; + optional but must be `Project` if provided. + type: string name: - description: The `name` field of a `MonitoringAlertPolicy` + description: The `name` field of a `Project` resource. type: string namespace: - description: The `namespace` field of a `MonitoringAlertPolicy` + description: The `namespace` field of a `Project` resource. type: string type: object - required: - - alertPolicyRef - type: object - blank: - description: A blank space. - type: object - collapsibleGroup: - description: A widget that groups the other widgets. - All widgets that are within the area spanned by - the grouping widget are considered member widgets. - properties: - collapsed: - description: The collapsed state of the widget - on first page load. - type: boolean - type: object - errorReportingPanel: - description: A widget that displays a list of error - groups. - properties: - projectRefs: - description: The projects from which to gather - errors. - items: - description: The Project that this resource - belongs to. - oneOf: - - not: - required: - - external - required: - - name - - kind - - not: - anyOf: - - required: - - name - - required: - - namespace - - required: - - kind + type: array + services: + description: |- + An identifier of the service, such as the name of the + executable, job, or Google App Engine service name. This field is expected + to have a low number of values that are relatively stable over time, as + opposed to `version`, which can be changed whenever new code is deployed. + + Contains the service name for error reports extracted from Google + App Engine logs or `default` if the App Engine default service is used. + items: + type: string + type: array + versions: + description: Represents the source code version + that the developer provided, which could represent + a version label or a Git SHA-1 hash, for example. + For App Engine standard environment, the version + is set to the version of the app. + items: + type: string + type: array + type: object + id: + description: Optional. The widget id. Ids may be made + up of alphanumerics, dashes and underscores. Widget + ids are optional. + type: string + logsPanel: + description: A widget that shows a stream of logs. + properties: + filter: + description: A filter that chooses which log entries + to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). + Only log entries that match the filter are returned. An + empty filter matches all log entries. + type: string + resourceNames: + description: The names of logging resources to collect + logs for. Currently only projects are supported. + If empty, the widget will default to the host + project. + items: + oneOf: + - not: required: - external - properties: - external: - description: The `projectID` field of a - project, when not managed by KCC. - type: string - kind: - description: The kind of the Project resource; - optional but must be `Project` if provided. - type: string - name: - description: The `name` field of a `Project` - resource. - type: string - namespace: - description: The `namespace` field of a - `Project` resource. - type: string - type: object - type: array - services: - description: |- - An identifier of the service, such as the name of the - executable, job, or Google App Engine service name. 
This field is expected - to have a low number of values that are relatively stable over time, as - opposed to `version`, which can be changed whenever new code is deployed. + required: + - name + - kind + - not: + anyOf: + - required: + - name + - required: + - namespace + - required: + - kind + required: + - external + properties: + external: + description: The external name of the referenced + resource + type: string + kind: + description: Kind of the referent. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + type: object + type: array + type: object + pieChart: + description: A widget that displays timeseries data + as a pie chart. + properties: + chartType: + description: Required. Indicates the visualization + type for the PieChart. + type: string + dataSets: + description: Required. The queries for the chart's + data. + items: + properties: + minAlignmentPeriod: + description: Optional. The lower bound on + data point frequency for this data set, + implemented by specifying the minimum alignment + period to use in a time series query. For + example, if the data is published once every + 10 minutes, the `min_alignment_period` should + be at least 10 minutes. It would not make + sense to fetch and align data at one minute + intervals. + type: string + sliceNameTemplate: + description: Optional. A template for the + name of the slice. This name will be displayed + in the legend and the tooltip of the pie + chart. It replaces the auto-generated names + for the slices. For example, if the template + is set to `${resource.labels.zone}`, the + zone's value will be used for the name instead + of the default name. + type: string + timeSeriesQuery: + description: Required. The query for the PieChart. + See, `google.monitoring.dashboard.v1.TimeSeriesQuery`. + properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time + series with PromQL. + type: string + timeSeriesFilter: + description: Filter parameters to fetch + time series. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. 
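For the `logsPanel` widget described earlier in this hunk, `filter` takes an Advanced Logs Query and `resourceNames` defaults to the host project when left empty. A sketch of a tile's widget (the query and project name are placeholders; `kind: Project` follows the reference schema above):

widget:
  title: "Recent application errors"
  logsPanel:
    filter: severity>=ERROR        # Advanced Logs Query; placeholder
    resourceNames:                 # optional; empty means the host project
    - kind: Project
      name: my-project             # a Project resource managed by KCC (hypothetical)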
+ type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to other + time series.' 
+ type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. 
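The `pieChart` widget documented in this hunk requires `chartType` and at least one data set; `sliceNameTemplate` names each slice from a label, as in the `${resource.labels.zone}` example given above. A sketch with illustrative values (the enum choices and metric are assumptions):

widget:
  title: "Mean CPU by zone"
  pieChart:
    chartType: DONUT
    dataSets:
    - sliceNameTemplate: "${resource.labels.zone}"
      minAlignmentPeriod: "300s"
      timeSeriesQuery:
        timeSeriesFilter:
          filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
          aggregation:
            alignmentPeriod: "300s"
            perSeriesAligner: ALIGN_MEAN
            crossSeriesReducer: REDUCE_MEAN
            groupByFields:
            - resource.label.zone   # label path chosen for illustration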
+ type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio + between two time series filters. + properties: + denominator: + description: The denominator of the + ratio. + properties: + aggregation: + description: By default, the raw + time series data is returned. + Use this field to combine multiple + time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member + of exactly one subset. The + `cross_series_reducer` is + applied to each subset of + time series. It is not possible + to reduce across different + resource types, so this + field implicitly contains + `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the time + series have the same resource + type, then the time series + are aggregated into a single + output time series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. 
Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the + ratio. + properties: + aggregation: + description: By default, the raw + time series data is returned. + Use this field to combine multiple + time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member + of exactly one subset. The + `cross_series_reducer` is + applied to each subset of + time series. It is not possible + to reduce across different + resource types, so this + field implicitly contains + `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the time + series have the same resource + type, then the time series + are aggregated into a single + output time series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. 
Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to other + time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. 
Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time + series with MQL. + type: string + unitOverride: + description: The unit of data contained + in fetched time series. If non-empty, + this unit will override any unit that + accompanies fetched data. The format + is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object + required: + - timeSeriesQuery + type: object + type: array + showLabels: + description: Optional. Indicates whether or not + the pie chart should show slices' labels + type: boolean + required: + - chartType + - dataSets + type: object + scorecard: + description: A scorecard summarizing time series data. + properties: + gaugeView: + description: Will cause the scorecard to show a + gauge chart. + properties: + lowerBound: + description: The lower bound for this gauge + chart. The value of the chart should always + be greater than or equal to this. + format: double + type: number + upperBound: + description: The upper bound for this gauge + chart. The value of the chart should always + be less than or equal to this. + format: double + type: number + type: object + sparkChartView: + description: Will cause the scorecard to show a + spark chart. + properties: + minAlignmentPeriod: + description: The lower bound on data point frequency + in the chart implemented by specifying the + minimum alignment period to use in a time + series query. For example, if the data is + published once every 10 minutes it would not + make sense to fetch and align data at one + minute intervals. This field is optional and + exists only as a hint. + type: string + sparkChartType: + description: Required. The type of sparkchart + to show in this chartView. 
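The `aggregation` blocks that recur throughout this schema (`alignmentPeriod`, `perSeriesAligner`, `crossSeriesReducer`, `groupByFields`) compose as sketched below. This is an illustrative fragment only; the metric type, filter, and label path are placeholder assumptions, not values taken from this patch.

```yaml
# Hypothetical fragment: align each series to 60s rates first, then sum the
# aligned series so that one output series remains per zone.
timeSeriesQuery:
  timeSeriesFilter:
    filter: metric.type="compute.googleapis.com/instance/cpu/usage_time" resource.type="gce_instance"
    aggregation:
      alignmentPeriod: 60s            # must be at least 60s when an aligner is set
      perSeriesAligner: ALIGN_RATE    # per-series alignment happens first
      crossSeriesReducer: REDUCE_SUM  # cross-series reduction happens second
      groupByFields:
        - resource.label.zone         # resource.type is implicitly preserved
  unitOverride: "1"
```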
+ type: string + required: + - sparkChartType + type: object + thresholds: + description: |- + The thresholds used to determine the state of the scorecard given the + time series' current value. For an actual value x, the scorecard is in a + danger state if x is less than or equal to a danger threshold that triggers + below, or greater than or equal to a danger threshold that triggers above. + Similarly, if x is above/below a warning threshold that triggers + above/below, then the scorecard is in a warning state - unless x also puts + it in a danger state. (Danger trumps warning.) + + As an example, consider a scorecard with the following four thresholds: + + ``` + { + value: 90, + category: 'DANGER', + trigger: 'ABOVE', + }, + { + value: 70, + category: 'WARNING', + trigger: 'ABOVE', + }, + { + value: 10, + category: 'DANGER', + trigger: 'BELOW', + }, + { + value: 20, + category: 'WARNING', + trigger: 'BELOW', + } + ``` + + Then: values less than or equal to 10 would put the scorecard in a DANGER + state, values greater than 10 but less than or equal to 20 a WARNING state, + values strictly between 20 and 70 an OK state, values greater than or equal + to 70 but less than 90 a WARNING state, and values greater than or equal to + 90 a DANGER state. + items: + properties: + color: + description: The state color for this threshold. + Color is not allowed in a XyChart. + type: string + direction: + description: The direction for the current + threshold. Direction is not allowed in a + XyChart. + type: string + label: + description: A label for the threshold. + type: string + targetAxis: + description: The target axis to use for plotting + the threshold. Target axis is not allowed + in a Scorecard. + type: string + value: + description: The value of the threshold. The + value should be defined in the native scale + of the metric. + format: double + type: number + type: object + type: array + timeSeriesQuery: + description: Required. Fields for querying time + series data from the Stackdriver metrics API. + properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time series + with PromQL. + type: string + timeSeriesFilter: + description: Filter parameters to fetch time + series. + properties: + aggregation: + description: By default, the raw time series + data is returned. Use this field to combine + multiple time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. 
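A minimal scorecard sketch that mirrors the four-threshold example given in the `thresholds` description above. It assumes the `color` enum (`RED`/`YELLOW`) stands in for the danger/warning categories and that `direction` corresponds to the trigger; the metric and gauge bounds are placeholders.

```yaml
scorecard:
  timeSeriesQuery:
    timeSeriesFilter:
      filter: metric.type="custom.googleapis.com/my_app/queue_depth" resource.type="gce_instance"
      aggregation:
        alignmentPeriod: 60s
        perSeriesAligner: ALIGN_MEAN
  gaugeView:
    lowerBound: 0.0
    upperBound: 100.0
  thresholds:
    - {value: 90, color: RED,    direction: ABOVE}   # danger at >= 90
    - {value: 70, color: YELLOW, direction: ABOVE}   # warning at >= 70
    - {value: 20, color: YELLOW, direction: BELOW}   # warning at <= 20
    - {value: 10, color: RED,    direction: BELOW}   # danger at <= 10
```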
+ type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to preserve + when `cross_series_reducer` is specified. + The `group_by_fields` determine how + the time series are partitioned into + subsets prior to applying the aggregation + operation. Each subset contains time + series that have the same value for + each of the grouping fields. Each + individual time series is a member + of exactly one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, so + this field implicitly contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the time + series have the same resource type, + then the time series are aggregated + into a single output time series. + If `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, resources, + and projects to query. + type: string + pickTimeSeriesFilter: + description: Ranking based time series filter. + properties: + direction: + description: How to use the ranking + to select time series that pass through + the filter. + type: string + numTimeSeries: + description: How many time series to + allow to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is applied + to each time series independently + to produce the value which will be + used to compare the time series to + other time series.' 
+ type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to preserve + when `cross_series_reducer` is specified. + The `group_by_fields` determine how + the time series are partitioned into + subsets prior to applying the aggregation + operation. Each subset contains time + series that have the same value for + each of the grouping fields. Each + individual time series is a member + of exactly one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, so + this field implicitly contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the time + series have the same resource type, + then the time series are aggregated + into a single output time series. + If `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. 
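For the ranking-based `pickTimeSeriesFilter` and the `secondaryAggregation` described above, a sketch might look like the following. The enum values shown (`METHOD_MEAN`, `TOP`, `ALIGN_MEAN`) are the standard Monitoring API names as far as this editor knows; the filter, label path, and periods are placeholders.

```yaml
# Hypothetical fragment: after aggregation, keep only the five series with the
# highest mean value, then smooth the survivors with a second aggregation.
timeSeriesFilter:
  filter: metric.type="custom.googleapis.com/my_app/request_count" resource.type="k8s_container"
  aggregation:
    alignmentPeriod: 60s
    perSeriesAligner: ALIGN_RATE
    crossSeriesReducer: REDUCE_SUM
    groupByFields:
      - resource.label.pod_name
  pickTimeSeriesFilter:
    rankingMethod: METHOD_MEAN
    direction: TOP
    numTimeSeries: 5
  secondaryAggregation:
    alignmentPeriod: 300s
    perSeriesAligner: ALIGN_MEAN
```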
+ type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio between + two time series filters. + properties: + denominator: + description: The denominator of the ratio. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets prior + to applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to + reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the time + series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. 
+ + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the ratio. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets prior + to applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to + reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the time + series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. 
Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time series filter. + properties: + direction: + description: How to use the ranking + to select time series that pass through + the filter. + type: string + numTimeSeries: + description: How many time series to + allow to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is applied + to each time series independently + to produce the value which will be + used to compare the time series to + other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to preserve + when `cross_series_reducer` is specified. + The `group_by_fields` determine how + the time series are partitioned into + subsets prior to applying the aggregation + operation. 
Each subset contains time + series that have the same value for + each of the grouping fields. Each + individual time series is a member + of exactly one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, so + this field implicitly contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the time + series have the same resource type, + then the time series are aggregated + into a single output time series. + If `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time series + with MQL. + type: string + unitOverride: + description: The unit of data contained in fetched + time series. If non-empty, this unit will + override any unit that accompanies fetched + data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object + required: + - timeSeriesQuery + type: object + sectionHeader: + description: A widget that defines a section header + for easier navigation of the dashboard. + properties: + dividerBelow: + description: Whether to insert a divider below the + section in the table of contents + type: boolean + subtitle: + description: The subtitle of the section + type: string + type: object + singleViewGroup: + description: A widget that groups the other widgets + by using a dropdown menu. + type: object + text: + description: A raw string or markdown displaying textual + content. + properties: + content: + description: The text content to be displayed. + type: string + format: + description: How the text content is formatted. + type: string + style: + description: How the text is styled + properties: + backgroundColor: + description: The background color as a hex string. + "#RRGGBB" or "#RGB" + type: string + fontSize: + description: Font sizes for both the title and + content. The title will still be larger relative + to the content. + type: string + horizontalAlignment: + description: The horizontal alignment of both + the title and content + type: string + padding: + description: The amount of padding around the + widget + type: string + pointerLocation: + description: The pointer location for this widget + (also sometimes called a "tail") + type: string + textColor: + description: The text color as a hex string. 
+ "#RRGGBB" or "#RGB" + type: string + verticalAlignment: + description: The vertical alignment of both + the title and content + type: string + type: object + type: object + timeSeriesTable: + description: A widget that displays time series data + in a tabular format. + properties: + columnSettings: + description: Optional. The list of the persistent + column settings for the table. + items: + properties: + column: + description: Required. The id of the column. + type: string + visible: + description: Required. Whether the column + should be visible on page load. + type: boolean + required: + - column + - visible + type: object + type: array + dataSets: + description: Required. The data displayed in this + table. + items: + properties: + minAlignmentPeriod: + description: Optional. The lower bound on + data point frequency for this data set, + implemented by specifying the minimum alignment + period to use in a time series query For + example, if the data is published once every + 10 minutes, the `min_alignment_period` should + be at least 10 minutes. It would not make + sense to fetch and align data at one minute + intervals. + type: string + tableDisplayOptions: + description: Optional. Table display options + for configuring how the table is rendered. + properties: + shownColumns: + description: Optional. This field is unused + and has been replaced by TimeSeriesTable.column_settings + items: + type: string + type: array + type: object + tableTemplate: + description: Optional. A template string for + naming `TimeSeries` in the resulting data + set. This should be a string with interpolations + of the form `${label_name}`, which will + resolve to the label's value i.e. "${resource.labels.project_id}." + type: string + timeSeriesQuery: + description: Required. Fields for querying + time series data from the Stackdriver metrics + API. + properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time + series with PromQL. + type: string + timeSeriesFilter: + description: Filter parameters to fetch + time series. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. 
+ + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to other + time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. 
+ properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. 
+ type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio + between two time series filters. + properties: + denominator: + description: The denominator of the + ratio. + properties: + aggregation: + description: By default, the raw + time series data is returned. + Use this field to combine multiple + time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member + of exactly one subset. The + `cross_series_reducer` is + applied to each subset of + time series. It is not possible + to reduce across different + resource types, so this + field implicitly contains + `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the time + series have the same resource + type, then the time series + are aggregated into a single + output time series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. 
Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the + ratio. + properties: + aggregation: + description: By default, the raw + time series data is returned. + Use this field to combine multiple + time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member + of exactly one subset. The + `cross_series_reducer` is + applied to each subset of + time series. It is not possible + to reduce across different + resource types, so this + field implicitly contains + `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the time + series have the same resource + type, then the time series + are aggregated into a single + output time series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. 
Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to other + time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. 
Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time + series with MQL. + type: string + unitOverride: + description: The unit of data contained + in fetched time series. If non-empty, + this unit will override any unit that + accompanies fetched data. The format + is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object + type: object + type: array + metricVisualization: + description: Optional. Store rendering strategy + type: string + required: + - dataSets + type: object + title: + description: Optional. The title of the widget. + type: string + xyChart: + description: A chart of time series data. + properties: + chartOptions: + description: Display options for the chart. + properties: + mode: + description: The chart mode. + type: string + type: object + dataSets: + description: Required. The data displayed in this + chart. + items: + properties: + legendTemplate: + description: A template string for naming + `TimeSeries` in the resulting data set. + This should be a string with interpolations + of the form `${label_name}`, which will + resolve to the label's value. + type: string + minAlignmentPeriod: + description: Optional. The lower bound on + data point frequency for this data set, + implemented by specifying the minimum alignment + period to use in a time series query For + example, if the data is published once every + 10 minutes, the `min_alignment_period` should + be at least 10 minutes. It would not make + sense to fetch and align data at one minute + intervals. + type: string + plotType: + description: How this data should be plotted + on the chart. + type: string + timeSeriesQuery: + description: Required. Fields for querying + time series data from the Stackdriver metrics + API. 
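Putting the xyChart `dataSets` fields together, a single line-plot data set might read as sketched below. The PromQL expression, legend template, and periods are placeholder assumptions; `plotType: LINE` and `mode: COLOR` assume the standard PlotType and ChartOptions.Mode enums.

```yaml
xyChart:
  dataSets:
    - plotType: LINE
      legendTemplate: "${resource.labels.zone}"   # resolves to the label's value
      minAlignmentPeriod: 600s                    # data published every 10 minutes
      timeSeriesQuery:
        prometheusQuery: sum by (zone)(rate(http_requests_total[5m]))
        unitOverride: "1"
  chartOptions:
    mode: COLOR
```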
+ properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time + series with PromQL. + type: string + timeSeriesFilter: + description: Filter parameters to fetch + time series. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. 
Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to other + time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. 
Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio + between two time series filters. + properties: + denominator: + description: The denominator of the + ratio. + properties: + aggregation: + description: By default, the raw + time series data is returned. + Use this field to combine multiple + time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. 
An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member + of exactly one subset. The + `cross_series_reducer` is + applied to each subset of + time series. It is not possible + to reduce across different + resource types, so this + field implicitly contains + `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the time + series have the same resource + type, then the time series + are aggregated into a single + output time series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the + ratio. + properties: + aggregation: + description: By default, the raw + time series data is returned. + Use this field to combine multiple + time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. 
The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member + of exactly one subset. The + `cross_series_reducer` is + applied to each subset of + time series. It is not possible + to reduce across different + resource types, so this + field implicitly contains + `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the time + series have the same resource + type, then the time series + are aggregated into a single + output time series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to other + time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. 
+ properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time + series with MQL. 
+ type: string + unitOverride: + description: The unit of data contained + in fetched time series. If non-empty, + this unit will override any unit that + accompanies fetched data. The format + is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object + required: + - timeSeriesQuery + type: object + type: array + thresholds: + description: Threshold lines drawn horizontally + across the chart. + items: + properties: + color: + description: The state color for this threshold. + Color is not allowed in a XyChart. + type: string + direction: + description: The direction for the current + threshold. Direction is not allowed in a + XyChart. + type: string + label: + description: A label for the threshold. + type: string + targetAxis: + description: The target axis to use for plotting + the threshold. Target axis is not allowed + in a Scorecard. + type: string + value: + description: The value of the threshold. The + value should be defined in the native scale + of the metric. + format: double + type: number + type: object + type: array + timeshiftDuration: + description: The duration used to display a comparison + chart. A comparison chart simultaneously shows + values from two similar-length time periods (e.g., + week-over-week metrics). The duration must be + positive, and it can only be applied to charts + with data sets of LINE plot type. + type: string + xAxis: + description: The properties applied to the x-axis. + properties: + label: + description: The label of the axis. + type: string + scale: + description: The axis scale. By default, a linear + scale is used. + type: string + type: object + yAxis: + description: The properties applied to the y-axis. + properties: + label: + description: The label of the axis. + type: string + scale: + description: The axis scale. By default, a linear + scale is used. + type: string + type: object + required: + - dataSets + type: object + type: object + width: + description: The width of the tile, measured in grid blocks. + Tiles must have a minimum width of 1. + format: int32 + type: integer + xPos: + description: The zero-indexed position of the tile in grid + blocks relative to the left edge of the grid. Tiles must + be contained within the specified number of columns. `x_pos` + cannot be negative. + format: int32 + type: integer + yPos: + description: The zero-indexed position of the tile in grid + blocks relative to the top edge of the grid. `y_pos` cannot + be negative. + format: int32 + type: integer + type: object + type: array + type: object + projectRef: + description: Immutable. The Project that this resource belongs to. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: The `projectID` field of a project, when not managed + by KCC. + type: string + kind: + description: The kind of the Project resource; optional but must + be `Project` if provided. + type: string + name: + description: The `name` field of a `Project` resource. + type: string + namespace: + description: The `namespace` field of a `Project` resource. + type: string + type: object + resourceID: + description: Immutable. Optional. The name of the resource. Used for + creation and acquisition. When unset, the value of `metadata.name` + is used as the default. 
+ type: string + rowLayout: + description: The content is divided into equally spaced rows and the + widgets are arranged horizontally. + properties: + rows: + description: The rows of content to display. + items: + properties: + weight: + description: The relative weight of this row. The row weight + is used to adjust the height of rows on the screen (relative + to peers). Greater the weight, greater the height of the + row on the screen. If omitted, a value of 1 is used while + rendering. + format: int64 + type: integer + widgets: + description: The display widgets arranged horizontally in + this row. + items: + properties: + alertChart: + description: A chart of alert policy data. + properties: + alertPolicyRef: + description: Required. A reference to the MonitoringAlertPolicy. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: The MonitoringAlertPolicy link + in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", + when not managed by KCC. + type: string + name: + description: The `name` field of a `MonitoringAlertPolicy` + resource. + type: string + namespace: + description: The `namespace` field of a `MonitoringAlertPolicy` + resource. + type: string + type: object + required: + - alertPolicyRef + type: object + blank: + description: A blank space. + type: object + collapsibleGroup: + description: A widget that groups the other widgets. + All widgets that are within the area spanned by + the grouping widget are considered member widgets. + properties: + collapsed: + description: The collapsed state of the widget + on first page load. + type: boolean + type: object + errorReportingPanel: + description: A widget that displays a list of error + groups. + properties: + projectRefs: + description: The projects from which to gather + errors. + items: + description: The Project that this resource + belongs to. + oneOf: + - not: + required: + - external + required: + - name + - kind + - not: + anyOf: + - required: + - name + - required: + - namespace + - required: + - kind + required: + - external + properties: + external: + description: The `projectID` field of a + project, when not managed by KCC. + type: string + kind: + description: The kind of the Project resource; + optional but must be `Project` if provided. + type: string + name: + description: The `name` field of a `Project` + resource. + type: string + namespace: + description: The `namespace` field of a + `Project` resource. + type: string + type: object + type: array + services: + description: |- + An identifier of the service, such as the name of the + executable, job, or Google App Engine service name. This field is expected + to have a low number of values that are relatively stable over time, as + opposed to `version`, which can be changed whenever new code is deployed. + + Contains the service name for error reports extracted from Google + App Engine logs or `default` if the App Engine default service is used. + items: + type: string + type: array + versions: + description: Represents the source code version + that the developer provided, which could represent + a version label or a Git SHA-1 hash, for example. + For App Engine standard environment, the version + is set to the version of the app. + items: + type: string + type: array + type: object + id: + description: Optional. The widget id. Ids may be made + up of alphanumerics, dashes and underscores. 
Widget + ids are optional. + type: string + logsPanel: + description: A widget that shows a stream of logs. + properties: + filter: + description: A filter that chooses which log entries + to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). + Only log entries that match the filter are returned. An + empty filter matches all log entries. + type: string + resourceNames: + description: The names of logging resources to + collect logs for. Currently only projects are + supported. If empty, the widget will default + to the host project. + items: + oneOf: + - not: + required: + - external + required: + - name + - kind + - not: + anyOf: + - required: + - name + - required: + - namespace + - required: + - kind + required: + - external + properties: + external: + description: The external name of the referenced + resource + type: string + kind: + description: Kind of the referent. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + type: object + type: array + type: object + pieChart: + description: A widget that displays timeseries data + as a pie chart. + properties: + chartType: + description: Required. Indicates the visualization + type for the PieChart. + type: string + dataSets: + description: Required. The queries for the chart's + data. + items: + properties: + minAlignmentPeriod: + description: Optional. The lower bound on + data point frequency for this data set, + implemented by specifying the minimum + alignment period to use in a time series + query. For example, if the data is published + once every 10 minutes, the `min_alignment_period` + should be at least 10 minutes. It would + not make sense to fetch and align data + at one minute intervals. + type: string + sliceNameTemplate: + description: Optional. A template for the + name of the slice. This name will be displayed + in the legend and the tooltip of the pie + chart. It replaces the auto-generated + names for the slices. For example, if + the template is set to `${resource.labels.zone}`, + the zone's value will be used for the + name instead of the default name. + type: string + timeSeriesQuery: + description: Required. The query for the + PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`. + properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time + series with PromQL. + type: string + timeSeriesFilter: + description: Filter parameters to fetch + time series. + properties: + aggregation: + description: By default, the raw + time series data is returned. + Use this field to combine multiple + time series for different views + of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. 
This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of + exactly one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is not + possible to reduce across + different resource types, + so this field implicitly contains + `resource.type`. Fields not + specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same + resource type, then the time + series are aggregated into + a single output time series. + If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + pickTimeSeriesFilter: + description: Ranking based time + series filter. 
+ properties: + direction: + description: How to use the + ranking to select time series + that pass through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to + other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of + exactly one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is not + possible to reduce across + different resource types, + so this field implicitly contains + `resource.type`. Fields not + specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same + resource type, then the time + series are aggregated into + a single output time series. + If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. 
The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio + between two time series filters. + properties: + denominator: + description: The denominator of + the ratio. + properties: + aggregation: + description: By default, the + raw time series data is returned. + Use this field to combine + multiple time series for different + views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of + fields to preserve when + `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time + series are partitioned + into subsets prior to + applying the aggregation + operation. Each subset + contains time series that + have the same value for + each of the grouping fields. + Each individual time series + is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is + not possible to reduce + across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the + time series have the same + resource type, then the + time series are aggregated + into a single output time + series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. 
Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Contains the service name for error reports extracted from Google - App Engine logs or `default` if the App Engine default service is used. - items: - type: string - type: array - versions: - description: Represents the source code version - that the developer provided, which could represent - a version label or a Git SHA-1 hash, for example. - For App Engine standard environment, the version - is set to the version of the app. - items: - type: string + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric + types, resources, and projects + to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the + ratio. + properties: + aggregation: + description: By default, the + raw time series data is returned. + Use this field to combine + multiple time series for different + views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of + fields to preserve when + `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time + series are partitioned + into subsets prior to + applying the aggregation + operation. 
Each subset + contains time series that + have the same value for + each of the grouping fields. + Each individual time series + is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is + not possible to reduce + across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the + time series have the same + resource type, then the + time series are aggregated + into a single output time + series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric + types, resources, and projects + to query. + type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time + series filter. + properties: + direction: + description: How to use the + ranking to select time series + that pass through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to + other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. 
+ + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of + exactly one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is not + possible to reduce across + different resource types, + so this field implicitly contains + `resource.type`. Fields not + specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same + resource type, then the time + series are aggregated into + a single output time series. + If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time + series with MQL. + type: string + unitOverride: + description: The unit of data contained + in fetched time series. If non-empty, + this unit will override any unit that + accompanies fetched data. The format + is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object + required: + - timeSeriesQuery + type: object type: array + showLabels: + description: Optional. Indicates whether or not + the pie chart should show slices' labels + type: boolean + required: + - chartType + - dataSets type: object - id: - description: Optional. The widget id. Ids may be made - up of alphanumerics, dashes and underscores. Widget - ids are optional. - type: string - logsPanel: - description: A widget that shows a stream of logs. + scorecard: + description: A scorecard summarizing time series data. 
properties: - filter: - description: A filter that chooses which log entries - to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). - Only log entries that match the filter are returned. An - empty filter matches all log entries. - type: string - resourceNames: - description: The names of logging resources to - collect logs for. Currently only projects are - supported. If empty, the widget will default - to the host project. + gaugeView: + description: Will cause the scorecard to show + a gauge chart. + properties: + lowerBound: + description: The lower bound for this gauge + chart. The value of the chart should always + be greater than or equal to this. + format: double + type: number + upperBound: + description: The upper bound for this gauge + chart. The value of the chart should always + be less than or equal to this. + format: double + type: number + type: object + sparkChartView: + description: Will cause the scorecard to show + a spark chart. + properties: + minAlignmentPeriod: + description: The lower bound on data point + frequency in the chart implemented by specifying + the minimum alignment period to use in a + time series query. For example, if the data + is published once every 10 minutes it would + not make sense to fetch and align data at + one minute intervals. This field is optional + and exists only as a hint. + type: string + sparkChartType: + description: Required. The type of sparkchart + to show in this chartView. + type: string + required: + - sparkChartType + type: object + thresholds: + description: |- + The thresholds used to determine the state of the scorecard given the + time series' current value. For an actual value x, the scorecard is in a + danger state if x is less than or equal to a danger threshold that triggers + below, or greater than or equal to a danger threshold that triggers above. + Similarly, if x is above/below a warning threshold that triggers + above/below, then the scorecard is in a warning state - unless x also puts + it in a danger state. (Danger trumps warning.) + + As an example, consider a scorecard with the following four thresholds: + + ``` + { + value: 90, + category: 'DANGER', + trigger: 'ABOVE', + }, + { + value: 70, + category: 'WARNING', + trigger: 'ABOVE', + }, + { + value: 10, + category: 'DANGER', + trigger: 'BELOW', + }, + { + value: 20, + category: 'WARNING', + trigger: 'BELOW', + } + ``` + + Then: values less than or equal to 10 would put the scorecard in a DANGER + state, values greater than 10 but less than or equal to 20 a WARNING state, + values strictly between 20 and 70 an OK state, values greater than or equal + to 70 but less than 90 a WARNING state, and values greater than or equal to + 90 a DANGER state. items: - oneOf: - - not: - required: - - external - required: - - name - - kind - - not: - anyOf: - - required: - - name - - required: - - namespace - - required: - - kind - required: - - external properties: - external: - description: The external name of the referenced - resource + color: + description: The state color for this threshold. + Color is not allowed in a XyChart. type: string - kind: - description: Kind of the referent. + direction: + description: The direction for the current + threshold. Direction is not allowed in + a XyChart. type: string - name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + label: + description: A label for the threshold. 
type: string - namespace: - description: 'Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + targetAxis: + description: The target axis to use for + plotting the threshold. Target axis is + not allowed in a Scorecard. type: string + value: + description: The value of the threshold. + The value should be defined in the native + scale of the metric. + format: double + type: number type: object type: array - type: object - pieChart: - description: A widget that displays timeseries data - as a pie chart. - properties: - chartType: - description: Required. Indicates the visualization - type for the PieChart. - type: string - dataSets: - description: Required. The queries for the chart's - data. - items: - properties: - minAlignmentPeriod: - description: Optional. The lower bound on - data point frequency for this data set, - implemented by specifying the minimum - alignment period to use in a time series - query. For example, if the data is published - once every 10 minutes, the `min_alignment_period` - should be at least 10 minutes. It would - not make sense to fetch and align data - at one minute intervals. - type: string - sliceNameTemplate: - description: Optional. A template for the - name of the slice. This name will be displayed - in the legend and the tooltip of the pie - chart. It replaces the auto-generated - names for the slices. For example, if - the template is set to `${resource.labels.zone}`, - the zone's value will be used for the - name instead of the default name. - type: string - timeSeriesQuery: - description: Required. The query for the - PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`. - properties: - outputFullDuration: - description: |- - Optional. If set, Cloud Monitoring will treat the full query duration as - the alignment period so that there will be only 1 output value. + timeSeriesQuery: + description: Required. Fields for querying time + series data from the Stackdriver metrics API. + properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time series + with PromQL. + type: string + timeSeriesFilter: + description: Filter parameters to fetch time + series. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this field + to combine multiple time series for + different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks. 
+ type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - *Note: This could override the configured alignment period except for - the cases where a series of data points are expected, like - - XyChart - - Scorecard's spark chart - type: boolean - prometheusQuery: - description: A query used to fetch time - series with PromQL. - type: string - timeSeriesFilter: - description: Filter parameters to fetch - time series. - properties: - aggregation: - description: By default, the raw - time series data is returned. - Use this field to combine multiple - time series for different views - of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series are + partitioned into subsets prior to + applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, + so this field implicitly contains + `resource.type`. Fields not specified + in `group_by_fields` are aggregated + away. If `group_by_fields` is not + specified and all the time series + have the same resource type, then + the time series are aggregated into + a single output time series. If + `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. 
- type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, resources, + and projects to query. + type: string + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is + applied to each time series independently + to produce the value which will + be used to compare the time series + to other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the - same value for each of the - grouping fields. Each individual - time series is a member of - exactly one subset. The `cross_series_reducer` - is applied to each subset - of time series. It is not - possible to reduce across - different resource types, - so this field implicitly contains - `resource.type`. Fields not - specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same - resource type, then the time - series are aggregated into - a single output time series. - If `cross_series_reducer` - is not defined, this field - is ignored. 
- items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - pickTimeSeriesFilter: - description: Ranking based time - series filter. - properties: - direction: - description: How to use the - ranking to select time series - that pass through the filter. - type: string - numTimeSeries: - description: How many time series - to allow to pass through the - filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` - is applied to each time series - independently to produce the - value which will be used to - compare the time series to - other time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after `aggregation` is applied. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. 
The `group_by_fields` + determine how the time series are + partitioned into subsets prior to + applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, + so this field implicitly contains + `resource.type`. Fields not specified + in `group_by_fields` are aggregated + away. If `group_by_fields` is not + specified and all the time series + have the same resource type, then + the time series are aggregated into + a single output time series. If + `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio between + two time series filters. + properties: + denominator: + description: The denominator of the ratio. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. 
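The `aggregation` fields described here (alignment period, per-series aligner, cross-series reducer, group-by fields) normally appear together under a widget's `timeSeriesFilter`. A hedged fragment follows; the metric filter, label name, and enum spellings are assumed from the Cloud Monitoring dashboards API rather than taken from this patch.

```yaml
# Fragment of a widget's timeSeriesQuery; filter, label, and enums are assumptions.
timeSeriesQuery:
  timeSeriesFilter:
    filter: 'metric.type="loadbalancing.googleapis.com/https/request_count" resource.type="https_lb_rule"'
    aggregation:
      alignmentPeriod: "60s"           # must be at least 60 seconds
      perSeriesAligner: "ALIGN_RATE"   # align each series to a per-second rate
      crossSeriesReducer: "REDUCE_SUM" # then sum across series
      groupByFields:
      - "resource.label.backend_target_name"  # one output series per backend
```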
+ If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the - same value for each of the - grouping fields. Each individual - time series is a member of - exactly one subset. The `cross_series_reducer` - is applied to each subset - of time series. It is not - possible to reduce across - different resource types, - so this field implicitly contains - `resource.type`. Fields not - specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same - resource type, then the time - series are aggregated into - a single output time series. - If `cross_series_reducer` - is not defined, this field - is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. 
Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: type: string - type: object - required: - - filter - type: object - timeSeriesFilterRatio: - description: Parameters to fetch a ratio - between two time series filters. - properties: - denominator: - description: The denominator of - the ratio. - properties: - aggregation: - description: By default, the - raw time series data is returned. - Use this field to combine - multiple time series for different - views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + numerator: + description: The numerator of the ratio. + properties: + aggregation: + description: By default, the raw time + series data is returned. Use this + field to combine multiple time series + for different views of the data. 
+ properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of - fields to preserve when - `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time - series are partitioned - into subsets prior to - applying the aggregation - operation. Each subset - contains time series that - have the same value for - each of the grouping fields. - Each individual time series - is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset - of time series. It is - not possible to reduce - across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If - `group_by_fields` is not - specified and all the - time series have the same - resource type, then the - time series are aggregated - into a single output time - series. If `cross_series_reducer` - is not defined, this field - is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must be aligned in order to perform cross-time - series reduction. 
If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric - types, resources, and projects - to query. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the same + value for each of the grouping + fields. Each individual time + series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of + time series. It is not possible + to reduce across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same resource + type, then the time series are + aggregated into a single output + time series. If `cross_series_reducer` + is not defined, this field is + ignored. + items: type: string - required: - - filter - type: object - numerator: - description: The numerator of the - ratio. - properties: - aggregation: - description: By default, the - raw time series data is returned. - Use this field to combine - multiple time series for different - views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. 
Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of - fields to preserve when - `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time - series are partitioned - into subsets prior to - applying the aggregation - operation. Each subset - contains time series that - have the same value for - each of the grouping fields. - Each individual time series - is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset - of time series. It is - not possible to reduce - across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If - `group_by_fields` is not - specified and all the - time series have the same - resource type, then the - time series are aggregated - into a single output time - series. If `cross_series_reducer` - is not defined, this field - is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. + type: string + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time series + filter. + properties: + direction: + description: How to use the ranking + to select time series that pass + through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` is + applied to each time series independently + to produce the value which will + be used to compare the time series + to other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. 
- Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric - types, resources, and projects - to query. - type: string - required: - - filter - type: object - pickTimeSeriesFilter: - description: Ranking based time - series filter. - properties: - direction: - description: How to use the - ranking to select time series - that pass through the filter. - type: string - numTimeSeries: - description: How many time series - to allow to pass through the - filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` - is applied to each time series - independently to produce the - value which will be used to - compare the time series to - other time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after the ratio is computed. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields to + preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series are + partitioned into subsets prior to + applying the aggregation operation. + Each subset contains time series + that have the same value for each + of the grouping fields. Each individual + time series is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset of time + series. It is not possible to reduce + across different resource types, + so this field implicitly contains + `resource.type`. Fields not specified + in `group_by_fields` are aggregated + away. If `group_by_fields` is not + specified and all the time series + have the same resource type, then + the time series are aggregated into + a single output time series. 
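A `timeSeriesFilterRatio` of the shape described above is typically used for rate-style views such as an error ratio. A minimal sketch under the same caveats: the filters, the `response_code_class` label, and the aligner/reducer values are assumptions, not content of this patch.

```yaml
# Ratio of 5xx responses to all responses; filters and enums are illustrative.
timeSeriesQuery:
  timeSeriesFilterRatio:
    numerator:
      filter: 'metric.type="loadbalancing.googleapis.com/https/request_count" metric.label.response_code_class="500"'
      aggregation:
        alignmentPeriod: "60s"
        perSeriesAligner: "ALIGN_RATE"
        crossSeriesReducer: "REDUCE_SUM"
    denominator:
      filter: 'metric.type="loadbalancing.googleapis.com/https/request_count"'
      aggregation:
        alignmentPeriod: "60s"
        perSeriesAligner: "ALIGN_RATE"
        crossSeriesReducer: "REDUCE_SUM"
  unitOverride: "1"
```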
If + `cross_series_reducer` is not defined, + this field is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time series + with MQL. + type: string + unitOverride: + description: The unit of data contained in + fetched time series. If non-empty, this + unit will override any unit that accompanies + fetched data. The format is the same as + the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object + required: + - timeSeriesQuery + type: object + sectionHeader: + description: A widget that defines a section header + for easier navigation of the dashboard. + properties: + dividerBelow: + description: Whether to insert a divider below + the section in the table of contents + type: boolean + subtitle: + description: The subtitle of the section + type: string + type: object + singleViewGroup: + description: A widget that groups the other widgets + by using a dropdown menu. + type: object + text: + description: A raw string or markdown displaying textual + content. + properties: + content: + description: The text content to be displayed. + type: string + format: + description: How the text content is formatted. + type: string + style: + description: How the text is styled + properties: + backgroundColor: + description: The background color as a hex + string. "#RRGGBB" or "#RGB" + type: string + fontSize: + description: Font sizes for both the title + and content. The title will still be larger + relative to the content. + type: string + horizontalAlignment: + description: The horizontal alignment of both + the title and content + type: string + padding: + description: The amount of padding around + the widget + type: string + pointerLocation: + description: The pointer location for this + widget (also sometimes called a "tail") + type: string + textColor: + description: The text color as a hex string. + "#RRGGBB" or "#RGB" + type: string + verticalAlignment: + description: The vertical alignment of both + the title and content + type: string + type: object + type: object + timeSeriesTable: + description: A widget that displays time series data + in a tabular format. + properties: + columnSettings: + description: Optional. The list of the persistent + column settings for the table. + items: + properties: + column: + description: Required. The id of the column. + type: string + visible: + description: Required. Whether the column + should be visible on page load. 
+ type: boolean + required: + - column + - visible + type: object + type: array + dataSets: + description: Required. The data displayed in this + table. + items: + properties: + minAlignmentPeriod: + description: Optional. The lower bound on + data point frequency for this data set, + implemented by specifying the minimum + alignment period to use in a time series + query For example, if the data is published + once every 10 minutes, the `min_alignment_period` + should be at least 10 minutes. It would + not make sense to fetch and align data + at one minute intervals. + type: string + tableDisplayOptions: + description: Optional. Table display options + for configuring how the table is rendered. + properties: + shownColumns: + description: Optional. This field is + unused and has been replaced by TimeSeriesTable.column_settings + items: + type: string + type: array + type: object + tableTemplate: + description: Optional. A template string + for naming `TimeSeries` in the resulting + data set. This should be a string with + interpolations of the form `${label_name}`, + which will resolve to the label's value + i.e. "${resource.labels.project_id}." + type: string + timeSeriesQuery: + description: Required. Fields for querying + time series data from the Stackdriver + metrics API. + properties: + outputFullDuration: + description: |- + Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart + type: boolean + prometheusQuery: + description: A query used to fetch time + series with PromQL. + type: string + timeSeriesFilter: + description: Filter parameters to fetch + time series. + properties: + aggregation: + description: By default, the raw + time series data is returned. + Use this field to combine multiple + time series for different views + of the data. properties: alignmentPeriod: description: |- @@ -7407,798 +10207,531 @@ spec: groupByFields: description: The set of fields to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the - same value for each of the - grouping fields. Each individual - time series is a member of - exactly one subset. The `cross_series_reducer` - is applied to each subset - of time series. It is not - possible to reduce across - different resource types, - so this field implicitly contains - `resource.type`. Fields not - specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same - resource type, then the time - series are aggregated into - a single output time series. - If `cross_series_reducer` - is not defined, this field - is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. 
The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - type: object - timeSeriesQueryLanguage: - description: A query used to fetch time - series with MQL. - type: string - unitOverride: - description: The unit of data contained - in fetched time series. If non-empty, - this unit will override any unit that - accompanies fetched data. The format - is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - type: string - type: object - required: - - timeSeriesQuery - type: object - type: array - showLabels: - description: Optional. Indicates whether or not - the pie chart should show slices' labels - type: boolean - required: - - chartType - - dataSets - type: object - scorecard: - description: A scorecard summarizing time series data. - properties: - gaugeView: - description: Will cause the scorecard to show - a gauge chart. - properties: - lowerBound: - description: The lower bound for this gauge - chart. The value of the chart should always - be greater than or equal to this. - format: double - type: number - upperBound: - description: The upper bound for this gauge - chart. The value of the chart should always - be less than or equal to this. - format: double - type: number - type: object - sparkChartView: - description: Will cause the scorecard to show - a spark chart. - properties: - minAlignmentPeriod: - description: The lower bound on data point - frequency in the chart implemented by specifying - the minimum alignment period to use in a - time series query. For example, if the data - is published once every 10 minutes it would - not make sense to fetch and align data at - one minute intervals. This field is optional - and exists only as a hint. - type: string - sparkChartType: - description: Required. The type of sparkchart - to show in this chartView. - type: string - required: - - sparkChartType - type: object - thresholds: - description: |- - The thresholds used to determine the state of the scorecard given the - time series' current value. For an actual value x, the scorecard is in a - danger state if x is less than or equal to a danger threshold that triggers - below, or greater than or equal to a danger threshold that triggers above. - Similarly, if x is above/below a warning threshold that triggers - above/below, then the scorecard is in a warning state - unless x also puts - it in a danger state. (Danger trumps warning.) 
- - As an example, consider a scorecard with the following four thresholds: - - ``` - { - value: 90, - category: 'DANGER', - trigger: 'ABOVE', - }, - { - value: 70, - category: 'WARNING', - trigger: 'ABOVE', - }, - { - value: 10, - category: 'DANGER', - trigger: 'BELOW', - }, - { - value: 20, - category: 'WARNING', - trigger: 'BELOW', - } - ``` - - Then: values less than or equal to 10 would put the scorecard in a DANGER - state, values greater than 10 but less than or equal to 20 a WARNING state, - values strictly between 20 and 70 an OK state, values greater than or equal - to 70 but less than 90 a WARNING state, and values greater than or equal to - 90 a DANGER state. - items: - properties: - color: - description: The state color for this threshold. - Color is not allowed in a XyChart. - type: string - direction: - description: The direction for the current - threshold. Direction is not allowed in - a XyChart. - type: string - label: - description: A label for the threshold. - type: string - targetAxis: - description: The target axis to use for - plotting the threshold. Target axis is - not allowed in a Scorecard. - type: string - value: - description: The value of the threshold. - The value should be defined in the native - scale of the metric. - format: double - type: number - type: object - type: array - timeSeriesQuery: - description: Required. Fields for querying time - series data from the Stackdriver metrics API. - properties: - outputFullDuration: - description: |- - Optional. If set, Cloud Monitoring will treat the full query duration as - the alignment period so that there will be only 1 output value. - - *Note: This could override the configured alignment period except for - the cases where a series of data points are expected, like - - XyChart - - Scorecard's spark chart - type: boolean - prometheusQuery: - description: A query used to fetch time series - with PromQL. - type: string - timeSeriesFilter: - description: Filter parameters to fetch time - series. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this field - to combine multiple time series for - different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of + exactly one subset. The `cross_series_reducer` + is applied to each subset + of time series. 
It is not + possible to reduce across + different resource types, + so this field implicitly contains + `resource.type`. Fields not + specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same + resource type, then the time + series are aggregated into + a single output time series. + If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series are - partitioned into subsets prior to - applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, - so this field implicitly contains - `resource.type`. Fields not specified - in `group_by_fields` are aggregated - away. If `group_by_fields` is not - specified and all the time series - have the same resource type, then - the time series are aggregated into - a single output time series. If - `cross_series_reducer` is not defined, - this field is ignored. - items: + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric types, + resources, and projects to query. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. 
+ pickTimeSeriesFilter: + description: Ranking based time + series filter. + properties: + direction: + description: How to use the + ranking to select time series + that pass through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to + other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after `aggregation` is applied. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. - type: string - pickTimeSeriesFilter: - description: Ranking based time series - filter. - properties: - direction: - description: How to use the ranking - to select time series that pass - through the filter. - type: string - numTimeSeries: - description: How many time series - to allow to pass through the filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` is - applied to each time series independently - to produce the value which will - be used to compare the time series - to other time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after `aggregation` is applied. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. 
- If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of + exactly one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is not + possible to reduce across + different resource types, + so this field implicitly contains + `resource.type`. Fields not + specified in `group_by_fields` + are aggregated away. If `group_by_fields` + is not specified and all the + time series have the same + resource type, then the time + series are aggregated into + a single output time series. + If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series are - partitioned into subsets prior to - applying the aggregation operation. 
- Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, - so this field implicitly contains - `resource.type`. Fields not specified - in `group_by_fields` are aggregated - away. If `group_by_fields` is not - specified and all the time series - have the same resource type, then - the time series are aggregated into - a single output time series. If - `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + required: + - filter + type: object + timeSeriesFilterRatio: + description: Parameters to fetch a ratio + between two time series filters. + properties: + denominator: + description: The denominator of + the ratio. + properties: + aggregation: + description: By default, the + raw time series data is returned. + Use this field to combine + multiple time series for different + views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - required: - - filter - type: object - timeSeriesFilterRatio: - description: Parameters to fetch a ratio between - two time series filters. - properties: - denominator: - description: The denominator of the ratio. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. 
- properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of + fields to preserve when + `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time + series are partitioned + into subsets prior to + applying the aggregation + operation. Each subset + contains time series that + have the same value for + each of the grouping fields. + Each individual time series + is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is + not possible to reduce + across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the + time series have the same + resource type, then the + time series are aggregated + into a single output time + series. If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. 
+ Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric + types, resources, and projects + to query. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + required: + - filter + type: object + numerator: + description: The numerator of the + ratio. + properties: + aggregation: + description: By default, the + raw time series data is returned. + Use this field to combine + multiple time series for different + views of the data. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. 
- Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - numerator: - description: The numerator of the ratio. - properties: - aggregation: - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. + type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of + fields to preserve when + `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time + series are partitioned + into subsets prior to + applying the aggregation + operation. Each subset + contains time series that + have the same value for + each of the grouping fields. + Each individual time series + is a member of exactly + one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is + not possible to reduce + across different resource + types, so this field implicitly + contains `resource.type`. Fields + not specified in `group_by_fields` + are aggregated away. If + `group_by_fields` is not + specified and all the + time series have the same + resource type, then the + time series are aggregated + into a single output time + series. 
If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. - items: + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + filter: + description: Required. The [monitoring + filter](https://cloud.google.com/monitoring/api/v3/filters) + that identifies the metric + types, resources, and projects + to query. type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. 
If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - filter: - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. - type: string - required: - - filter - type: object - pickTimeSeriesFilter: - description: Ranking based time series - filter. - properties: - direction: - description: How to use the ranking - to select time series that pass - through the filter. - type: string - numTimeSeries: - description: How many time series - to allow to pass through the filter. - format: int32 - type: integer - rankingMethod: - description: '`ranking_method` is - applied to each time series independently - to produce the value which will - be used to compare the time series - to other time series.' - type: string - type: object - secondaryAggregation: - description: Apply a second aggregation - after the ratio is computed. - properties: - alignmentPeriod: - description: |- - The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. + required: + - filter + type: object + pickTimeSeriesFilter: + description: Ranking based time + series filter. + properties: + direction: + description: How to use the + ranking to select time series + that pass through the filter. + type: string + numTimeSeries: + description: How many time series + to allow to pass through the + filter. + format: int32 + type: integer + rankingMethod: + description: '`ranking_method` + is applied to each time series + independently to produce the + value which will be used to + compare the time series to + other time series.' + type: string + type: object + secondaryAggregation: + description: Apply a second aggregation + after the ratio is computed. + properties: + alignmentPeriod: + description: |- + The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. - The maximum value of the `alignment_period` is 2 years, or 104 weeks. - type: string - crossSeriesReducer: - description: |- - The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. + The maximum value of the `alignment_period` is 2 years, or 104 weeks. 
+ type: string + crossSeriesReducer: + description: |- + The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned. - type: string - groupByFields: - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series are - partitioned into subsets prior to - applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, - so this field implicitly contains - `resource.type`. Fields not specified - in `group_by_fields` are aggregated - away. If `group_by_fields` is not - specified and all the time series - have the same resource type, then - the time series are aggregated into - a single output time series. If - `cross_series_reducer` is not defined, - this field is ignored. - items: - type: string - type: array - perSeriesAligner: - description: |- - An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned. + type: string + groupByFields: + description: The set of fields + to preserve when `cross_series_reducer` + is specified. The `group_by_fields` + determine how the time series + are partitioned into subsets + prior to applying the aggregation + operation. Each subset contains + time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of + exactly one subset. The `cross_series_reducer` + is applied to each subset + of time series. It is not + possible to reduce across + different resource types, + so this field implicitly contains + `resource.type`. Fields not + specified in `group_by_fields` + are aggregated away. 
If `group_by_fields` + is not specified and all the + time series have the same + resource type, then the time + series are aggregated into + a single output time series. + If `cross_series_reducer` + is not defined, this field + is ignored. + items: + type: string + type: array + perSeriesAligner: + description: |- + An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned. - type: string - type: object - type: object - timeSeriesQueryLanguage: - description: A query used to fetch time series - with MQL. - type: string - unitOverride: - description: The unit of data contained in - fetched time series. If non-empty, this - unit will override any unit that accompanies - fetched data. The format is the same as - the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - type: string - type: object - required: - - timeSeriesQuery - type: object - sectionHeader: - description: A widget that defines a section header - for easier navigation of the dashboard. - properties: - dividerBelow: - description: Whether to insert a divider below - the section in the table of contents - type: boolean - subtitle: - description: The subtitle of the section - type: string - type: object - singleViewGroup: - description: A widget that groups the other widgets - by using a dropdown menu. - type: object - text: - description: A raw string or markdown displaying textual - content. - properties: - content: - description: The text content to be displayed. - type: string - format: - description: How the text content is formatted. + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned. + type: string + type: object + type: object + timeSeriesQueryLanguage: + description: A query used to fetch time + series with MQL. + type: string + unitOverride: + description: The unit of data contained + in fetched time series. If non-empty, + this unit will override any unit that + accompanies fetched data. The format + is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) + field in `MetricDescriptor`. + type: string + type: object + type: object + type: array + metricVisualization: + description: Optional. 
Store rendering strategy type: string - style: - description: How the text is styled - properties: - backgroundColor: - description: The background color as a hex - string. "#RRGGBB" or "#RGB" - type: string - fontSize: - description: Font sizes for both the title - and content. The title will still be larger - relative to the content. - type: string - horizontalAlignment: - description: The horizontal alignment of both - the title and content - type: string - padding: - description: The amount of padding around - the widget - type: string - pointerLocation: - description: The pointer location for this - widget (also sometimes called a "tail") - type: string - textColor: - description: The text color as a hex string. - "#RRGGBB" or "#RGB" - type: string - verticalAlignment: - description: The vertical alignment of both - the title and content - type: string - type: object + required: + - dataSets type: object title: description: Optional. The title of the widget. diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go index e7f5c615ba..90b41711e5 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -118,6 +118,14 @@ type DashboardColumnLayout struct { Columns []DashboardColumns `json:"columns,omitempty"` } +type DashboardColumnSettings struct { + /* Required. The id of the column. */ + Column string `json:"column"` + + /* Required. Whether the column should be visible on page load. */ + Visible bool `json:"visible"` +} + type DashboardColumns struct { /* The relative weight of this column. The column weight is used to adjust the width of columns on the screen (relative to peers). Greater the weight, greater the width of the column on the screen. If omitted, a value of 1 is used while rendering. */ // +optional @@ -464,6 +472,12 @@ type DashboardStyle struct { VerticalAlignment *string `json:"verticalAlignment,omitempty"` } +type DashboardTableDisplayOptions struct { + /* Optional. This field is unused and has been replaced by TimeSeriesTable.column_settings */ + // +optional + ShownColumns []string `json:"shownColumns,omitempty"` +} + type DashboardText struct { /* The text content to be displayed. */ // +optional @@ -589,6 +603,19 @@ type DashboardTimeSeriesQuery struct { UnitOverride *string `json:"unitOverride,omitempty"` } +type DashboardTimeSeriesTable struct { + /* Optional. The list of the persistent column settings for the table. */ + // +optional + ColumnSettings []DashboardColumnSettings `json:"columnSettings,omitempty"` + + /* Required. The data displayed in this table. */ + DataSets []DashboardDataSets `json:"dataSets"` + + /* Optional. Store rendering strategy */ + // +optional + MetricVisualization *string `json:"metricVisualization,omitempty"` +} + type DashboardWidget struct { /* A chart of alert policy data. */ // +optional @@ -634,6 +661,10 @@ type DashboardWidget struct { // +optional Text *DashboardText `json:"text,omitempty"` + /* A widget that displays time series data in a tabular format. */ + // +optional + TimeSeriesTable *DashboardTimeSeriesTable `json:"timeSeriesTable,omitempty"` + /* Optional. The title of the widget. 
*/ // +optional Title *string `json:"title,omitempty"` @@ -688,6 +719,10 @@ type DashboardWidgets struct { // +optional Text *DashboardText `json:"text,omitempty"` + /* A widget that displays time series data in a tabular format. */ + // +optional + TimeSeriesTable *DashboardTimeSeriesTable `json:"timeSeriesTable,omitempty"` + /* Optional. The title of the widget. */ // +optional Title *string `json:"title,omitempty"` diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go index 594669b203..0170a30462 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -640,6 +640,22 @@ func (in *DashboardColumnLayout) DeepCopy() *DashboardColumnLayout { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardColumnSettings) DeepCopyInto(out *DashboardColumnSettings) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardColumnSettings. +func (in *DashboardColumnSettings) DeepCopy() *DashboardColumnSettings { + if in == nil { + return nil + } + out := new(DashboardColumnSettings) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DashboardColumns) DeepCopyInto(out *DashboardColumns) { *out = *in @@ -1251,6 +1267,27 @@ func (in *DashboardStyle) DeepCopy() *DashboardStyle { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardTableDisplayOptions) DeepCopyInto(out *DashboardTableDisplayOptions) { + *out = *in + if in.ShownColumns != nil { + in, out := &in.ShownColumns, &out.ShownColumns + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardTableDisplayOptions. +func (in *DashboardTableDisplayOptions) DeepCopy() *DashboardTableDisplayOptions { + if in == nil { + return nil + } + out := new(DashboardTableDisplayOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DashboardText) DeepCopyInto(out *DashboardText) { *out = *in @@ -1477,6 +1514,39 @@ func (in *DashboardTimeSeriesQuery) DeepCopy() *DashboardTimeSeriesQuery { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardTimeSeriesTable) DeepCopyInto(out *DashboardTimeSeriesTable) { + *out = *in + if in.ColumnSettings != nil { + in, out := &in.ColumnSettings, &out.ColumnSettings + *out = make([]DashboardColumnSettings, len(*in)) + copy(*out, *in) + } + if in.DataSets != nil { + in, out := &in.DataSets, &out.DataSets + *out = make([]DashboardDataSets, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricVisualization != nil { + in, out := &in.MetricVisualization, &out.MetricVisualization + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardTimeSeriesTable. 
+func (in *DashboardTimeSeriesTable) DeepCopy() *DashboardTimeSeriesTable { + if in == nil { + return nil + } + out := new(DashboardTimeSeriesTable) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DashboardWidget) DeepCopyInto(out *DashboardWidget) { *out = *in @@ -1535,6 +1605,11 @@ func (in *DashboardWidget) DeepCopyInto(out *DashboardWidget) { *out = new(DashboardText) (*in).DeepCopyInto(*out) } + if in.TimeSeriesTable != nil { + in, out := &in.TimeSeriesTable, &out.TimeSeriesTable + *out = new(DashboardTimeSeriesTable) + (*in).DeepCopyInto(*out) + } if in.Title != nil { in, out := &in.Title, &out.Title *out = new(string) @@ -1616,6 +1691,11 @@ func (in *DashboardWidgets) DeepCopyInto(out *DashboardWidgets) { *out = new(DashboardText) (*in).DeepCopyInto(*out) } + if in.TimeSeriesTable != nil { + in, out := &in.TimeSeriesTable, &out.TimeSeriesTable + *out = new(DashboardTimeSeriesTable) + (*in).DeepCopyInto(*out) + } if in.Title != nil { in, out := &in.Title, &out.Title *out = new(string) diff --git a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go index 18d49675f5..83df3d984f 100644 --- a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go @@ -717,7 +717,7 @@ func TimeSeriesTable_FromProto(mapCtx *MapContext, in *pb.TimeSeriesTable) *krm. } out := &krm.TimeSeriesTable{} out.DataSets = Slice_FromProto(mapCtx, in.DataSets, TimeSeriesTable_TableDataSet_FromProto) - // MISSING: MetricVisualization + out.MetricVisualization = Enum_FromProto(mapCtx, in.MetricVisualization) out.ColumnSettings = Slice_FromProto(mapCtx, in.ColumnSettings, TimeSeriesTable_ColumnSettings_FromProto) return out } @@ -727,19 +727,11 @@ func TimeSeriesTable_ToProto(mapCtx *MapContext, in *krm.TimeSeriesTable) *pb.Ti } out := &pb.TimeSeriesTable{} out.DataSets = Slice_ToProto(mapCtx, in.DataSets, TimeSeriesTable_TableDataSet_ToProto) - // MISSING: MetricVisualization + out.MetricVisualization = Enum_ToProto[pb.TimeSeriesTable_MetricVisualization](mapCtx, in.MetricVisualization) out.ColumnSettings = Slice_ToProto(mapCtx, in.ColumnSettings, TimeSeriesTable_ColumnSettings_ToProto) return out } -func TimeSeriesTable_ColumnSettings_FromProto(mapCtx *MapContext, in *pb.TimeSeriesTable_ColumnSettings) *krm.TimeSeriesTable_ColumnSettings { - if in == nil { - return nil - } - out := &krm.TimeSeriesTable_ColumnSettings{} - out.Column = LazyPtr(in.GetColumn()) - out.Visible = LazyPtr(in.GetVisible()) - return out -} + func TimeSeriesTable_ColumnSettings_ToProto(mapCtx *MapContext, in *krm.TimeSeriesTable_ColumnSettings) *pb.TimeSeriesTable_ColumnSettings { if in == nil { return nil @@ -782,7 +774,7 @@ func Widget_FromProto(mapCtx *MapContext, in *pb.Widget) *krm.Widget { out.Text = Text_FromProto(mapCtx, in.GetText()) out.Blank = Empty_FromProto(mapCtx, in.GetBlank()) out.AlertChart = AlertChart_FromProto(mapCtx, in.GetAlertChart()) - // MISSING: TimeSeriesTable + out.TimeSeriesTable = TimeSeriesTable_FromProto(mapCtx, in.GetTimeSeriesTable()) out.CollapsibleGroup = CollapsibleGroup_FromProto(mapCtx, in.GetCollapsibleGroup()) out.LogsPanel = LogsPanel_FromProto(mapCtx, in.GetLogsPanel()) // MISSING: IncidentList @@ -814,7 +806,9 @@ func Widget_ToProto(mapCtx *MapContext, in *krm.Widget) *pb.Widget { if oneof := AlertChart_ToProto(mapCtx, 
in.AlertChart); oneof != nil { out.Content = &pb.Widget_AlertChart{AlertChart: oneof} } - // MISSING: TimeSeriesTable + if oneof := TimeSeriesTable_ToProto(mapCtx, in.TimeSeriesTable); oneof != nil { + out.Content = &pb.Widget_TimeSeriesTable{TimeSeriesTable: oneof} + } if oneof := CollapsibleGroup_ToProto(mapCtx, in.CollapsibleGroup); oneof != nil { out.Content = &pb.Widget_CollapsibleGroup{CollapsibleGroup: oneof} } diff --git a/pkg/controller/direct/monitoring/dashboard_mappings.go b/pkg/controller/direct/monitoring/dashboard_mappings.go index ea4174143c..fede68452a 100644 --- a/pkg/controller/direct/monitoring/dashboard_mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_mappings.go @@ -206,3 +206,17 @@ func TimeSeriesQuery_PrometheusQuery_ToProto(mapCtx *MapContext, in *string) *pb out.PrometheusQuery = *in return out } + +func TimeSeriesTable_ColumnSettings_FromProto(mapCtx *MapContext, in *pb.TimeSeriesTable_ColumnSettings) *krm.TimeSeriesTable_ColumnSettings { + if in == nil { + return nil + } + out := &krm.TimeSeriesTable_ColumnSettings{} + out.Column = LazyPtr(in.GetColumn()) + + // We want to always output the visible field, i.e. `visible: false` + // We probably can automate this, because the visible field is required. + out.Visible = PtrTo(in.GetVisible()) + + return out +} diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden index dbc496876a..110ddc9fc7 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden @@ -104,6 +104,22 @@ spec: - v1 - v2 title: ErrorReporting Widget + - timeSeriesTable: + columnSettings: + - column: column1 + visible: true + - column: column2 + visible: false + dataSets: + - timeSeriesQuery: + timeSeriesFilter: + aggregation: + alignmentPeriod: "60" + perSeriesAligner: ALIGN_RATE + filter: metric.type="compute.googleapis.com/instance/disk/read_bytes_count" + resource.type="gce_instance" + metricVisualization: NUMBER + title: TimeSeriesTable Widget displayName: monitoringdashboard-full projectRef: external: ${projectId} \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml index 5bbd5d642f..6cbe8ca4ef 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml @@ -112,6 +112,22 @@ spec: - v1 - v2 title: ErrorReporting Widget + - timeSeriesTable: + columnSettings: + - column: column1 + visible: true + - column: column2 + visible: false + dataSets: + - timeSeriesQuery: + timeSeriesFilter: + aggregation: + alignmentPeriod: 60s + perSeriesAligner: ALIGN_RATE + 
filter: metric.type="compute.googleapis.com/instance/disk/read_bytes_count" + resource.type="gce_instance" + metricVisualization: NUMBER + title: TimeSeriesTable Widget displayName: monitoringdashboard-full projectRef: external: ${projectId} diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log index c24d30bff3..49cf6b2702 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log @@ -351,6 +351,34 @@ x-goog-request-params: parent=projects%2F${projectId} ] }, "title": "ErrorReporting Widget" + }, + { + "timeSeriesTable": { + "columnSettings": [ + { + "column": "column1", + "visible": true + }, + { + "column": "column2" + } + ], + "dataSets": [ + { + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": 2 + }, + "filter": "metric.type=\"compute.googleapis.com/instance/disk/read_bytes_count\" resource.type=\"gce_instance\"" + } + } + } + ], + "metricVisualization": 1 + }, + "title": "TimeSeriesTable Widget" } ] } @@ -533,6 +561,34 @@ X-Xss-Protection: 0 ] }, "title": "ErrorReporting Widget" + }, + { + "timeSeriesTable": { + "columnSettings": [ + { + "column": "column1", + "visible": true + }, + { + "column": "column2" + } + ], + "dataSets": [ + { + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"compute.googleapis.com/instance/disk/read_bytes_count\" resource.type=\"gce_instance\"" + } + } + } + ], + "metricVisualization": "NUMBER" + }, + "title": "TimeSeriesTable Widget" } ] } @@ -723,6 +779,34 @@ X-Xss-Protection: 0 ] }, "title": "ErrorReporting Widget" + }, + { + "timeSeriesTable": { + "columnSettings": [ + { + "column": "column1", + "visible": true + }, + { + "column": "column2" + } + ], + "dataSets": [ + { + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"compute.googleapis.com/instance/disk/read_bytes_count\" resource.type=\"gce_instance\"" + } + } + } + ], + "metricVisualization": "NUMBER" + }, + "title": "TimeSeriesTable Widget" } ] } diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml index 5819ee6d14..41a923a623 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml @@ -126,3 +126,18 @@ spec: versions: - v1 - v2 + - title: "TimeSeriesTable Widget" + timeSeriesTable: + columnSettings: + - column: column1 + visible: true + - column: column2 + visible: false + metricVisualization: NUMBER + dataSets: + - timeSeriesQuery: + timeSeriesFilter: + filter: "metric.type=\"compute.googleapis.com/instance/disk/read_bytes_count\" resource.type=\"gce_instance\"" + aggregation: + alignmentPeriod: "60s" + perSeriesAligner: "ALIGN_RATE" diff --git 
a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md index 0fddc16e62..961a9533c5 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md @@ -241,6 +241,67 @@ columnLayout: pointerLocation: string textColor: string verticalAlignment: string + timeSeriesTable: + columnSettings: + - column: string + visible: boolean + dataSets: + - minAlignmentPeriod: string + tableDisplayOptions: + shownColumns: + - string + tableTemplate: string + timeSeriesQuery: + outputFullDuration: boolean + prometheusQuery: string + timeSeriesFilter: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + pickTimeSeriesFilter: + direction: string + numTimeSeries: integer + rankingMethod: string + secondaryAggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + timeSeriesFilterRatio: + denominator: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + numerator: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + pickTimeSeriesFilter: + direction: string + numTimeSeries: integer + rankingMethod: string + secondaryAggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + timeSeriesQueryLanguage: string + unitOverride: string + metricVisualization: string title: string xyChart: chartOptions: @@ -476,6 +537,67 @@ gridLayout: pointerLocation: string textColor: string verticalAlignment: string + timeSeriesTable: + columnSettings: + - column: string + visible: boolean + dataSets: + - minAlignmentPeriod: string + tableDisplayOptions: + shownColumns: + - string + tableTemplate: string + timeSeriesQuery: + outputFullDuration: boolean + prometheusQuery: string + timeSeriesFilter: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + pickTimeSeriesFilter: + direction: string + numTimeSeries: integer + rankingMethod: string + secondaryAggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + timeSeriesFilterRatio: + denominator: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + numerator: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + pickTimeSeriesFilter: + direction: string + numTimeSeries: integer + rankingMethod: string + secondaryAggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + timeSeriesQueryLanguage: string + unitOverride: string + metricVisualization: string title: string xyChart: chartOptions: @@ -712,6 +834,67 @@ mosaicLayout: pointerLocation: string textColor: string verticalAlignment: string + timeSeriesTable: + columnSettings: + - column: string + visible: boolean + 
dataSets: + - minAlignmentPeriod: string + tableDisplayOptions: + shownColumns: + - string + tableTemplate: string + timeSeriesQuery: + outputFullDuration: boolean + prometheusQuery: string + timeSeriesFilter: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + pickTimeSeriesFilter: + direction: string + numTimeSeries: integer + rankingMethod: string + secondaryAggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + timeSeriesFilterRatio: + denominator: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + numerator: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + pickTimeSeriesFilter: + direction: string + numTimeSeries: integer + rankingMethod: string + secondaryAggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + timeSeriesQueryLanguage: string + unitOverride: string + metricVisualization: string title: string xyChart: chartOptions: @@ -956,6 +1139,67 @@ rowLayout: pointerLocation: string textColor: string verticalAlignment: string + timeSeriesTable: + columnSettings: + - column: string + visible: boolean + dataSets: + - minAlignmentPeriod: string + tableDisplayOptions: + shownColumns: + - string + tableTemplate: string + timeSeriesQuery: + outputFullDuration: boolean + prometheusQuery: string + timeSeriesFilter: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + pickTimeSeriesFilter: + direction: string + numTimeSeries: integer + rankingMethod: string + secondaryAggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + timeSeriesFilterRatio: + denominator: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + numerator: + aggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + filter: string + pickTimeSeriesFilter: + direction: string + numTimeSeries: integer + rankingMethod: string + secondaryAggregation: + alignmentPeriod: string + crossSeriesReducer: string + groupByFields: + - string + perSeriesAligner: string + timeSeriesQueryLanguage: string + unitOverride: string + metricVisualization: string title: string xyChart: chartOptions: @@ -3183,57 +3427,67 @@ rowLayout: -

columnLayout.columns[].widgets[].title

+

columnLayout.columns[].widgets[].timeSeriesTable

Optional

-

string

-

{% verbatim %}Optional. The title of the widget.{% endverbatim %}

+

object

+

{% verbatim %}A widget that displays time series data in a tabular format.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart

+

columnLayout.columns[].widgets[].timeSeriesTable.columnSettings

Optional

-

object

-

{% verbatim %}A chart of time series data.{% endverbatim %}

+

list (object)

+

{% verbatim %}Optional. The list of the persistent column settings for the table.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.chartOptions

+

columnLayout.columns[].widgets[].timeSeriesTable.columnSettings[]

Optional

object

-

{% verbatim %}Display options for the chart.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.chartOptions.mode

-

Optional

+

columnLayout.columns[].widgets[].timeSeriesTable.columnSettings[].column

+

Required*

string

-

{% verbatim %}The chart mode.{% endverbatim %}

+

{% verbatim %}Required. The id of the column.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.dataSets

+

columnLayout.columns[].widgets[].timeSeriesTable.columnSettings[].visible

+

Required*

+ + +

boolean

+

{% verbatim %}Required. Whether the column should be visible on page load.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].timeSeriesTable.dataSets

Required*

list (object)

-

{% verbatim %}Required. The data displayed in this chart.{% endverbatim %}

+

{% verbatim %}Required. The data displayed in this table.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.dataSets[]

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[]

Required*

@@ -3243,47 +3497,67 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].legendTemplate

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].minAlignmentPeriod

Optional

string

-

{% verbatim %}A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value.{% endverbatim %}

+

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.dataSets[].minAlignmentPeriod

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].tableDisplayOptions

Optional

-

string

-

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

+

object

+

{% verbatim %}Optional. Table display options for configuring how the table is rendered.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.dataSets[].plotType

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].tableDisplayOptions.shownColumns

Optional

-

string

-

{% verbatim %}How this data should be plotted on the chart.{% endverbatim %}

+

list (string)

+

{% verbatim %}Optional. This field is unused and has been replaced by TimeSeriesTable.column_settings{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery

-

Required*

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].tableDisplayOptions.shownColumns[]

+

Optional

-

object

-

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

+

string

+

{% verbatim %}{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.outputFullDuration

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].tableTemplate

+

Optional

+ + +

string

+

{% verbatim %}Optional. A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value i.e. "${resource.labels.project_id}."{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery

+

Optional

+ + +

object

+

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

+ + + + +

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.outputFullDuration

Optional

@@ -3299,7 +3573,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.prometheusQuery

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.prometheusQuery

Optional

@@ -3309,7 +3583,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter

Optional

@@ -3319,7 +3593,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

Optional

@@ -3329,7 +3603,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

@@ -3350,7 +3624,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

@@ -3373,7 +3647,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

Optional

@@ -3383,7 +3657,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

Optional

@@ -3393,7 +3667,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional
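The `aggregation` fields renamed in the hunks above (`alignmentPeriod`, `perSeriesAligner`, `crossSeriesReducer`, `groupByFields`) normally travel together. A minimal sketch of one data set's query, with an assumed metric and illustrative enum values:

```yaml
# fragment of ...dataSets[].timeSeriesQuery (sketch)
timeSeriesFilter:
  filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
  aggregation:
    alignmentPeriod: "60s"           # must be at least 60 seconds
    perSeriesAligner: ALIGN_MEAN     # must not be ALIGN_NONE when a reducer is set
    crossSeriesReducer: REDUCE_MEAN  # combine the aligned series into one per group
    groupByFields:
    - resource.label.zone            # assumed label path; one output series per zone
```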

@@ -3418,7 +3692,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

Required*

@@ -3428,7 +3702,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

Optional

@@ -3438,7 +3712,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

Optional

@@ -3448,7 +3722,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -3458,7 +3732,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

Optional
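`pickTimeSeriesFilter` keeps only the best-ranked series after filtering and aggregation. A short sketch; the enum values shown are the usual Monitoring dashboards API values, assumed rather than quoted from this patch:

```yaml
# fragment of ...timeSeriesQuery.timeSeriesFilter (sketch)
pickTimeSeriesFilter:
  rankingMethod: METHOD_MEAN  # rank each series by its mean value
  direction: TOP              # keep the highest-ranked series
  numTimeSeries: 5            # let at most five series through
```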

@@ -3468,7 +3742,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

Optional

@@ -3478,7 +3752,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

@@ -3499,7 +3773,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

@@ -3522,7 +3796,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

Optional

@@ -3532,7 +3806,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

Optional

@@ -3542,7 +3816,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

@@ -3567,7 +3841,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

Optional

@@ -3577,7 +3851,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

Optional

@@ -3587,7 +3861,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

Optional

@@ -3597,7 +3871,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

Optional

@@ -3618,7 +3892,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

@@ -3641,7 +3915,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

Optional

@@ -3651,7 +3925,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

Optional

@@ -3661,7 +3935,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

@@ -3686,7 +3960,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

Required*

@@ -3696,7 +3970,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

Optional

@@ -3706,7 +3980,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

Optional

@@ -3716,7 +3990,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

@@ -3737,7 +4011,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

Optional

@@ -3760,7 +4034,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

Optional

@@ -3770,7 +4044,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

Optional

@@ -3780,7 +4054,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

@@ -3805,7 +4079,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

Required*
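`timeSeriesFilterRatio` plots the ratio of two filtered streams; `numerator` and `denominator` each carry their own `filter` and `aggregation`. A sketch of an assumed error-rate ratio (the load balancer metric and label are illustrative):

```yaml
# fragment of ...dataSets[].timeSeriesQuery (sketch)
timeSeriesFilterRatio:
  numerator:
    filter: 'metric.type="loadbalancing.googleapis.com/https/request_count" metric.labels.response_code_class="500"'
    aggregation:
      alignmentPeriod: "60s"
      perSeriesAligner: ALIGN_RATE
  denominator:
    filter: 'metric.type="loadbalancing.googleapis.com/https/request_count"'
    aggregation:
      alignmentPeriod: "60s"
      perSeriesAligner: ALIGN_RATE
```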

@@ -3815,7 +4089,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

Optional

@@ -3825,7 +4099,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

Optional

@@ -3835,7 +4109,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -3845,7 +4119,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

Optional

@@ -3855,7 +4129,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

Optional

@@ -3865,7 +4139,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

Optional

@@ -3886,7 +4160,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

Optional

@@ -3909,7 +4183,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

Optional

@@ -3919,7 +4193,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

Optional

@@ -3929,7 +4203,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

Optional

@@ -3954,7 +4228,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

Optional

@@ -3964,7 +4238,7 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.unitOverride

+

columnLayout.columns[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.unitOverride

Optional
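Instead of a filter, a data set can supply a query directly: `prometheusQuery` takes PromQL, `timeSeriesQueryLanguage` takes MQL, and `unitOverride` relabels the unit of whatever the query returns. A sketch with an assumed PromQL expression:

```yaml
# fragment of ...dataSets[].timeSeriesQuery (sketch)
prometheusQuery: |
  sum by (cluster) (rate(container_cpu_usage_seconds_total[5m]))
unitOverride: "1"  # assumed unit string, in the MetricDescriptor `unit` format
```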

@@ -3974,383 +4248,485 @@ rowLayout: -

columnLayout.columns[].widgets[].xyChart.thresholds

+

columnLayout.columns[].widgets[].timeSeriesTable.metricVisualization

Optional

-

list (object)

-

{% verbatim %}Threshold lines drawn horizontally across the chart.{% endverbatim %}

+

string

+

{% verbatim %}Optional. Store rendering strategy{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.thresholds[]

+

columnLayout.columns[].widgets[].title

Optional

-

object

-

{% verbatim %}{% endverbatim %}

+

string

+

{% verbatim %}Optional. The title of the widget.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.thresholds[].color

+

columnLayout.columns[].widgets[].xyChart

Optional

-

string

-

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

+

object

+

{% verbatim %}A chart of time series data.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.thresholds[].direction

+

columnLayout.columns[].widgets[].xyChart.chartOptions

Optional

-

string

-

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

+

object

+

{% verbatim %}Display options for the chart.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.thresholds[].label

+

columnLayout.columns[].widgets[].xyChart.chartOptions.mode

Optional

string

-

{% verbatim %}A label for the threshold.{% endverbatim %}

+

{% verbatim %}The chart mode.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.thresholds[].targetAxis

-

Optional

+

columnLayout.columns[].widgets[].xyChart.dataSets

+

Required*

-

string

-

{% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}

+

list (object)

+

{% verbatim %}Required. The data displayed in this chart.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.thresholds[].value

-

Optional

+

columnLayout.columns[].widgets[].xyChart.dataSets[]

+

Required*

-

float

-

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

+

object

+

{% verbatim %}{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.timeshiftDuration

+

columnLayout.columns[].widgets[].xyChart.dataSets[].legendTemplate

Optional

string

-

{% verbatim %}The duration used to display a comparison chart. A comparison chart simultaneously shows values from two similar-length time periods (e.g., week-over-week metrics). The duration must be positive, and it can only be applied to charts with data sets of LINE plot type.{% endverbatim %}

+

{% verbatim %}A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.xAxis

+

columnLayout.columns[].widgets[].xyChart.dataSets[].minAlignmentPeriod

Optional

-

object

-

{% verbatim %}The properties applied to the x-axis.{% endverbatim %}

+

string

+

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.xAxis.label

+

columnLayout.columns[].widgets[].xyChart.dataSets[].plotType

Optional

string

-

{% verbatim %}The label of the axis.{% endverbatim %}

+

{% verbatim %}How this data should be plotted on the chart.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.xAxis.scale

-

Optional

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery

+

Required*

-

string

-

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

+

object

+

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.yAxis

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.outputFullDuration

Optional

-

object

-

{% verbatim %}The properties applied to the y-axis.{% endverbatim %}

+

boolean

+

{% verbatim %}Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.yAxis.label

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.prometheusQuery

Optional

string

-

{% verbatim %}The label of the axis.{% endverbatim %}

+

{% verbatim %}A query used to fetch time series with PromQL.{% endverbatim %}

-

columnLayout.columns[].widgets[].xyChart.yAxis.scale

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter

Optional

-

string

-

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

+

object

+

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

-

displayName

-

Required

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

+

Optional

-

string

-

{% verbatim %}Required. The mutable, human-readable name.{% endverbatim %}

+

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

-

gridLayout

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

-

object

-

{% verbatim %}Content is arranged with a basic layout that re-flows a simple list of informational elements like widgets or tiles.{% endverbatim %}

+

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

gridLayout.columns

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

-

integer

-

{% verbatim %}The number of columns into which the view's width is divided. If omitted or set to zero, a system default will be used while rendering.{% endverbatim %}

+

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned.{% endverbatim %}

-

gridLayout.widgets

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

Optional

-

list (object)

-

{% verbatim %}The informational elements that are arranged into the columns row-first.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

gridLayout.widgets[]

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

Optional

-

object

+

string

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].alertChart

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

-

object

-

{% verbatim %}A chart of alert policy data.{% endverbatim %}

+

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned.{% endverbatim %}

-

gridLayout.widgets[].alertChart.alertPolicyRef

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

Required*

-

object

-

{% verbatim %}Required. A reference to the MonitoringAlertPolicy.{% endverbatim %}

+

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

-

gridLayout.widgets[].alertChart.alertPolicyRef.external

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

Optional

-

string

-

{% verbatim %}The MonitoringAlertPolicy link in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", when not managed by KCC.{% endverbatim %}

+

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

-

gridLayout.widgets[].alertChart.alertPolicyRef.name

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

Optional

string

-

{% verbatim %}The `name` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

-

gridLayout.widgets[].alertChart.alertPolicyRef.namespace

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

Optional

-

string

-

{% verbatim %}The `namespace` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

+

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

-

gridLayout.widgets[].blank

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

Optional

-

object

-

{% verbatim %}A blank space.{% endverbatim %}

+

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

-

gridLayout.widgets[].collapsibleGroup

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

Optional

object

-

{% verbatim %}A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets.{% endverbatim %}

+

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

-

gridLayout.widgets[].collapsibleGroup.collapsed

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

-

boolean

-

{% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}

+

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

gridLayout.widgets[].errorReportingPanel

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

-

object

-

{% verbatim %}A widget that displays a list of error groups.{% endverbatim %}

+

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned.{% endverbatim %}

-

gridLayout.widgets[].errorReportingPanel.projectRefs

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

Optional

-

list (object)

-

{% verbatim %}The projects from which to gather errors.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

gridLayout.widgets[].errorReportingPanel.projectRefs[]

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

Optional

-

object

-

{% verbatim %}The Project that this resource belongs to.{% endverbatim %}

+

string

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].errorReportingPanel.projectRefs[].external

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

string

-

{% verbatim %}The `projectID` field of a project, when not managed by KCC.{% endverbatim %}

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned.{% endverbatim %}

-

gridLayout.widgets[].errorReportingPanel.projectRefs[].kind

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

Optional

-

string

-

{% verbatim %}The kind of the Project resource; optional but must be `Project` if provided.{% endverbatim %}

+

object

+

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

-

gridLayout.widgets[].errorReportingPanel.projectRefs[].name

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

Optional

-

string

-

{% verbatim %}The `name` field of a `Project` resource.{% endverbatim %}

+

object

+

{% verbatim %}The denominator of the ratio.{% endverbatim %}

-

gridLayout.widgets[].errorReportingPanel.projectRefs[].namespace

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

Optional

-

string

-

{% verbatim %}The `namespace` field of a `Project` resource.{% endverbatim %}

+

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

-

gridLayout.widgets[].errorReportingPanel.services

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

Optional

-

list (string)

-

{% verbatim %}An identifier of the service, such as the name of the - executable, job, or Google App Engine service name. This field is expected - to have a low number of values that are relatively stable over time, as - opposed to `version`, which can be changed whenever new code is deployed. +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. - Contains the service name for error reports extracted from Google - App Engine logs or `default` if the App Engine default service is used.{% endverbatim %}

+ The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

gridLayout.widgets[].errorReportingPanel.services[]

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

string

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned.{% endverbatim %}

-

gridLayout.widgets[].errorReportingPanel.versions

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

Optional

list (string)

-

{% verbatim %}Represents the source code version that the developer provided, which could represent a version label or a Git SHA-1 hash, for example. For App Engine standard environment, the version is set to the version of the app.{% endverbatim %}

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

gridLayout.widgets[].errorReportingPanel.versions[]

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

Optional
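Most of the `xyChart` fields in this block compose a single widget: `dataSets` carry the query and plot type, `thresholds` draw horizontal reference lines, and `chartOptions` and the axes control presentation. A minimal sketch; the metric, the 0.9 threshold, and the enum values are illustrative assumptions:

```yaml
# fragment of spec.columnLayout.columns[].widgets[] (sketch)
- title: "CPU utilization"
  xyChart:
    chartOptions:
      mode: COLOR
    dataSets:
    - plotType: LINE
      legendTemplate: "${resource.labels.instance_id}"
      minAlignmentPeriod: "60s"
      timeSeriesQuery:
        timeSeriesFilter:
          filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
          aggregation:
            alignmentPeriod: "60s"
            perSeriesAligner: ALIGN_MEAN
    thresholds:
    - label: "High CPU"
      value: 0.9
      targetAxis: Y1
    yAxis:
      scale: LINEAR
```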

@@ -4360,213 +4736,211 @@ rowLayout: -

gridLayout.widgets[].id

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

string

-

{% verbatim %}Optional. The widget id. Ids may be made up of alphanumerics, dashes and underscores. Widget ids are optional.{% endverbatim %}

- - - - -

gridLayout.widgets[].logsPanel

-

Optional

- - -

object

-

{% verbatim %}A widget that shows a stream of logs.{% endverbatim %}

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned.{% endverbatim %}

-

gridLayout.widgets[].logsPanel.filter

-

Optional

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

Required*

string

-

{% verbatim %}A filter that chooses which log entries to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries.{% endverbatim %}

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

-

gridLayout.widgets[].logsPanel.resourceNames

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

Optional

-

list (object)

-

{% verbatim %}The names of logging resources to collect logs for. Currently only projects are supported. If empty, the widget will default to the host project.{% endverbatim %}

+

object

+

{% verbatim %}The numerator of the ratio.{% endverbatim %}

-

gridLayout.widgets[].logsPanel.resourceNames[]

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

Optional

object

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

-

gridLayout.widgets[].logsPanel.resourceNames[].external

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

string

-

{% verbatim %}The external name of the referenced resource{% endverbatim %}

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

gridLayout.widgets[].logsPanel.resourceNames[].kind

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

Optional

string

-

{% verbatim %}Kind of the referent.{% endverbatim %}

+

{% verbatim %}The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned.{% endverbatim %}

-

gridLayout.widgets[].logsPanel.resourceNames[].name

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

Optional

-

string

-

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

gridLayout.widgets[].logsPanel.resourceNames[].namespace

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

Optional

string

-

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].pieChart

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

-

object

-

{% verbatim %}A widget that displays timeseries data as a pie chart.{% endverbatim %}

+

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned.{% endverbatim %}

-

gridLayout.widgets[].pieChart.chartType

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

Required*

string

-

{% verbatim %}Required. Indicates the visualization type for the PieChart.{% endverbatim %}

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets

-

Required*

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

Optional

-

list (object)

-

{% verbatim %}Required. The queries for the chart's data.{% endverbatim %}

+

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[]

-

Required*

- - -

object

-

{% verbatim %}{% endverbatim %}

- - - - -

gridLayout.widgets[].pieChart.dataSets[].minAlignmentPeriod

-

Optional

- - -

string

-

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

- - - - -

gridLayout.widgets[].pieChart.dataSets[].sliceNameTemplate

-

Optional

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

Optional

string

-

{% verbatim %}Optional. A template for the name of the slice. This name will be displayed in the legend and the tooltip of the pie chart. It replaces the auto-generated names for the slices. For example, if the template is set to `${resource.labels.zone}`, the zone's value will be used for the name instead of the default name.{% endverbatim %}

- - - - -

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery

-

Required*

- - -

object

-

{% verbatim %}Required. The query for the PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`.{% endverbatim %}

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.outputFullDuration

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

Optional

-

boolean

-

{% verbatim %}Optional. If set, Cloud Monitoring will treat the full query duration as - the alignment period so that there will be only 1 output value. - - *Note: This could override the configured alignment period except for - the cases where a series of data points are expected, like - - XyChart - - Scorecard's spark chart{% endverbatim %}

+

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.prometheusQuery

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

Optional

string

-

{% verbatim %}A query used to fetch time series with PromQL.{% endverbatim %}

- - - - -

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter

-

Optional

- - -

object

-

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

Optional

object

-

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

Optional
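The `gridLayout` entries in this span describe the simpler flat layout: a column count plus a list of widgets such as `logsPanel` and `pieChart`. A rough sketch; the log filter, project name, metric, and chart type are illustrative assumptions:

```yaml
# fragment of spec (sketch)
gridLayout:
  columns: 2
  widgets:
  - title: "Recent errors"
    logsPanel:
      filter: "severity>=ERROR"          # assumed Logging query; empty matches all entries
      resourceNames:
      - external: "projects/my-project"  # hypothetical project
  - title: "CPU by zone"
    pieChart:
      chartType: DONUT
      dataSets:
      - sliceNameTemplate: "${resource.labels.zone}"
        timeSeriesQuery:
          timeSeriesFilter:
            filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
            aggregation:
              alignmentPeriod: "300s"
              perSeriesAligner: ALIGN_MEAN
              crossSeriesReducer: REDUCE_SUM
              groupByFields:
              - resource.label.zone      # assumed label path
```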

@@ -4587,7 +4961,7 @@ rowLayout: -

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

Optional

@@ -4610,7 +4984,7 @@ rowLayout: -

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

Optional

@@ -4620,7 +4994,7 @@ rowLayout: -

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

Optional

@@ -4630,7 +5004,7 @@ rowLayout: -

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

Optional

@@ -4655,508 +5029,383 @@ rowLayout: -

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

-

Required*

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

+

Optional

string

-

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

columnLayout.columns[].widgets[].xyChart.dataSets[].timeSeriesQuery.unitOverride

Optional

-

object

-

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+

string

+

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

columnLayout.columns[].widgets[].xyChart.thresholds

Optional

-

string

-

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+

list (object)

+

{% verbatim %}Threshold lines drawn horizontally across the chart.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

columnLayout.columns[].widgets[].xyChart.thresholds[]

Optional

-

integer

-

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+

object

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

columnLayout.columns[].widgets[].xyChart.thresholds[].color

Optional

string

-

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

columnLayout.columns[].widgets[].xyChart.thresholds[].direction

Optional

-

object

-

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

+

string

+

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].xyChart.thresholds[].label

Optional

string

-

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+

{% verbatim %}A label for the threshold.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].xyChart.thresholds[].targetAxis

Optional

string

-

{% verbatim %}The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned.{% endverbatim %}

+

{% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

columnLayout.columns[].widgets[].xyChart.thresholds[].value

Optional

-

list (string)

-

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+

float

+

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

columnLayout.columns[].widgets[].xyChart.timeshiftDuration

Optional

string

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}The duration used to display a comparison chart. A comparison chart simultaneously shows values from two similar-length time periods (e.g., week-over-week metrics). The duration must be positive, and it can only be applied to charts with data sets of LINE plot type.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

columnLayout.columns[].widgets[].xyChart.xAxis

Optional

-

string

-

{% verbatim %}An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned.{% endverbatim %}

+

object

+

{% verbatim %}The properties applied to the x-axis.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

+

columnLayout.columns[].widgets[].xyChart.xAxis.label

Optional

-

object

-

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

+

string

+

{% verbatim %}The label of the axis.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

+

columnLayout.columns[].widgets[].xyChart.xAxis.scale

Optional

-

object

-

{% verbatim %}The denominator of the ratio.{% endverbatim %}

+

string

+

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

columnLayout.columns[].widgets[].xyChart.yAxis

Optional

object

-

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+

{% verbatim %}The properties applied to the y-axis.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

columnLayout.columns[].widgets[].xyChart.yAxis.label

Optional

string

-

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+

{% verbatim %}The label of the axis.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

columnLayout.columns[].widgets[].xyChart.yAxis.scale

Optional

string

-

{% verbatim %}The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned.{% endverbatim %}

+

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}
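Taken together, the `columnLayout.columns[].widgets[].xyChart` threshold and axis fields above could be combined as in the following fragment of a MonitoringDashboard spec. This is only an illustrative sketch: the metric-specific `dataSets` entry is omitted, and the `targetAxis` and `scale` values shown ("Y1", "LINEAR") are assumed enum spellings, not values taken from this document.

```yaml
# Hypothetical fragment of a MonitoringDashboard spec (columnLayout variant).
columnLayout:
  columns:
  - widgets:
    - xyChart:
        # dataSets omitted for brevity; they carry the actual time series queries.
        thresholds:
        - label: "High CPU"             # a label for the threshold
          value: 0.9                    # native scale of the metric
          targetAxis: "Y1"              # assumed enum value
        timeshiftDuration: "604800s"    # week-over-week comparison (LINE plots only)
        xAxis:
          label: "Time"
          scale: "LINEAR"               # assumed enum value; linear is the default
        yAxis:
          label: "CPU utilization"
          scale: "LINEAR"
```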

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

-

Optional

+

displayName

+

Required

-

list (string)

-

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+

string

+

{% verbatim %}Required. The mutable, human-readable name.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

gridLayout

Optional

-

string

-

{% verbatim %}{% endverbatim %}

+

object

+

{% verbatim %}Content is arranged with a basic layout that re-flows a simple list of informational elements like widgets or tiles.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

gridLayout.columns

Optional

-

string

-

{% verbatim %}An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned.{% endverbatim %}

+

integer

+

{% verbatim %}The number of columns into which the view's width is divided. If omitted or set to zero, a system default will be used while rendering.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

-

Required*

+

gridLayout.widgets

+

Optional

-

string

-

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+

list (object)

+

{% verbatim %}The informational elements that are arranged into the columns row-first.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

+

gridLayout.widgets[]

Optional

object

-

{% verbatim %}The numerator of the ratio.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}
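A minimal `gridLayout` dashboard that exercises only the fields described above (`displayName`, `gridLayout.columns`, `gridLayout.widgets[]`) might look roughly like the sketch below. The apiVersion, metadata name, and the use of a `blank` widget as a placeholder are assumptions for illustration.

```yaml
# Minimal sketch of a MonitoringDashboard using gridLayout.
apiVersion: monitoring.cnrm.cloud.google.com/v1beta1   # assumed KCC apiVersion
kind: MonitoringDashboard
metadata:
  name: monitoringdashboard-sample                     # hypothetical name
spec:
  displayName: "Sample grid dashboard"                 # required, mutable, human-readable
  gridLayout:
    columns: 2          # omit or set to zero to let the renderer pick a default
    widgets:
    - blank: {}         # a blank space; real dashboards would add charts here
```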

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

gridLayout.widgets[].alertChart

Optional

object

-

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+

{% verbatim %}A chart of alert policy data.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

gridLayout.widgets[].alertChart.alertPolicyRef

+

Required*

+ + +

object

+

{% verbatim %}Required. A reference to the MonitoringAlertPolicy.{% endverbatim %}

+ + + + +

gridLayout.widgets[].alertChart.alertPolicyRef.external

Optional

string

-

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+

{% verbatim %}The MonitoringAlertPolicy link in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", when not managed by KCC.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

gridLayout.widgets[].alertChart.alertPolicyRef.name

Optional

string

-

{% verbatim %}The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned.{% endverbatim %}

+

{% verbatim %}The `name` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

gridLayout.widgets[].alertChart.alertPolicyRef.namespace

Optional

-

list (string)

-

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+

string

+

{% verbatim %}The `namespace` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}
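An `alertChart` widget only needs a reference to an existing alert policy. A sketch, assuming a KCC-managed `MonitoringAlertPolicy` named `alertpolicy-sample` in the same namespace (the `external` form in the comment is the alternative for policies not managed by KCC):

```yaml
# Hypothetical entry under gridLayout.widgets[] referencing an alert policy.
- alertChart:
    alertPolicyRef:
      name: alertpolicy-sample          # `name` of a MonitoringAlertPolicy resource
      # namespace: other-namespace      # only needed when the policy lives elsewhere
      # external: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]  # when not managed by KCC
```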

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

gridLayout.widgets[].blank

Optional

-

string

-

{% verbatim %}{% endverbatim %}

+

object

+

{% verbatim %}A blank space.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

gridLayout.widgets[].collapsibleGroup

Optional

-

string

-

{% verbatim %}An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned.{% endverbatim %}

+

object

+

{% verbatim %}A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

-

Required*

+

gridLayout.widgets[].collapsibleGroup.collapsed

+

Optional

-

string

-

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+

boolean

+

{% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

gridLayout.widgets[].errorReportingPanel

Optional

object

-

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+

{% verbatim %}A widget that displays a list of error groups.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

gridLayout.widgets[].errorReportingPanel.projectRefs

Optional

-

string

-

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+

list (object)

+

{% verbatim %}The projects from which to gather errors.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

gridLayout.widgets[].errorReportingPanel.projectRefs[]

Optional

-

integer

-

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+

object

+

{% verbatim %}The Project that this resource belongs to.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

gridLayout.widgets[].errorReportingPanel.projectRefs[].external

Optional

string

-

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+

{% verbatim %}The `projectID` field of a project, when not managed by KCC.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

gridLayout.widgets[].errorReportingPanel.projectRefs[].kind

Optional

-

object

-

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

+

string

+

{% verbatim %}The kind of the Project resource; optional but must be `Project` if provided.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

gridLayout.widgets[].errorReportingPanel.projectRefs[].name

Optional

string

-

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+

{% verbatim %}The `name` field of a `Project` resource.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

gridLayout.widgets[].errorReportingPanel.projectRefs[].namespace

Optional

string

-

{% verbatim %}The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned.{% endverbatim %}

+

{% verbatim %}The `namespace` field of a `Project` resource.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

gridLayout.widgets[].errorReportingPanel.services

Optional

list (string)

-

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+

{% verbatim %}An identifier of the service, such as the name of the + executable, job, or Google App Engine service name. This field is expected + to have a low number of values that are relatively stable over time, as + opposed to `version`, which can be changed whenever new code is deployed. + + Contains the service name for error reports extracted from Google + App Engine logs or `default` if the App Engine default service is used.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

gridLayout.widgets[].errorReportingPanel.services[]

Optional

@@ -5166,249 +5415,187 @@ rowLayout: -

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

gridLayout.widgets[].errorReportingPanel.versions

Optional

-

string

-

{% verbatim %}An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned.{% endverbatim %}

+

list (string)

+

{% verbatim %}Represents the source code version that the developer provided, which could represent a version label or a Git SHA-1 hash, for example. For App Engine standard environment, the version is set to the version of the app.{% endverbatim %}

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

+

gridLayout.widgets[].errorReportingPanel.versions[]

Optional

string

-

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}
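The `errorReportingPanel` fields above can be combined as follows; the project reference, service name, and version strings are placeholders rather than values from this document.

```yaml
# Hypothetical entry under gridLayout.widgets[] showing an Error Reporting panel.
- errorReportingPanel:
    projectRefs:
    - name: project-sample              # `name` of a Project resource managed by KCC
      # external: my-project-id         # `projectID`, when not managed by KCC
    services:
    - "checkout-service"                # stable service identifier
    versions:
    - "v1.4.2"                          # version label or Git SHA-1
```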

-

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.unitOverride

+

gridLayout.widgets[].id

Optional

string

-

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

+

{% verbatim %}Optional. The widget id. Ids may be made up of alphanumerics, dashes and underscores. Widget ids are optional.{% endverbatim %}

-

gridLayout.widgets[].pieChart.showLabels

+

gridLayout.widgets[].logsPanel

Optional

-

boolean

-

{% verbatim %}Optional. Indicates whether or not the pie chart should show slices' labels{% endverbatim %}

+

object

+

{% verbatim %}A widget that shows a stream of logs.{% endverbatim %}

-

gridLayout.widgets[].scorecard

+

gridLayout.widgets[].logsPanel.filter

Optional

-

object

-

{% verbatim %}A scorecard summarizing time series data.{% endverbatim %}

+

string

+

{% verbatim %}A filter that chooses which log entries to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries.{% endverbatim %}

-

gridLayout.widgets[].scorecard.gaugeView

+

gridLayout.widgets[].logsPanel.resourceNames

Optional

-

object

-

{% verbatim %}Will cause the scorecard to show a gauge chart.{% endverbatim %}

+

list (object)

+

{% verbatim %}The names of logging resources to collect logs for. Currently only projects are supported. If empty, the widget will default to the host project.{% endverbatim %}

-

gridLayout.widgets[].scorecard.gaugeView.lowerBound

+

gridLayout.widgets[].logsPanel.resourceNames[]

Optional

-

float

-

{% verbatim %}The lower bound for this gauge chart. The value of the chart should always be greater than or equal to this.{% endverbatim %}

+

object

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].scorecard.gaugeView.upperBound

+

gridLayout.widgets[].logsPanel.resourceNames[].external

Optional

-

float

-

{% verbatim %}The upper bound for this gauge chart. The value of the chart should always be less than or equal to this.{% endverbatim %}

+

string

+

{% verbatim %}The external name of the referenced resource{% endverbatim %}

-

gridLayout.widgets[].scorecard.sparkChartView

+

gridLayout.widgets[].logsPanel.resourceNames[].kind

Optional

-

object

-

{% verbatim %}Will cause the scorecard to show a spark chart.{% endverbatim %}

+

string

+

{% verbatim %}Kind of the referent.{% endverbatim %}

-

gridLayout.widgets[].scorecard.sparkChartView.minAlignmentPeriod

+

gridLayout.widgets[].logsPanel.resourceNames[].name

Optional

string

-

{% verbatim %}The lower bound on data point frequency in the chart implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes it would not make sense to fetch and align data at one minute intervals. This field is optional and exists only as a hint.{% endverbatim %}

+

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

-

gridLayout.widgets[].scorecard.sparkChartView.sparkChartType

-

Required*

+

gridLayout.widgets[].logsPanel.resourceNames[].namespace

+

Optional

string

-

{% verbatim %}Required. The type of sparkchart to show in this chartView.{% endverbatim %}

+

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}
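A `logsPanel` widget needs at most a filter and the logging resources to read from; if `resourceNames` is left empty it defaults to the host project. A sketch, assuming an ERROR-severity filter and a KCC-managed project reference:

```yaml
# Hypothetical entry under gridLayout.widgets[] showing a logs panel.
- logsPanel:
    filter: 'severity>=ERROR AND resource.type="k8s_container"'   # advanced logs query
    resourceNames:
    - name: project-sample              # name of the referent Project resource
```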

-

gridLayout.widgets[].scorecard.thresholds

-

Optional

- - -

list (object)

-

{% verbatim %}The thresholds used to determine the state of the scorecard given the - time series' current value. For an actual value x, the scorecard is in a - danger state if x is less than or equal to a danger threshold that triggers - below, or greater than or equal to a danger threshold that triggers above. - Similarly, if x is above/below a warning threshold that triggers - above/below, then the scorecard is in a warning state - unless x also puts - it in a danger state. (Danger trumps warning.) - - As an example, consider a scorecard with the following four thresholds: - - ``` - { - value: 90, - category: 'DANGER', - trigger: 'ABOVE', - }, - { - value: 70, - category: 'WARNING', - trigger: 'ABOVE', - }, - { - value: 10, - category: 'DANGER', - trigger: 'BELOW', - }, - { - value: 20, - category: 'WARNING', - trigger: 'BELOW', - } - ``` - - Then: values less than or equal to 10 would put the scorecard in a DANGER - state, values greater than 10 but less than or equal to 20 a WARNING state, - values strictly between 20 and 70 an OK state, values greater than or equal - to 70 but less than 90 a WARNING state, and values greater than or equal to - 90 a DANGER state.{% endverbatim %}

- - - - -

gridLayout.widgets[].scorecard.thresholds[]

+

gridLayout.widgets[].pieChart

Optional

object

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}A widget that displays timeseries data as a pie chart.{% endverbatim %}

-

gridLayout.widgets[].scorecard.thresholds[].color

-

Optional

+

gridLayout.widgets[].pieChart.chartType

+

Required*

string

-

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

+

{% verbatim %}Required. Indicates the visualization type for the PieChart.{% endverbatim %}

-

gridLayout.widgets[].scorecard.thresholds[].direction

-

Optional

+

gridLayout.widgets[].pieChart.dataSets

+

Required*

-

string

-

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

+

list (object)

+

{% verbatim %}Required. The queries for the chart's data.{% endverbatim %}

-

gridLayout.widgets[].scorecard.thresholds[].label

-

Optional

+

gridLayout.widgets[].pieChart.dataSets[]

+

Required*

-

string

-

{% verbatim %}A label for the threshold.{% endverbatim %}

+

object

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].scorecard.thresholds[].targetAxis

+

gridLayout.widgets[].pieChart.dataSets[].minAlignmentPeriod

Optional

string

-

{% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}

+

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

-

gridLayout.widgets[].scorecard.thresholds[].value

+

gridLayout.widgets[].pieChart.dataSets[].sliceNameTemplate

Optional

-

float

-

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

+

string

+

{% verbatim %}Optional. A template for the name of the slice. This name will be displayed in the legend and the tooltip of the pie chart. It replaces the auto-generated names for the slices. For example, if the template is set to `${resource.labels.zone}`, the zone's value will be used for the name instead of the default name.{% endverbatim %}
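Putting the `pieChart` data-set fields together, a slice-per-zone chart might be declared as below. The `chartType` value, the metric filter, and the aligner/reducer enum values are illustrative assumptions; only the field names come from this reference.

```yaml
# Hypothetical entry under gridLayout.widgets[] showing a pie chart.
- pieChart:
    chartType: "DONUT"                  # assumed enum value
    showLabels: true
    dataSets:
    - minAlignmentPeriod: "600s"        # data published once every 10 minutes
      sliceNameTemplate: "${resource.labels.zone}"
      timeSeriesQuery:
        unitOverride: "1"
        timeSeriesFilter:
          filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
          aggregation:
            alignmentPeriod: "600s"
            perSeriesAligner: "ALIGN_MEAN"      # assumed enum value
            crossSeriesReducer: "REDUCE_MEAN"   # assumed enum value
            groupByFields:
            - "resource.labels.zone"
```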

-

gridLayout.widgets[].scorecard.timeSeriesQuery

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery

Required*

object

-

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

+

{% verbatim %}Required. The query for the PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.outputFullDuration

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.outputFullDuration

Optional

@@ -5424,7 +5611,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.prometheusQuery

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.prometheusQuery

Optional

@@ -5434,7 +5621,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter

Optional

@@ -5444,7 +5631,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

Optional

@@ -5454,7 +5641,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

@@ -5475,7 +5662,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

@@ -5498,7 +5685,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

Optional

@@ -5508,7 +5695,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

Optional

@@ -5518,7 +5705,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

@@ -5543,7 +5730,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.filter

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

Required*

@@ -5553,7 +5740,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

Optional

@@ -5563,7 +5750,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

Optional

@@ -5573,7 +5760,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -5583,7 +5770,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

Optional

@@ -5593,7 +5780,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

Optional

@@ -5603,7 +5790,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

@@ -5624,7 +5811,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

@@ -5647,7 +5834,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

Optional

@@ -5657,7 +5844,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

Optional

@@ -5667,7 +5854,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

@@ -5692,7 +5879,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

Optional

@@ -5702,7 +5889,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

Optional

@@ -5712,7 +5899,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

Optional

@@ -5722,7 +5909,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

Optional

@@ -5743,7 +5930,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

@@ -5766,7 +5953,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

Optional

@@ -5776,7 +5963,7 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

Optional

@@ -5786,7 +5973,126 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

+

Optional

+ + +

object

+

{% verbatim %}The numerator of the ratio.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

@@ -5811,27 +6117,3428 @@ rowLayout: -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

-

Required*

+

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

Optional

+ + +

string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

Optional

+ + +

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

Optional

+ + +

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

Optional

+ + +

object

+

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

+

Optional

+ + +

string

+

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.dataSets[].timeSeriesQuery.unitOverride

+

Optional

+ + +

string

+

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

+ + + + +

gridLayout.widgets[].pieChart.showLabels

+

Optional

+ + +

boolean

+

{% verbatim %}Optional. Indicates whether or not the pie chart should show slices' labels{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard

+

Optional

+ + +

object

+

{% verbatim %}A scorecard summarizing time series data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.gaugeView

+

Optional

+ + +

object

+

{% verbatim %}Will cause the scorecard to show a gauge chart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.gaugeView.lowerBound

+

Optional

+ + +

float

+

{% verbatim %}The lower bound for this gauge chart. The value of the chart should always be greater than or equal to this.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.gaugeView.upperBound

+

Optional

+ + +

float

+

{% verbatim %}The upper bound for this gauge chart. The value of the chart should always be less than or equal to this.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.sparkChartView

+

Optional

+ + +

object

+

{% verbatim %}Will cause the scorecard to show a spark chart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.sparkChartView.minAlignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The lower bound on data point frequency in the chart implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes it would not make sense to fetch and align data at one minute intervals. This field is optional and exists only as a hint.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.sparkChartView.sparkChartType

+

Required*

+ + +

string

+

{% verbatim %}Required. The type of sparkchart to show in this chartView.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.thresholds

+

Optional

+ + +

list (object)

+

{% verbatim %}The thresholds used to determine the state of the scorecard given the + time series' current value. For an actual value x, the scorecard is in a + danger state if x is less than or equal to a danger threshold that triggers + below, or greater than or equal to a danger threshold that triggers above. + Similarly, if x is above/below a warning threshold that triggers + above/below, then the scorecard is in a warning state - unless x also puts + it in a danger state. (Danger trumps warning.) + + As an example, consider a scorecard with the following four thresholds: + + ``` + { + value: 90, + category: 'DANGER', + trigger: 'ABOVE', + }, + { + value: 70, + category: 'WARNING', + trigger: 'ABOVE', + }, + { + value: 10, + category: 'DANGER', + trigger: 'BELOW', + }, + { + value: 20, + category: 'WARNING', + trigger: 'BELOW', + } + ``` + + Then: values less than or equal to 10 would put the scorecard in a DANGER + state, values greater than 10 but less than or equal to 20 a WARNING state, + values strictly between 20 and 70 an OK state, values greater than or equal + to 70 but less than 90 a WARNING state, and values greater than or equal to + 90 a DANGER state.{% endverbatim %}
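Expressed as KCC YAML, the four-threshold scorecard described in that example might look like the fragment below. The example above uses `category`/`trigger` names; here they are mapped onto the documented `color`/`direction` fields, with DANGER rendered as "RED" and WARNING as "YELLOW" as an assumption, and the metric filter and spark-chart type are likewise illustrative.

```yaml
# Hypothetical entry under gridLayout.widgets[] matching the four-threshold example above.
- scorecard:
    timeSeriesQuery:
      timeSeriesFilter:
        filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
    sparkChartView:
      sparkChartType: "SPARK_LINE"                          # assumed enum value
    thresholds:
    - { value: 90, color: "RED",    direction: "ABOVE" }    # DANGER at or above 90
    - { value: 70, color: "YELLOW", direction: "ABOVE" }    # WARNING at or above 70
    - { value: 10, color: "RED",    direction: "BELOW" }    # DANGER at or below 10
    - { value: 20, color: "YELLOW", direction: "BELOW" }    # WARNING at or below 20
```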

+ + + + +

gridLayout.widgets[].scorecard.thresholds[]

+

Optional

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.thresholds[].color

+

Optional

+ + +

string

+

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.thresholds[].direction

+

Optional

+ + +

string

+

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.thresholds[].label

+

Optional

+ + +

string

+

{% verbatim %}A label for the threshold.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.thresholds[].targetAxis

+

Optional

+ + +

string

+

{% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.thresholds[].value

+

Optional

+ + +

float

+

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery

+

Required*

+ + +

object

+

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.outputFullDuration

+

Optional

+ + +

boolean

+

{% verbatim %}Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. + + *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.prometheusQuery

+

Optional

+ + +

string

+

{% verbatim %}A query used to fetch time series with PromQL.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned.{% endverbatim %}
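The alignment and reduction fields described above usually travel together: a reducer requires an aligner other than `ALIGN_NONE` plus an alignment period of at least 60 seconds. A sketch of a scorecard `timeSeriesFilter` with an explicit aggregation, using an assumed metric filter and assumed enum values:

```yaml
# Hypothetical scorecard.timeSeriesQuery fragment with explicit aggregation.
timeSeriesQuery:
  timeSeriesFilter:
    filter: 'metric.type="compute.googleapis.com/instance/network/received_bytes_count" resource.type="gce_instance"'
    aggregation:
      alignmentPeriod: "60s"              # minimum allowed value
      perSeriesAligner: "ALIGN_RATE"      # assumed enum value; required when reducing
      crossSeriesReducer: "REDUCE_SUM"    # assumed enum value
      groupByFields:
      - "resource.labels.zone"            # resource.type is preserved implicitly
```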

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

Optional

+ + +

string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

Optional

+ + +

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

Optional

+ + +

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

Optional

+ + +

object

+

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio

+

Optional

+ + +

object

+

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator

+

Optional

+ + +

object

+

{% verbatim %}The denominator of the ratio.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator

+

Optional

+ + +

object

+

{% verbatim %}The numerator of the ratio.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

Optional

+ + +

string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

Optional

+ + +

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

Optional

+ + +

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

Optional

+ + +

object

+

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +
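The ratio fields above compose in the same way. As a sketch of a 5xx-error-rate style ratio (the metric type, label, and enum values are illustrative placeholders), a `timeSeriesFilterRatio` pairs a numerator and denominator filter and can then rank the resulting series with `pickTimeSeriesFilter`:

```yaml
# Hypothetical scorecard showing an error ratio; metric types, labels, and
# enum values are illustrative only.
scorecard:
  timeSeriesQuery:
    timeSeriesFilterRatio:
      numerator:
        filter: 'metric.type="serviceruntime.googleapis.com/api/request_count" metric.labels.response_code_class="5xx"'
        aggregation:
          alignmentPeriod: "300s"
          perSeriesAligner: "ALIGN_RATE"
          crossSeriesReducer: "REDUCE_SUM"
      denominator:
        filter: 'metric.type="serviceruntime.googleapis.com/api/request_count"'
        aggregation:
          alignmentPeriod: "300s"
          perSeriesAligner: "ALIGN_RATE"
          crossSeriesReducer: "REDUCE_SUM"
      pickTimeSeriesFilter:
        rankingMethod: "METHOD_MEAN"   # rank each series by its mean value
        direction: "TOP"               # keep the highest-ranked series
        numTimeSeries: 5
```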

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesQueryLanguage

+

Optional

+ + +

string

+

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

+ + + + +

gridLayout.widgets[].scorecard.timeSeriesQuery.unitOverride

+

Optional

+ + +

string

+

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

+ + + + +
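As an alternative to filter-based queries, `timeSeriesQueryLanguage` accepts an MQL query directly. A minimal sketch, assuming a CPU-utilization query; the MQL text and unit are illustrative:

```yaml
# Hypothetical scorecard driven by an MQL query; the query text is illustrative.
scorecard:
  timeSeriesQuery:
    timeSeriesQueryLanguage: |
      fetch gce_instance
      | metric 'compute.googleapis.com/instance/cpu/utilization'
      | group_by 1m, [value_utilization_mean: mean(value.utilization)]
    unitOverride: "1"   # treat the fetched data as dimensionless
```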

gridLayout.widgets[].sectionHeader

+

Optional

+ + +

object

+

{% verbatim %}A widget that defines a section header for easier navigation of the dashboard.{% endverbatim %}

+ + + + +

gridLayout.widgets[].sectionHeader.dividerBelow

+

Optional

+ + +

boolean

+

{% verbatim %}Whether to insert a divider below the section in the table of contents{% endverbatim %}

+ + + + +

gridLayout.widgets[].sectionHeader.subtitle

+

Optional

+ + +

string

+

{% verbatim %}The subtitle of the section{% endverbatim %}

+ + + + +
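A `sectionHeader` widget only needs the navigation fields above; for instance (the title and subtitle text are placeholders):

```yaml
# Hypothetical section header used to break up a long dashboard.
- title: "Frontend"
  sectionHeader:
    subtitle: "Latency and error widgets for the frontend service"
    dividerBelow: true
```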

gridLayout.widgets[].singleViewGroup

+

Optional

+ + +

object

+

{% verbatim %}A widget that groups the other widgets by using a dropdown menu.{% endverbatim %}

+ + + + +

gridLayout.widgets[].text

+

Optional

+ + +

object

+

{% verbatim %}A raw string or markdown displaying textual content.{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.content

+

Optional

+ + +

string

+

{% verbatim %}The text content to be displayed.{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.format

+

Optional

+ + +

string

+

{% verbatim %}How the text content is formatted.{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.style

+

Optional

+ + +

object

+

{% verbatim %}How the text is styled{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.style.backgroundColor

+

Optional

+ + +

string

+

{% verbatim %}The background color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.style.fontSize

+

Optional

+ + +

string

+

{% verbatim %}Font sizes for both the title and content. The title will still be larger relative to the content.{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.style.horizontalAlignment

+

Optional

+ + +

string

+

{% verbatim %}The horizontal alignment of both the title and content{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.style.padding

+

Optional

+ + +

string

+

{% verbatim %}The amount of padding around the widget{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.style.pointerLocation

+

Optional

+ + +

string

+

{% verbatim %}The pointer location for this widget (also sometimes called a "tail"){% endverbatim %}

+ + + + +

gridLayout.widgets[].text.style.textColor

+

Optional

+ + +

string

+

{% verbatim %}The text color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

+ + + + +

gridLayout.widgets[].text.style.verticalAlignment

+

Optional

+ + +

string

+

{% verbatim %}The vertical alignment of both the title and content{% endverbatim %}

+ + + + +
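Putting the `text` fields together, a sketch of a markdown note widget follows; the colors, alignment enums, and content are illustrative placeholders, so check the API for the accepted enum names:

```yaml
# Hypothetical text widget; enum values and colors are illustrative placeholders.
- title: "Runbook"
  text:
    content: "See the team runbook for escalation steps."
    format: "MARKDOWN"
    style:
      backgroundColor: "#FFFFFF"
      textColor: "#212121"
      horizontalAlignment: "H_LEFT"
      verticalAlignment: "V_TOP"
```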

gridLayout.widgets[].timeSeriesTable

+

Optional

+ + +

object

+

{% verbatim %}A widget that displays time series data in a tabular format.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.columnSettings

+

Optional

+ + +

list (object)

+

{% verbatim %}Optional. The list of the persistent column settings for the table.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.columnSettings[]

+

Optional

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.columnSettings[].column

+

Required*

+ + +

string

+

{% verbatim %}Required. The id of the column.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.columnSettings[].visible

+

Required*

+ + +

boolean

+

{% verbatim %}Required. Whether the column should be visible on page load.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets

+

Required*

+ + +

list (object)

+

{% verbatim %}Required. The data displayed in this table.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[]

+

Required*

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].minAlignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].tableDisplayOptions

+

Optional

+ + +

object

+

{% verbatim %}Optional. Table display options for configuring how the table is rendered.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].tableDisplayOptions.shownColumns

+

Optional

+ + +

list (string)

+

{% verbatim %}Optional. This field is unused and has been replaced by TimeSeriesTable.column_settings{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].tableDisplayOptions.shownColumns[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].tableTemplate

+

Optional

+ + +

string

+

{% verbatim %}Optional. A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value i.e. "${resource.labels.project_id}."{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery

+

Optional

+ + +

object

+

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.outputFullDuration

+

Optional

+ + +

boolean

+

{% verbatim %}Optional. If set, Cloud Monitoring will treat the full query duration as the alignment period so that there will be only 1 output value. *Note: This could override the configured alignment period except for the cases where a series of data points are expected, like XyChart and Scorecard's spark chart.{% endverbatim %}

+ + + + +
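When a table cell should show a single number rather than a series, `outputFullDuration` can collapse the whole query window into one value. A minimal sketch, assuming an uptime-style metric (the metric type and aligner values are placeholders):

```yaml
# Hypothetical data set that reports one value over the full query duration.
dataSets:
  - timeSeriesQuery:
      outputFullDuration: true
      timeSeriesFilter:
        filter: 'metric.type="compute.googleapis.com/instance/uptime" resource.type="gce_instance"'
        aggregation:
          perSeriesAligner: "ALIGN_SUM"
          crossSeriesReducer: "REDUCE_SUM"
```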

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.prometheusQuery

+

Optional

+ + +

string

+

{% verbatim %}A query used to fetch time series with PromQL.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

Optional

+ + +

string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

Optional

+ + +

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

Optional

+ + +

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

Optional

+ + +

object

+

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

+

Optional

+ + +

object

+

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

+

Optional

+ + +

object

+

{% verbatim %}The denominator of the ratio.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

+

Optional

+ + +

object

+

{% verbatim %}The numerator of the ratio.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

Optional

+ + +

string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

Optional

+ + +

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

Optional

+ + +

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

Optional

+ + +

object

+

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

+

Optional

+ + +

string

+

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.unitOverride

+

Optional

+ + +

string

+

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

+ + + + +

gridLayout.widgets[].timeSeriesTable.metricVisualization

+

Optional

+ + +

string

+

{% verbatim %}Optional. Store rendering strategy{% endverbatim %}

+ + + + +
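Combining the `timeSeriesTable` fields above, here is a sketch of a table fed by a PromQL query. The PromQL expression, metric name, column id, and `metricVisualization` value are illustrative assumptions, not values prescribed by the reference:

```yaml
# Hypothetical time series table; the PromQL expression, column id, and
# metricVisualization value are placeholders.
- title: "Restarts by namespace"
  timeSeriesTable:
    metricVisualization: "NUMBER"
    columnSettings:
      - column: "value"
        visible: true
    dataSets:
      - minAlignmentPeriod: "60s"
        tableTemplate: "${resource.labels.namespace}"
        timeSeriesQuery:
          prometheusQuery: 'sum by (namespace) (rate(kubernetes_io:container_restart_count[5m]))'
          unitOverride: "1"
```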

gridLayout.widgets[].title

+

Optional

+ + +

string

+

{% verbatim %}Optional. The title of the widget.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart

+

Optional

+ + +

object

+

{% verbatim %}A chart of time series data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.chartOptions

+

Optional

+ + +

object

+

{% verbatim %}Display options for the chart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.chartOptions.mode

+

Optional

+ + +

string

+

{% verbatim %}The chart mode.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets

+

Required*

+ + +

list (object)

+

{% verbatim %}Required. The data displayed in this chart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[]

+

Required*

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].legendTemplate

+

Optional

+ + +

string

+

{% verbatim %}A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].minAlignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].plotType

+

Optional

+ + +

string

+

{% verbatim %}How this data should be plotted on the chart.{% endverbatim %}

+ + + + +
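Finally, the `xyChart` fields compose in the same pattern as the scorecard. A sketch of a line chart over a filter query follows; the metric type, resource type, aligner, and enum values are illustrative placeholders:

```yaml
# Hypothetical xyChart widget; metric, aligner, and plot type values are placeholders.
- title: "Request latency"
  xyChart:
    chartOptions:
      mode: "COLOR"
    dataSets:
      - plotType: "LINE"
        legendTemplate: "${resource.labels.zone}"
        minAlignmentPeriod: "60s"
        timeSeriesQuery:
          timeSeriesFilter:
            filter: 'metric.type="loadbalancing.googleapis.com/https/total_latencies" resource.type="https_lb_rule"'
            aggregation:
              alignmentPeriod: "60s"
              perSeriesAligner: "ALIGN_PERCENTILE_95"
```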

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery

+

Required*

+ + +

object

+

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.outputFullDuration

+

Optional

+ + +

boolean

+

{% verbatim %}Optional. If set, Cloud Monitoring will treat the full query duration as the alignment period so that there will be only 1 output value. *Note: This could override the configured alignment period except for the cases where a series of data points are expected, like XyChart and Scorecard's spark chart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.prometheusQuery

+

Optional

+ + +

string

+

{% verbatim %}A query used to fetch time series with PromQL.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

Optional

+ + +

string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

Optional

+ + +

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

Optional

+ + +

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

Optional

+ + +

object

+

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +
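Taken together, the `timeSeriesFilter` fields above map onto a dashboard spec roughly as in the minimal sketch below. This is only a sketch: the metric filter, aligner, reducer, and widget title are illustrative assumptions, not values taken from the generated reference.

```yaml
# Sketch: one gridLayout xyChart data set that aligns a CPU metric to
# 60-second means and then sums the aligned series per zone, keeping only
# the top 5 series.
gridLayout:
  widgets:
  - title: "CPU utilization"          # widget title (assumed)
    xyChart:
      dataSets:
      - plotType: LINE
        minAlignmentPeriod: 60s
        timeSeriesQuery:
          timeSeriesFilter:
            filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
            aggregation:
              alignmentPeriod: 60s
              perSeriesAligner: ALIGN_MEAN
              crossSeriesReducer: REDUCE_SUM
              groupByFields:
              - resource.labels.zone
            pickTimeSeriesFilter:
              rankingMethod: METHOD_MEAN
              direction: TOP
              numTimeSeries: 5
```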

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

+

Optional

+ + +

object

+

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

+

Optional

+ + +

object

+

{% verbatim %}The denominator of the ratio.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

+

Optional

+ + +

object

+

{% verbatim %}The numerator of the ratio.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

Optional

+ + +

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

Required*

+ + +

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

Optional

+ + +

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

Optional

+ + +

string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

Optional

+ + +

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

Optional

+ + +

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

Optional

+ + +

object

+

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

Optional

+ + +

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

Optional

+ + +

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

Optional

+ + +

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+ + + + +
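The `timeSeriesFilterRatio` fields combine in the same way. The sketch below plots a hypothetical error ratio; the metric type and label names are illustrative assumptions only.

```yaml
# Sketch: ratio of 5xx load-balancer responses to all responses, with a
# secondary aggregation applied after the ratio is computed.
dataSets:
- plotType: LINE
  timeSeriesQuery:
    timeSeriesFilterRatio:
      numerator:
        filter: 'metric.type="loadbalancing.googleapis.com/https/request_count" metric.labels.response_code_class="500"'
        aggregation:
          alignmentPeriod: 60s
          perSeriesAligner: ALIGN_RATE
      denominator:
        filter: 'metric.type="loadbalancing.googleapis.com/https/request_count"'
        aggregation:
          alignmentPeriod: 60s
          perSeriesAligner: ALIGN_RATE
      secondaryAggregation:
        alignmentPeriod: 300s
        perSeriesAligner: ALIGN_MEAN
```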

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

+

Optional

+ + +

string

+

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.unitOverride

+

Optional

+ + +

string

+

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

+ + + + +
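As an alternative to filter-based queries, a data set can carry a query string directly. A rough sketch, assuming MQL via `timeSeriesQueryLanguage` (a PromQL string could go in `prometheusQuery` instead); the query text and unit are illustrative.

```yaml
dataSets:
- plotType: LINE
  timeSeriesQuery:
    # Exactly one query style is normally set per timeSeriesQuery.
    timeSeriesQueryLanguage: |
      fetch gce_instance
      | metric 'compute.googleapis.com/instance/cpu/utilization'
      | group_by 1m, [value_utilization_mean: mean(value.utilization)]
      | every 1m
    unitOverride: "1"
```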

gridLayout.widgets[].xyChart.thresholds

+

Optional

+ + +

list (object)

+

{% verbatim %}Threshold lines drawn horizontally across the chart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.thresholds[]

+

Optional

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.thresholds[].color

+

Optional

+ + +

string

+

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.thresholds[].direction

+

Optional

+ + +

string

+

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.thresholds[].label

+

Optional

+ + +

string

+

{% verbatim %}A label for the threshold.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.thresholds[].targetAxis

+

Optional

+ + +

string

+

{% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.thresholds[].value

+

Optional

+ + +

float

+

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.timeshiftDuration

+

Optional

+ + +

string

+

{% verbatim %}The duration used to display a comparison chart. A comparison chart simultaneously shows values from two similar-length time periods (e.g., week-over-week metrics). The duration must be positive, and it can only be applied to charts with data sets of LINE plot type.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.xAxis

+

Optional

+ + +

object

+

{% verbatim %}The properties applied to the x-axis.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.xAxis.label

+

Optional

+ + +

string

+

{% verbatim %}The label of the axis.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.xAxis.scale

+

Optional

+ + +

string

+

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.yAxis

+

Optional

+ + +

object

+

{% verbatim %}The properties applied to the y-axis.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.yAxis.label

+

Optional

+ + +

string

+

{% verbatim %}The label of the axis.{% endverbatim %}

+ + + + +

gridLayout.widgets[].xyChart.yAxis.scale

+

Optional

+ + +

string

+

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

+ + + + +
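The chart-level options above (thresholds, comparison duration, axes) sit alongside `dataSets` on the `xyChart`. A minimal sketch with illustrative values:

```yaml
xyChart:
  dataSets:
  - plotType: LINE
    timeSeriesQuery:
      timeSeriesFilter:
        filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization"'
  thresholds:
  - value: 0.8
    label: "High CPU"
    targetAxis: Y1
  timeshiftDuration: 604800s   # one week, for a week-over-week comparison
  yAxis:
    label: "utilization"
    scale: LINEAR
```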

mosaicLayout

+

Optional

+ + +

object

+

{% verbatim %}The content is arranged as a grid of tiles, with each content widget occupying one or more grid blocks.{% endverbatim %}

+ + + + +

mosaicLayout.columns

+

Optional

+ + +

integer

+

{% verbatim %}The number of columns in the mosaic grid. The number of columns must be between 1 and 12, inclusive.{% endverbatim %}

+ + + + +

mosaicLayout.tiles

+

Optional

+ + +

list (object)

+

{% verbatim %}The tiles to display.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[]

+

Optional

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].height

+

Optional

+ + +

integer

+

{% verbatim %}The height of the tile, measured in grid blocks. Tiles must have a minimum height of 1.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget

+

Optional

+ + +

object

+

{% verbatim %}The informational widget contained in the tile. For example, an `XyChart`.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.alertChart

+

Optional

+ + +

object

+

{% verbatim %}A chart of alert policy data.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.alertChart.alertPolicyRef

+

Required*

+ + +

object

+

{% verbatim %}Required. A reference to the MonitoringAlertPolicy.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.alertChart.alertPolicyRef.external

+

Optional

+ + +

string

+

{% verbatim %}The MonitoringAlertPolicy link in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", when not managed by KCC.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.alertChart.alertPolicyRef.name

+

Optional

+ + +

string

+

{% verbatim %}The `name` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.alertChart.alertPolicyRef.namespace

+

Optional

+ + +

string

+

{% verbatim %}The `namespace` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

+ + + + +
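An `alertChart` tile inside a `mosaicLayout` would look roughly like the sketch below. The project ID, policy names, widget title, and tile sizing are assumptions; tile placement fields other than `height` (such as `width`) come from the Monitoring API and are not reproduced in the rows above.

```yaml
mosaicLayout:
  columns: 12
  tiles:
  - height: 4
    widget:
      title: "Firing alerts"            # widget title (assumed)
      alertChart:
        alertPolicyRef:
          # `name` of a MonitoringAlertPolicy managed by Config Connector...
          name: high-error-rate-policy
          # ...or, for a policy not managed by KCC:
          # external: projects/my-project/alertPolicies/1234567890
```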

mosaicLayout.tiles[].widget.blank

+

Optional

+ + +

object

+

{% verbatim %}A blank space.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.collapsibleGroup

+

Optional

+ + +

object

+

{% verbatim %}A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.collapsibleGroup.collapsed

+

Optional

+ + +

boolean

+

{% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel

+

Optional

+ + +

object

+

{% verbatim %}A widget that displays a list of error groups.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs

+

Optional

+ + +

list (object)

+

{% verbatim %}The projects from which to gather errors.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs[]

+

Optional

+ + +

object

+

{% verbatim %}The Project that this resource belongs to.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs[].external

+

Optional

+ + +

string

+

{% verbatim %}The `projectID` field of a project, when not managed by KCC.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs[].kind

+

Optional

+ + +

string

+

{% verbatim %}The kind of the Project resource; optional but must be `Project` if provided.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs[].name

+

Optional

+ + +

string

+

{% verbatim %}The `name` field of a `Project` resource.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs[].namespace

+

Optional

+ + +

string

+

{% verbatim %}The `namespace` field of a `Project` resource.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.services

+

Optional

+ + +

list (string)

+

{% verbatim %}An identifier of the service, such as the name of the executable, job, or Google App Engine service name. This field is expected to have a low number of values that are relatively stable over time, as opposed to `version`, which can be changed whenever new code is deployed. Contains the service name for error reports extracted from Google App Engine logs or `default` if the App Engine default service is used.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.services[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.versions

+

Optional

+ + +

list (string)

+

{% verbatim %}Represents the source code version that the developer provided, which could represent a version label or a Git SHA-1 hash, for example. For App Engine standard environment, the version is set to the version of the app.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.errorReportingPanel.versions[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ + + + +
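A tile carrying an `errorReportingPanel` might be sketched as follows; the project, service, and version values are purely illustrative.

```yaml
# Another mosaicLayout tiles[] entry.
- height: 4
  widget:
    title: "Recent errors"              # widget title (assumed)
    errorReportingPanel:
      projectRefs:
      - kind: Project
        name: my-project                # `name` of a Project resource (or use `external`)
      services:
      - checkout-service
      versions:
      - v1.4.2
```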

mosaicLayout.tiles[].widget.id

+

Optional

+ + +

string

+

{% verbatim %}Optional. The widget id. Ids may be made up of alphanumerics, dashes and underscores. Widget ids are optional.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.logsPanel

+

Optional

+ + +

object

+

{% verbatim %}A widget that shows a stream of logs.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.logsPanel.filter

+

Optional

+ + +

string

+

{% verbatim %}A filter that chooses which log entries to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.logsPanel.resourceNames

+

Optional

+ + +

list (object)

+

{% verbatim %}The names of logging resources to collect logs for. Currently only projects are supported. If empty, the widget will default to the host project.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.logsPanel.resourceNames[]

+

Optional

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.logsPanel.resourceNames[].external

+

Optional

+ + +

string

+

{% verbatim %}The external name of the referenced resource{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.logsPanel.resourceNames[].kind

+

Optional

+ + +

string

+

{% verbatim %}Kind of the referent.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.logsPanel.resourceNames[].name

+

Optional

+ + +

string

+

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.logsPanel.resourceNames[].namespace

+

Optional

+ + +

string

+

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}

+ + + + +
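A `logsPanel` tile follows the same pattern. The filter below is an illustrative example; leaving `resourceNames` empty would fall back to the host project.

```yaml
# Another mosaicLayout tiles[] entry.
- height: 4
  widget:
    title: "Error logs"                 # widget title (assumed)
    logsPanel:
      filter: 'severity>=ERROR resource.type="k8s_container"'
      resourceNames:
      - external: projects/my-project   # illustrative project
```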

mosaicLayout.tiles[].widget.pieChart

+

Optional

+ + +

object

+

{% verbatim %}A widget that displays timeseries data as a pie chart.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.pieChart.chartType

+

Required*

+ + +

string

+

{% verbatim %}Required. Indicates the visualization type for the PieChart.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.pieChart.dataSets

+

Required*

+ + +

list (object)

+

{% verbatim %}Required. The queries for the chart's data.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.pieChart.dataSets[]

+

Required*

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.pieChart.dataSets[].minAlignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.pieChart.dataSets[].sliceNameTemplate

+

Optional

+ + +

string

+

{% verbatim %}Optional. A template for the name of the slice. This name will be displayed in the legend and the tooltip of the pie chart. It replaces the auto-generated names for the slices. For example, if the template is set to `${resource.labels.zone}`, the zone's value will be used for the name instead of the default name.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery

+

Required*

+ + +

object

+

{% verbatim %}Required. The query for the PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.outputFullDuration

+

Optional

+ + +

boolean

+

{% verbatim %}Optional. If set, Cloud Monitoring will treat the full query duration as the alignment period so that there will be only 1 output value. *Note: This could override the configured alignment period except for the cases where a series of data points are expected, like XyChart and Scorecard's spark chart.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.prometheusQuery

+

Optional

string

-

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+

{% verbatim %}A query used to fetch time series with PromQL.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter

Optional

object

-

{% verbatim %}The numerator of the ratio.{% endverbatim %}

+

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

Optional

@@ -5841,7 +9548,7 @@ rowLayout:
-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

@@ -5862,7 +9569,7 @@ rowLayout:
-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

@@ -5885,7 +9592,7 @@ rowLayout:
-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

Optional

@@ -5895,7 +9602,7 @@ rowLayout:
-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

Optional

@@ -5905,7 +9612,7 @@ rowLayout:
-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

@@ -5930,7 +9637,7 @@ rowLayout:
-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

Required*

@@ -5940,7 +9647,7 @@ rowLayout:
-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

Optional

@@ -5950,7 +9657,7 @@ rowLayout:
-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

Optional

@@ -5960,7 +9667,7 @@ rowLayout:
-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -5970,7 +9677,7 @@ rowLayout:
-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

Optional

@@ -5980,17 +9687,17 @@ rowLayout:
-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

Optional

object

-

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

+

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

@@ -6011,7 +9718,7 @@ rowLayout:
-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

@@ -6034,7 +9741,7 @@ rowLayout:
-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

Optional

@@ -6044,7 +9751,7 @@ rowLayout:
-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

Optional

@@ -6054,7 +9761,7 @@ rowLayout:
-

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

@@ -6072,320 +9779,153 @@ rowLayout:
Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then
- `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

- - - - -

gridLayout.widgets[].scorecard.timeSeriesQuery.timeSeriesQueryLanguage

-

Optional

- - -

string

-

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

- - - - -

gridLayout.widgets[].scorecard.timeSeriesQuery.unitOverride

-

Optional

- - -

string

-

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

- - - - -

gridLayout.widgets[].sectionHeader

-

Optional

- - -

object

-

{% verbatim %}A widget that defines a section header for easier navigation of the dashboard.{% endverbatim %}

- - - - -

gridLayout.widgets[].sectionHeader.dividerBelow

-

Optional

- - -

boolean

-

{% verbatim %}Whether to insert a divider below the section in the table of contents{% endverbatim %}

- - - - -

gridLayout.widgets[].sectionHeader.subtitle

-

Optional

- - -

string

-

{% verbatim %}The subtitle of the section{% endverbatim %}

- - - - -

gridLayout.widgets[].singleViewGroup

-

Optional

- - -

object

-

{% verbatim %}A widget that groups the other widgets by using a dropdown menu.{% endverbatim %}

- - - - -

gridLayout.widgets[].text

-

Optional

- - -

object

-

{% verbatim %}A raw string or markdown displaying textual content.{% endverbatim %}

- - - - -

gridLayout.widgets[].text.content

-

Optional

- - -

string

-

{% verbatim %}The text content to be displayed.{% endverbatim %}

- - - - -

gridLayout.widgets[].text.format

-

Optional

- - -

string

-

{% verbatim %}How the text content is formatted.{% endverbatim %}

- - - - -

gridLayout.widgets[].text.style

-

Optional

- - -

object

-

{% verbatim %}How the text is styled{% endverbatim %}

- - - - -

gridLayout.widgets[].text.style.backgroundColor

-

Optional

- - -

string

-

{% verbatim %}The background color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

- - - - -

gridLayout.widgets[].text.style.fontSize

-

Optional

- - -

string

-

{% verbatim %}Font sizes for both the title and content. The title will still be larger relative to the content.{% endverbatim %}

- - - - -

gridLayout.widgets[].text.style.horizontalAlignment

-

Optional

- - -

string

-

{% verbatim %}The horizontal alignment of both the title and content{% endverbatim %}

- - - - -

gridLayout.widgets[].text.style.padding

-

Optional

- - -

string

-

{% verbatim %}The amount of padding around the widget{% endverbatim %}

- - - - -

gridLayout.widgets[].text.style.pointerLocation

-

Optional

- - -

string

-

{% verbatim %}The pointer location for this widget (also sometimes called a "tail"){% endverbatim %}

- - - - -

gridLayout.widgets[].text.style.textColor

-

Optional

- - -

string

-

{% verbatim %}The text color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

- - - - -

gridLayout.widgets[].text.style.verticalAlignment

-

Optional

- - -

string

-

{% verbatim %}The vertical alignment of both the title and content{% endverbatim %}

- - - - -

gridLayout.widgets[].title

-

Optional

- - -

string

-

{% verbatim %}Optional. The title of the widget.{% endverbatim %}

- - - - -

gridLayout.widgets[].xyChart

-

Optional

- - -

object

-

{% verbatim %}A chart of time series data.{% endverbatim %}

- - - - -

gridLayout.widgets[].xyChart.chartOptions

-

Optional

- - -

object

-

{% verbatim %}Display options for the chart.{% endverbatim %}

+ `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

-

gridLayout.widgets[].xyChart.chartOptions.mode

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

Optional

-

string

-

{% verbatim %}The chart mode.{% endverbatim %}

+

object

+

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets

-

Required*

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

+

Optional

-

list (object)

-

{% verbatim %}Required. The data displayed in this chart.{% endverbatim %}

+

object

+

{% verbatim %}The denominator of the ratio.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[]

-

Required*

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

Optional

object

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].legendTemplate

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

Optional

string

-

{% verbatim %}A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value.{% endverbatim %}

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored. The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].minAlignmentPeriod

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

string

-

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].plotType

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

Optional

-

string

-

{% verbatim %}How this data should be plotted on the chart.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery

-

Required*

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

Optional

-

object

-

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

+

string

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.outputFullDuration

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

-

boolean

-
-{% verbatim %}Optional. If set, Cloud Monitoring will treat the full query duration as the alignment period so that there will be only 1 output value. *Note: This could override the configured alignment period except for the cases where a series of data points are expected, like XyChart and Scorecard's spark chart.{% endverbatim %}

+
+string
+
+{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.prometheusQuery

-

Optional

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

Required*

string

-

{% verbatim %}A query used to fetch time series with PromQL.{% endverbatim %}

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

Optional

object

-

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

+

{% verbatim %}The numerator of the ratio.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

Optional

@@ -6395,7 +9935,7 @@ rowLayout:
-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

@@ -6416,7 +9956,7 @@ rowLayout:
-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

Optional

@@ -6439,7 +9979,7 @@ rowLayout:
-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

Optional

@@ -6449,7 +9989,7 @@ rowLayout:
-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

Optional

@@ -6459,7 +9999,7 @@ rowLayout:
-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

@@ -6484,7 +10024,7 @@ rowLayout:
-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

Required*

@@ -6494,7 +10034,7 @@ rowLayout:
-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

Optional

@@ -6504,7 +10044,7 @@ rowLayout:
-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

Optional

@@ -6514,7 +10054,7 @@ rowLayout:
-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -6524,7 +10064,7 @@ rowLayout:
-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

Optional

@@ -6534,17 +10074,17 @@ rowLayout:
-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

Optional

object

-

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

+

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

Optional

@@ -6565,7 +10105,7 @@ rowLayout:
-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

Optional

@@ -6588,7 +10128,7 @@ rowLayout:
-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

Optional

@@ -6598,7 +10138,7 @@ rowLayout:
-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

Optional

@@ -6608,7 +10148,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

Optional

@@ -6619,160 +10159,274 @@ rowLayout: mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

+

Optional

+ + +

string

+

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.unitOverride

+

Optional

+ + +

string

+

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.pieChart.showLabels

+

Optional

+ + +

boolean

+

{% verbatim %}Optional. Indicates whether or not the pie chart should show slices' labels{% endverbatim %}

+ + + + +
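To make the pieChart rows above easier to follow, here is a minimal MonitoringDashboard sketch that exercises them. It is illustrative only: the metric filter, resource names, and enum values are assumptions for demonstration, not values taken from this patch.

```yaml
# Illustrative sketch only -- filter, names, and enum values are assumptions.
apiVersion: monitoring.cnrm.cloud.google.com/v1beta1
kind: MonitoringDashboard
metadata:
  name: monitoringdashboard-piechart-sample  # hypothetical name
spec:
  displayName: "Pie chart sample"
  mosaicLayout:
    columns: 12
    tiles:
    - width: 6
      height: 4
      widget:
        title: "CPU utilization by zone"
        pieChart:
          chartType: PIE          # required visualization type for the PieChart
          showLabels: true        # show the slices' labels
          dataSets:
          - sliceNameTemplate: "${resource.labels.zone}"
            timeSeriesQuery:
              timeSeriesFilter:
                filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization"'
                aggregation:
                  alignmentPeriod: "60s"
                  perSeriesAligner: ALIGN_MEAN
                  crossSeriesReducer: REDUCE_SUM
                  groupByFields:
                  - "resource.labels.zone"
```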

mosaicLayout.tiles[].widget.scorecard

+

Optional

+ + +

object

+

{% verbatim %}A scorecard summarizing time series data.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.scorecard.gaugeView

+

Optional

+ + +

object

+

{% verbatim %}Will cause the scorecard to show a gauge chart.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.scorecard.gaugeView.lowerBound

+

Optional

+ + +

float

+

{% verbatim %}The lower bound for this gauge chart. The value of the chart should always be greater than or equal to this.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.scorecard.gaugeView.upperBound

+

Optional

+ + +

float

+

{% verbatim %}The upper bound for this gauge chart. The value of the chart should always be less than or equal to this.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.scorecard.sparkChartView

+

Optional

+ + +

object

+

{% verbatim %}Will cause the scorecard to show a spark chart.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.scorecard.sparkChartView.minAlignmentPeriod

+

Optional

+ + +

string

+

{% verbatim %}The lower bound on data point frequency in the chart implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes it would not make sense to fetch and align data at one minute intervals. This field is optional and exists only as a hint.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.scorecard.sparkChartView.sparkChartType

+

Required*

+ + +

string

+

{% verbatim %}Required. The type of sparkchart to show in this chartView.{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.scorecard.thresholds

+

Optional

+ + +

list (object)

+

{% verbatim %}The thresholds used to determine the state of the scorecard given the
+ time series' current value. For an actual value x, the scorecard is in a
+ danger state if x is less than or equal to a danger threshold that triggers
+ below, or greater than or equal to a danger threshold that triggers above.
+ Similarly, if x is above/below a warning threshold that triggers
+ above/below, then the scorecard is in a warning state - unless x also puts
+ it in a danger state. (Danger trumps warning.)
+
+ As an example, consider a scorecard with the following four thresholds:
- Time series data must be aligned in order to perform cross-time
- series reduction. If `cross_series_reducer` is specified, then
- `per_series_aligner` must be specified and not equal to `ALIGN_NONE`
- and `alignment_period` must be specified; otherwise, an error is
- returned.{% endverbatim %}

+ ```
+ {
+ value: 90,
+ category: 'DANGER',
+ trigger: 'ABOVE',
+ },
+ {
+ value: 70,
+ category: 'WARNING',
+ trigger: 'ABOVE',
+ },
+ {
+ value: 10,
+ category: 'DANGER',
+ trigger: 'BELOW',
+ },
+ {
+ value: 20,
+ category: 'WARNING',
+ trigger: 'BELOW',
+ }
+ ```
+
+ Then: values less than or equal to 10 would put the scorecard in a DANGER
+ state, values greater than 10 but less than or equal to 20 a WARNING state,
+ values strictly between 20 and 70 an OK state, values greater than or equal
+ to 70 but less than 90 a WARNING state, and values greater than or equal to
+ 90 a DANGER state.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

+

mosaicLayout.tiles[].widget.scorecard.thresholds[]

Optional

object

-

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

+

mosaicLayout.tiles[].widget.scorecard.thresholds[].color

Optional

-

object

-

{% verbatim %}The denominator of the ratio.{% endverbatim %}

+

string

+

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

mosaicLayout.tiles[].widget.scorecard.thresholds[].direction

Optional

-

object

-

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+

string

+

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.scorecard.thresholds[].label

Optional

string

-

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+

{% verbatim %}A label for the threshold.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.scorecard.thresholds[].targetAxis

Optional

string

-

{% verbatim %}The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned.{% endverbatim %}

+

{% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

mosaicLayout.tiles[].widget.scorecard.thresholds[].value

Optional

-

list (string)

-

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+

float

+

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}
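The scorecard, gaugeView, and thresholds[] rows above could combine in a tile roughly as in the sketch below (a tiles[] entry under spec.mosaicLayout.tiles, as in the manifest sketch earlier). The filter, bounds, threshold values, and the RED/YELLOW color enum values are assumptions for illustration.

```yaml
# Illustrative sketch only -- filter, bounds, and threshold values are assumptions.
- width: 4
  height: 3
  widget:
    title: "Memory utilization"
    scorecard:
      timeSeriesQuery:
        timeSeriesFilter:
          filter: 'metric.type="agent.googleapis.com/memory/percent_used"'
          aggregation:
            alignmentPeriod: "60s"
            perSeriesAligner: ALIGN_MEAN
      gaugeView:
        lowerBound: 0.0     # the chart value should always be >= this
        upperBound: 100.0   # the chart value should always be <= this
      thresholds:
      - value: 90.0         # defined in the native scale of the metric
        color: RED          # assumed enum value for the danger state
        direction: ABOVE
        label: "danger"
      - value: 70.0
        color: YELLOW       # assumed enum value for the warning state
        direction: ABOVE
        label: "warning"
```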

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

-

Optional

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery

+

Required*

-

string

-

{% verbatim %}{% endverbatim %}

+

object

+

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.outputFullDuration

Optional

-

string

-

{% verbatim %}An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. +

boolean

+

{% verbatim %}Optional. If set, Cloud Monitoring will treat the full query duration as + the alignment period so that there will be only 1 output value. - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned.{% endverbatim %}

+ *Note: This could override the configured alignment period except for + the cases where a series of data points are expected, like + - XyChart + - Scorecard's spark chart{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

-

Required*

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.prometheusQuery

+

Optional

string

-

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+

{% verbatim %}A query used to fetch time series with PromQL.{% endverbatim %}
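As a small fragment, outputFullDuration and prometheusQuery could appear together under a scorecard's timeSeriesQuery as sketched below; the PromQL expression is a placeholder, not taken from this patch.

```yaml
# Illustrative sketch only -- the PromQL expression is a placeholder.
# (This fragment goes under widget.scorecard in a tiles[] entry.)
timeSeriesQuery:
  outputFullDuration: true   # collapse the full query duration into a single output value
  prometheusQuery: 'sum(rate(http_requests_total{job="frontend"}[5m]))'
```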

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter

Optional

object

-

{% verbatim %}The numerator of the ratio.{% endverbatim %}

+

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation

Optional

@@ -6782,7 +10436,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

@@ -6803,7 +10457,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

@@ -6826,7 +10480,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

Optional

@@ -6836,7 +10490,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

Optional

@@ -6846,7 +10500,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

@@ -6871,7 +10525,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.filter

Required*

@@ -6881,7 +10535,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

Optional

@@ -6891,7 +10545,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

Optional

@@ -6901,7 +10555,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -6911,7 +10565,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

Optional

@@ -6921,17 +10575,17 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation

Optional

object

-

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

+

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

@@ -6952,7 +10606,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

@@ -6975,7 +10629,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

Optional

@@ -6985,7 +10639,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

Optional

@@ -6995,7 +10649,7 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

@@ -7020,583 +10674,694 @@ rowLayout: -

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio

Optional

-

string

-

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

+

object

+

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

-

gridLayout.widgets[].xyChart.dataSets[].timeSeriesQuery.unitOverride

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator

Optional

-

string

-

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

+

object

+

{% verbatim %}The denominator of the ratio.{% endverbatim %}

-

gridLayout.widgets[].xyChart.thresholds

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

Optional

-

list (object)

-

{% verbatim %}Threshold lines drawn horizontally across the chart.{% endverbatim %}

+

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

-

gridLayout.widgets[].xyChart.thresholds[]

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

Optional

-

object

-

{% verbatim %}{% endverbatim %}

+

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

gridLayout.widgets[].xyChart.thresholds[].color

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

string

-

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

+

{% verbatim %}The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned.{% endverbatim %}

-

gridLayout.widgets[].xyChart.thresholds[].direction

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

Optional

-

string

-

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

gridLayout.widgets[].xyChart.thresholds[].label

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

Optional

string

-

{% verbatim %}A label for the threshold.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].xyChart.thresholds[].targetAxis

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

string

-

{% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned.{% endverbatim %}

-

gridLayout.widgets[].xyChart.thresholds[].value

-

Optional

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

Required*

-

float

-

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

+

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

-

gridLayout.widgets[].xyChart.timeshiftDuration

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator

Optional

-

string

-

{% verbatim %}The duration used to display a comparison chart. A comparison chart simultaneously shows values from two similar-length time periods (e.g., week-over-week metrics). The duration must be positive, and it can only be applied to charts with data sets of LINE plot type.{% endverbatim %}

+

object

+

{% verbatim %}The numerator of the ratio.{% endverbatim %}

-

gridLayout.widgets[].xyChart.xAxis

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

Optional

object

-

{% verbatim %}The properties applied to the x-axis.{% endverbatim %}

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

-

gridLayout.widgets[].xyChart.xAxis.label

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

string

-

{% verbatim %}The label of the axis.{% endverbatim %}

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

gridLayout.widgets[].xyChart.xAxis.scale

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

Optional

string

-

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

+

{% verbatim %}The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned.{% endverbatim %}

-

gridLayout.widgets[].xyChart.yAxis

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

Optional

-

object

-

{% verbatim %}The properties applied to the y-axis.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

gridLayout.widgets[].xyChart.yAxis.label

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

Optional

string

-

{% verbatim %}The label of the axis.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

gridLayout.widgets[].xyChart.yAxis.scale

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

string

-

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

- - - - -

mosaicLayout

-

Optional

- - -

object

-

{% verbatim %}The content is arranged as a grid of tiles, with each content widget occupying one or more grid blocks.{% endverbatim %}

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned.{% endverbatim %}

-

mosaicLayout.columns

-

Optional

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

Required*

-

integer

-

{% verbatim %}The number of columns in the mosaic grid. The number of columns must be between 1 and 12, inclusive.{% endverbatim %}

+

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

-

mosaicLayout.tiles

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

Optional

-

list (object)

-

{% verbatim %}The tiles to display.{% endverbatim %}

+

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

-

mosaicLayout.tiles[]

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

Optional

-

object

-

{% verbatim %}{% endverbatim %}

+

string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

-

mosaicLayout.tiles[].height

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

Optional

integer

-

{% verbatim %}The height of the tile, measured in grid blocks. Tiles must have a minimum height of 1.{% endverbatim %}

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

-

mosaicLayout.tiles[].widget

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

Optional

-

object

-

{% verbatim %}The informational widget contained in the tile. For example an `XyChart`.{% endverbatim %}

+

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

-

mosaicLayout.tiles[].widget.alertChart

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

Optional

object

-

{% verbatim %}A chart of alert policy data.{% endverbatim %}

+

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

-

mosaicLayout.tiles[].widget.alertChart.alertPolicyRef

-

Required*

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

Optional

-

object

-

{% verbatim %}Required. A reference to the MonitoringAlertPolicy.{% endverbatim %}

+

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used + to divide the data in all the + [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + time. This will be done before the per-series aligner can be applied to + the data. + + The value must be at least 60 seconds. If a per-series aligner other than + `ALIGN_NONE` is specified, this field is required or an error is returned. + If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + specified, then this field is ignored. + + The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

mosaicLayout.tiles[].widget.alertChart.alertPolicyRef.external

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

Optional

string

-

{% verbatim %}The MonitoringAlertPolicy link in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", when not managed by KCC.{% endverbatim %}

+

{% verbatim %}The reduction operation to be used to combine time series into a single + time series, where the value of each data point in the resulting series is + a function of all the already aligned values in the input time series. + + Not all reducer operations can be applied to all time series. The valid + choices depend on the `metric_kind` and the `value_type` of the original + time series. Reduction can yield a time series with a different + `metric_kind` or `value_type` than the input time series. + + Time series data must first be aligned (see `per_series_aligner`) in order + to perform cross-time series reduction. If `cross_series_reducer` is + specified, then `per_series_aligner` must be specified, and must not be + `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + error is returned.{% endverbatim %}

-

mosaicLayout.tiles[].widget.alertChart.alertPolicyRef.name

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

Optional

-

string

-

{% verbatim %}The `name` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

mosaicLayout.tiles[].widget.alertChart.alertPolicyRef.namespace

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

Optional

string

-

{% verbatim %}The `namespace` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].widget.blank

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

Optional

-

object

-

{% verbatim %}A blank space.{% endverbatim %}

+

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single + time series into temporal alignment. Except for `ALIGN_NONE`, all + alignments cause all the data points in an `alignment_period` to be + mathematically grouped together, resulting in a single data point for + each `alignment_period` with end timestamp at the end of the period. + + Not all alignment operations may be applied to all time series. The valid + choices depend on the `metric_kind` and `value_type` of the original time + series. Alignment can change the `metric_kind` or the `value_type` of + the time series. + + Time series data must be aligned in order to perform cross-time + series reduction. If `cross_series_reducer` is specified, then + `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + and `alignment_period` must be specified; otherwise, an error is + returned.{% endverbatim %}

-

mosaicLayout.tiles[].widget.collapsibleGroup

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesQueryLanguage

Optional

-

object

-

{% verbatim %}A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets.{% endverbatim %}

+

string

+

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

-

mosaicLayout.tiles[].widget.collapsibleGroup.collapsed

+

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.unitOverride

Optional

-

boolean

-

{% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}

+

string

+

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

-

mosaicLayout.tiles[].widget.errorReportingPanel

+

mosaicLayout.tiles[].widget.sectionHeader

Optional

object

-

{% verbatim %}A widget that displays a list of error groups.{% endverbatim %}

+

{% verbatim %}A widget that defines a section header for easier navigation of the dashboard.{% endverbatim %}

-

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs

+

mosaicLayout.tiles[].widget.sectionHeader.dividerBelow

Optional

-

list (object)

-

{% verbatim %}The projects from which to gather errors.{% endverbatim %}

+

boolean

+

{% verbatim %}Whether to insert a divider below the section in the table of contents{% endverbatim %}

-

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs[]

+

mosaicLayout.tiles[].widget.sectionHeader.subtitle

Optional

-

object

-

{% verbatim %}The Project that this resource belongs to.{% endverbatim %}

+

string

+

{% verbatim %}The subtitle of the section{% endverbatim %}

-

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs[].external

+

mosaicLayout.tiles[].widget.singleViewGroup

Optional

-

string

-

{% verbatim %}The `projectID` field of a project, when not managed by KCC.{% endverbatim %}

+

object

+

{% verbatim %}A widget that groups the other widgets by using a dropdown menu.{% endverbatim %}
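The sectionHeader and singleViewGroup rows above might be used as in this sketch of two tiles[] entries; the subtitle text and tile sizes are placeholders.

```yaml
# Illustrative sketch only -- subtitle and sizes are placeholders.
- width: 12
  height: 1
  widget:
    sectionHeader:
      subtitle: "Frontend services"
      dividerBelow: true    # insert a divider below the section in the table of contents
- width: 12
  height: 8
  widget:
    singleViewGroup: {}     # groups the widgets it spans behind a dropdown menu
```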

-

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs[].kind

+

mosaicLayout.tiles[].widget.text

Optional

-

string

-

{% verbatim %}The kind of the Project resource; optional but must be `Project` if provided.{% endverbatim %}

+

object

+

{% verbatim %}A raw string or markdown displaying textual content.{% endverbatim %}

-

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs[].name

+

mosaicLayout.tiles[].widget.text.content

Optional

string

-

{% verbatim %}The `name` field of a `Project` resource.{% endverbatim %}

+

{% verbatim %}The text content to be displayed.{% endverbatim %}

-

mosaicLayout.tiles[].widget.errorReportingPanel.projectRefs[].namespace

+

mosaicLayout.tiles[].widget.text.format

Optional

string

-

{% verbatim %}The `namespace` field of a `Project` resource.{% endverbatim %}

+

{% verbatim %}How the text content is formatted.{% endverbatim %}

-

mosaicLayout.tiles[].widget.errorReportingPanel.services

+

mosaicLayout.tiles[].widget.text.style

Optional

-

list (string)

-

{% verbatim %}An identifier of the service, such as the name of the - executable, job, or Google App Engine service name. This field is expected - to have a low number of values that are relatively stable over time, as - opposed to `version`, which can be changed whenever new code is deployed. - - Contains the service name for error reports extracted from Google - App Engine logs or `default` if the App Engine default service is used.{% endverbatim %}

+

object

+

{% verbatim %}How the text is styled{% endverbatim %}

-

mosaicLayout.tiles[].widget.errorReportingPanel.services[]

+

mosaicLayout.tiles[].widget.text.style.backgroundColor

Optional

string

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}The background color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

-

mosaicLayout.tiles[].widget.errorReportingPanel.versions

+

mosaicLayout.tiles[].widget.text.style.fontSize

Optional

-

list (string)

-

{% verbatim %}Represents the source code version that the developer provided, which could represent a version label or a Git SHA-1 hash, for example. For App Engine standard environment, the version is set to the version of the app.{% endverbatim %}

+

string

+

{% verbatim %}Font sizes for both the title and content. The title will still be larger relative to the content.{% endverbatim %}

-

mosaicLayout.tiles[].widget.errorReportingPanel.versions[]

+

mosaicLayout.tiles[].widget.text.style.horizontalAlignment

Optional

string

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}The horizontal alignment of both the title and content{% endverbatim %}

-

mosaicLayout.tiles[].widget.id

+

mosaicLayout.tiles[].widget.text.style.padding

Optional

string

-

{% verbatim %}Optional. The widget id. Ids may be made up of alphanumerics, dashes and underscores. Widget ids are optional.{% endverbatim %}

+

{% verbatim %}The amount of padding around the widget{% endverbatim %}

-

mosaicLayout.tiles[].widget.logsPanel

+

mosaicLayout.tiles[].widget.text.style.pointerLocation

Optional

-

object

-

{% verbatim %}A widget that shows a stream of logs.{% endverbatim %}

+

string

+

{% verbatim %}The pointer location for this widget (also sometimes called a "tail"){% endverbatim %}

-

mosaicLayout.tiles[].widget.logsPanel.filter

+

mosaicLayout.tiles[].widget.text.style.textColor

Optional

string

-

{% verbatim %}A filter that chooses which log entries to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries.{% endverbatim %}

+

{% verbatim %}The text color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

-

mosaicLayout.tiles[].widget.logsPanel.resourceNames

+

mosaicLayout.tiles[].widget.text.style.verticalAlignment

Optional

-

list (object)

-

{% verbatim %}The names of logging resources to collect logs for. Currently only projects are supported. If empty, the widget will default to the host project.{% endverbatim %}

+

string

+

{% verbatim %}The vertical alignment of both the title and content{% endverbatim %}
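The text widget and its style fields could look roughly like this in a tile; the content, the hex colors, and the MARKDOWN/H_LEFT enum values are assumptions for illustration.

```yaml
# Illustrative sketch only -- content, colors, and enum values are assumptions.
- width: 12
  height: 2
  widget:
    text:
      content: "## On-call notes\nSee the runbook for alert response steps."
      format: MARKDOWN                # assumed enum value
      style:
        backgroundColor: "#FFFFFF"    # hex string, "#RRGGBB" or "#RGB"
        textColor: "#212121"
        horizontalAlignment: H_LEFT   # assumed enum value
```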

-

mosaicLayout.tiles[].widget.logsPanel.resourceNames[]

+

mosaicLayout.tiles[].widget.timeSeriesTable

Optional

object

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}A widget that displays time series data in a tabular format.{% endverbatim %}

-

mosaicLayout.tiles[].widget.logsPanel.resourceNames[].external

+

mosaicLayout.tiles[].widget.timeSeriesTable.columnSettings

Optional

-

string

-

{% verbatim %}The external name of the referenced resource{% endverbatim %}

+

list (object)

+

{% verbatim %}Optional. The list of the persistent column settings for the table.{% endverbatim %}

-

mosaicLayout.tiles[].widget.logsPanel.resourceNames[].kind

+

mosaicLayout.tiles[].widget.timeSeriesTable.columnSettings[]

Optional

-

string

-

{% verbatim %}Kind of the referent.{% endverbatim %}

+

object

+

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].widget.logsPanel.resourceNames[].name

-

Optional

+

mosaicLayout.tiles[].widget.timeSeriesTable.columnSettings[].column

+

Required*

string

-

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

+

{% verbatim %}Required. The id of the column.{% endverbatim %}

-

mosaicLayout.tiles[].widget.logsPanel.resourceNames[].namespace

-

Optional

+

mosaicLayout.tiles[].widget.timeSeriesTable.columnSettings[].visible

+

Required*

-

string

-

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}

+

boolean

+

{% verbatim %}Required. Whether the column should be visible on page load.{% endverbatim %}

-

mosaicLayout.tiles[].widget.pieChart

-

Optional

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets

+

Required*

-

object

-

{% verbatim %}A widget that displays timeseries data as a pie chart.{% endverbatim %}

+

list (object)

+

{% verbatim %}Required. The data displayed in this table.{% endverbatim %}

-

mosaicLayout.tiles[].widget.pieChart.chartType

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[]

Required*

-

string

-

{% verbatim %}Required. Indicates the visualization type for the PieChart.{% endverbatim %}

+

object

+

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].widget.pieChart.dataSets

-

Required*

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].minAlignmentPeriod

+

Optional

-

list (object)

-

{% verbatim %}Required. The queries for the chart's data.{% endverbatim %}

+

string

+

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

-

mosaicLayout.tiles[].widget.pieChart.dataSets[]

-

Required*

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].tableDisplayOptions

+

Optional

object

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}Optional. Table display options for configuring how the table is rendered.{% endverbatim %}

-

mosaicLayout.tiles[].widget.pieChart.dataSets[].minAlignmentPeriod

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].tableDisplayOptions.shownColumns

+

Optional

+ + +

list (string)

+

{% verbatim %}Optional. This field is unused and has been replaced by TimeSeriesTable.column_settings{% endverbatim %}

+ + + + +

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].tableDisplayOptions.shownColumns[]

Optional

string

-

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].widget.pieChart.dataSets[].sliceNameTemplate

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].tableTemplate

Optional

string

-

{% verbatim %}Optional. A template for the name of the slice. This name will be displayed in the legend and the tooltip of the pie chart. It replaces the auto-generated names for the slices. For example, if the template is set to `${resource.labels.zone}`, the zone's value will be used for the name instead of the default name.{% endverbatim %}

+

{% verbatim %}Optional. A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value i.e. "${resource.labels.project_id}."{% endverbatim %}
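The timeSeriesTable rows above (columnSettings, dataSets, tableTemplate) might fit together as in this sketch of a tiles[] entry; the metric filter, column id, and template are placeholders rather than values from this patch.

```yaml
# Illustrative sketch only -- filter, column id, and template are placeholders.
- width: 6
  height: 4
  widget:
    timeSeriesTable:
      columnSettings:
      - column: "value"             # hypothetical column id
        visible: true
      dataSets:
      - minAlignmentPeriod: "600s"
        tableTemplate: "${resource.labels.service_name}"
        timeSeriesQuery:
          timeSeriesFilter:
            filter: 'metric.type="serviceruntime.googleapis.com/api/request_count"'
            aggregation:
              alignmentPeriod: "600s"
              perSeriesAligner: ALIGN_RATE
              crossSeriesReducer: REDUCE_SUM
              groupByFields:
              - "resource.labels.service_name"
```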

-

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery

-

Required*

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery

+

Optional

object

-

{% verbatim %}Required. The query for the PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`.{% endverbatim %}

+

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

-

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.outputFullDuration

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.outputFullDuration

Optional

@@ -7612,7 +11377,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.prometheusQuery

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.prometheusQuery

Optional

@@ -7622,7 +11387,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter

Optional

@@ -7632,7 +11397,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

Optional

@@ -7642,7 +11407,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

@@ -7663,7 +11428,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

@@ -7686,7 +11451,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

Optional

@@ -7696,7 +11461,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

Optional

@@ -7706,7 +11471,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

@@ -7731,7 +11496,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

Required*

@@ -7741,7 +11506,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

Optional

@@ -7751,7 +11516,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

Optional

@@ -7761,7 +11526,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -7771,7 +11536,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

Optional

@@ -7781,7 +11546,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

Optional

@@ -7791,7 +11556,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

@@ -7812,7 +11577,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

@@ -7835,7 +11600,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

Optional

@@ -7845,7 +11610,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

Optional

@@ -7855,7 +11620,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

@@ -7880,7 +11645,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

Optional

@@ -7890,7 +11655,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

Optional

@@ -7900,7 +11665,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

Optional

@@ -7910,7 +11675,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

Optional

@@ -7931,7 +11696,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

@@ -7954,7 +11719,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

Optional

@@ -7964,7 +11729,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

Optional

@@ -7974,7 +11739,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

@@ -7999,7 +11764,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

Required*

@@ -8009,7 +11774,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

Optional

@@ -8019,7 +11784,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

Optional

@@ -8029,7 +11794,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

@@ -8050,7 +11815,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

Optional

@@ -8073,7 +11838,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

Optional

@@ -8083,7 +11848,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

Optional

@@ -8093,7 +11858,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

@@ -8118,7 +11883,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

Required*

@@ -8128,7 +11893,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

Optional

@@ -8138,7 +11903,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

Optional

@@ -8148,7 +11913,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -8158,7 +11923,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

Optional

@@ -8168,7 +11933,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

Optional

@@ -8178,7 +11943,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

Optional

@@ -8199,7 +11964,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

Optional

@@ -8222,7 +11987,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

Optional

@@ -8232,7 +11997,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

Optional

@@ -8242,7 +12007,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

Optional

@@ -8267,7 +12032,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

Optional

@@ -8277,7 +12042,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.dataSets[].timeSeriesQuery.unitOverride

+

mosaicLayout.tiles[].widget.timeSeriesTable.dataSets[].timeSeriesQuery.unitOverride

Optional

@@ -8287,194 +12052,107 @@ rowLayout: -

mosaicLayout.tiles[].widget.pieChart.showLabels

+

mosaicLayout.tiles[].widget.timeSeriesTable.metricVisualization

Optional

-

boolean

-

{% verbatim %}Optional. Indicates whether or not the pie chart should show slices' labels{% endverbatim %}

+

string

+

{% verbatim %}Optional. Store rendering strategy{% endverbatim %}
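
The `timeSeriesTable` fields introduced above (its `dataSets[].timeSeriesQuery.timeSeriesFilterRatio` tree and `metricVisualization`) compose roughly as in the sketch below. This is an illustrative manifest, not part of this patch: the `apiVersion`/`kind`, metadata name, metric filter strings, and the aligner, reducer, and visualization enum values are assumptions about the underlying Cloud Monitoring dashboard API, and `displayName`, `columns`, and `height` are fields documented elsewhere in this reference.

```yaml
# Illustrative sketch only; names, filters, and enum values are assumptions.
apiVersion: monitoring.cnrm.cloud.google.com/v1beta1   # assumed KCC group/version
kind: MonitoringDashboard
metadata:
  name: monitoringdashboard-ratio-sample               # hypothetical name
spec:
  displayName: "API error ratio"                       # assumed field (documented earlier)
  mosaicLayout:
    columns: 12                                        # assumed field (documented earlier)
    tiles:
    - width: 6
      height: 4                                        # assumed field (documented earlier)
      widget:
        title: "5xx ratio per service"
        timeSeriesTable:
          metricVisualization: "BAR"                   # assumed enum value
          dataSets:
          - timeSeriesQuery:
              timeSeriesFilterRatio:
                numerator:
                  filter: 'metric.type="serviceruntime.googleapis.com/api/request_count" metric.labels.response_code_class="5xx"'
                  aggregation:
                    alignmentPeriod: "60s"
                    perSeriesAligner: "ALIGN_RATE"     # assumed aligner value
                    crossSeriesReducer: "REDUCE_SUM"   # assumed reducer value
                    groupByFields:
                    - "resource.labels.service"
                denominator:
                  filter: 'metric.type="serviceruntime.googleapis.com/api/request_count"'
                  aggregation:
                    alignmentPeriod: "60s"
                    perSeriesAligner: "ALIGN_RATE"
                    crossSeriesReducer: "REDUCE_SUM"
                    groupByFields:
                    - "resource.labels.service"
```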

-

mosaicLayout.tiles[].widget.scorecard

+

mosaicLayout.tiles[].widget.title

Optional

-

object

-

{% verbatim %}A scorecard summarizing time series data.{% endverbatim %}

+

string

+

{% verbatim %}Optional. The title of the widget.{% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard.gaugeView

+

mosaicLayout.tiles[].widget.xyChart

Optional

object

-

{% verbatim %}Will cause the scorecard to show a gauge chart.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.scorecard.gaugeView.lowerBound

-

Optional

- - -

float

-

{% verbatim %}The lower bound for this gauge chart. The value of the chart should always be greater than or equal to this.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.scorecard.gaugeView.upperBound

-

Optional

- - -

float

-

{% verbatim %}The upper bound for this gauge chart. The value of the chart should always be less than or equal to this.{% endverbatim %}

+

{% verbatim %}A chart of time series data.{% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard.sparkChartView

+

mosaicLayout.tiles[].widget.xyChart.chartOptions

Optional

object

-

{% verbatim %}Will cause the scorecard to show a spark chart.{% endverbatim %}

+

{% verbatim %}Display options for the chart.{% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard.sparkChartView.minAlignmentPeriod

+

mosaicLayout.tiles[].widget.xyChart.chartOptions.mode

Optional

string

-

{% verbatim %}The lower bound on data point frequency in the chart implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes it would not make sense to fetch and align data at one minute intervals. This field is optional and exists only as a hint.{% endverbatim %}

+

{% verbatim %}The chart mode.{% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard.sparkChartView.sparkChartType

+

mosaicLayout.tiles[].widget.xyChart.dataSets

Required*

-

string

-

{% verbatim %}Required. The type of sparkchart to show in this chartView.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.scorecard.thresholds

-

Optional

- - -

list (object)

-

{% verbatim %}The thresholds used to determine the state of the scorecard given the - time series' current value. For an actual value x, the scorecard is in a - danger state if x is less than or equal to a danger threshold that triggers - below, or greater than or equal to a danger threshold that triggers above. - Similarly, if x is above/below a warning threshold that triggers - above/below, then the scorecard is in a warning state - unless x also puts - it in a danger state. (Danger trumps warning.) - - As an example, consider a scorecard with the following four thresholds: - - ``` - { - value: 90, - category: 'DANGER', - trigger: 'ABOVE', - }, - { - value: 70, - category: 'WARNING', - trigger: 'ABOVE', - }, - { - value: 10, - category: 'DANGER', - trigger: 'BELOW', - }, - { - value: 20, - category: 'WARNING', - trigger: 'BELOW', - } - ``` - - Then: values less than or equal to 10 would put the scorecard in a DANGER - state, values greater than 10 but less than or equal to 20 a WARNING state, - values strictly between 20 and 70 an OK state, values greater than or equal - to 70 but less than 90 a WARNING state, and values greater than or equal to - 90 a DANGER state.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.scorecard.thresholds[]

-

Optional

- - -

object

-

{% verbatim %}{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.scorecard.thresholds[].color

-

Optional

- - -

string

-

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

+

list (object)

+

{% verbatim %}Required. The data displayed in this chart.{% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard.thresholds[].direction

-

Optional

+

mosaicLayout.tiles[].widget.xyChart.dataSets[]

+

Required*

-

string

-

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

+

object

+

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard.thresholds[].label

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].legendTemplate

Optional

string

-

{% verbatim %}A label for the threshold.{% endverbatim %}

+

{% verbatim %}A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value.{% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard.thresholds[].targetAxis

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].minAlignmentPeriod

Optional

string

-

{% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}

+

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}


-

mosaicLayout.tiles[].widget.scorecard.thresholds[].value

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].plotType

Optional

-

float

-

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

+

string

+

{% verbatim %}How this data should be plotted on the chart.{% endverbatim %}

-

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery

Required*

@@ -8484,7 +12162,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.outputFullDuration

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.outputFullDuration

Optional

@@ -8500,7 +12178,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.prometheusQuery

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.prometheusQuery

Optional

@@ -8510,7 +12188,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter

Optional

@@ -8520,7 +12198,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

Optional

@@ -8530,7 +12208,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

@@ -8551,7 +12229,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

@@ -8574,7 +12252,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

Optional

@@ -8584,7 +12262,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

Optional

@@ -8594,7 +12272,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

@@ -8619,7 +12297,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.filter

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

Required*

@@ -8629,7 +12307,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

Optional

@@ -8639,7 +12317,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

Optional

@@ -8649,7 +12327,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -8659,7 +12337,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

Optional

@@ -8669,7 +12347,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

Optional

@@ -8679,7 +12357,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

@@ -8700,7 +12378,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

@@ -8723,7 +12401,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

Optional

@@ -8733,7 +12411,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

Optional

@@ -8743,7 +12421,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

@@ -8768,7 +12446,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

Optional

@@ -8778,7 +12456,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

Optional

@@ -8788,7 +12466,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

Optional

@@ -8798,7 +12476,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

Optional

@@ -8819,7 +12497,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

@@ -8842,7 +12520,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

Optional

@@ -8852,7 +12530,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

Optional

@@ -8862,7 +12540,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

@@ -8887,7 +12565,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

Required*

@@ -8897,7 +12575,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

Optional

@@ -8907,7 +12585,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

Optional

@@ -8917,7 +12595,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

@@ -8938,7 +12616,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

Optional

@@ -8961,7 +12639,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

Optional

@@ -8971,7 +12649,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

Optional

@@ -8981,7 +12659,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

@@ -9006,7 +12684,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

Required*

@@ -9016,7 +12694,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

Optional

@@ -9026,7 +12704,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

Optional

@@ -9036,7 +12714,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -9046,7 +12724,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

Optional

@@ -9056,7 +12734,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

Optional

@@ -9066,7 +12744,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

Optional

@@ -9087,7 +12765,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

Optional

@@ -9110,7 +12788,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

Optional

@@ -9120,7 +12798,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

Optional

@@ -9130,7 +12808,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

Optional

@@ -9155,7 +12833,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.timeSeriesQueryLanguage

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

Optional

@@ -9165,7 +12843,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.scorecard.timeSeriesQuery.unitOverride

+

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.unitOverride

Optional
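
Taken together, the `xyChart.dataSets[]` fields above (legendTemplate, minAlignmentPeriod, plotType, and the nested `timeSeriesQuery.timeSeriesFilter` aggregation tree) can be sketched as the fragment below, scoped to a single `mosaicLayout.tiles[].widget`. The filter string and the plot-type, aligner, reducer, and ranking enum values are illustrative assumptions rather than values taken from this patch.

```yaml
# Fragment of mosaicLayout.tiles[].widget; illustrative values only.
xyChart:
  dataSets:
  - plotType: "LINE"                        # assumed enum value
    legendTemplate: "${resource.labels.zone}"
    minAlignmentPeriod: "600s"              # data published every 10 minutes
    timeSeriesQuery:
      timeSeriesFilter:
        filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
        aggregation:
          alignmentPeriod: "60s"
          perSeriesAligner: "ALIGN_MEAN"    # assumed aligner value
          crossSeriesReducer: "REDUCE_MEAN" # assumed reducer value
          groupByFields:
          - "resource.labels.zone"
        pickTimeSeriesFilter:
          rankingMethod: "METHOD_MEAN"      # assumed enum value
          direction: "TOP"                  # assumed enum value
          numTimeSeries: 5
```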

@@ -9175,839 +12853,699 @@ rowLayout: -

mosaicLayout.tiles[].widget.sectionHeader

+

mosaicLayout.tiles[].widget.xyChart.thresholds

Optional

-

object

-

{% verbatim %}A widget that defines a section header for easier navigation of the dashboard.{% endverbatim %}

+

list (object)

+

{% verbatim %}Threshold lines drawn horizontally across the chart.{% endverbatim %}

-

mosaicLayout.tiles[].widget.sectionHeader.dividerBelow

+

mosaicLayout.tiles[].widget.xyChart.thresholds[]

Optional

-

boolean

-

{% verbatim %}Whether to insert a divider below the section in the table of contents{% endverbatim %}

+

object

+

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].widget.sectionHeader.subtitle

+

mosaicLayout.tiles[].widget.xyChart.thresholds[].color

Optional

string

-

{% verbatim %}The subtitle of the section{% endverbatim %}

+

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

-

mosaicLayout.tiles[].widget.singleViewGroup

+

mosaicLayout.tiles[].widget.xyChart.thresholds[].direction

Optional

-

object

-

{% verbatim %}A widget that groups the other widgets by using a dropdown menu.{% endverbatim %}

+

string

+

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text

+

mosaicLayout.tiles[].widget.xyChart.thresholds[].label

Optional

-

object

-

{% verbatim %}A raw string or markdown displaying textual content.{% endverbatim %}

+

string

+

{% verbatim %}A label for the threshold.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.content

+

mosaicLayout.tiles[].widget.xyChart.thresholds[].targetAxis

Optional

string

-

{% verbatim %}The text content to be displayed.{% endverbatim %}

+

{% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.format

+

mosaicLayout.tiles[].widget.xyChart.thresholds[].value

Optional

-

string

-

{% verbatim %}How the text content is formatted.{% endverbatim %}

+

float

+

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}
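
The `xyChart.thresholds[]` fields above translate into entries like the following fragment. The `RED`, `ABOVE`, and `Y1` values are assumed enum values from the dashboard API, and the threshold value itself is arbitrary.

```yaml
# Fragment of mosaicLayout.tiles[].widget.xyChart; illustrative values only.
thresholds:
- value: 0.9            # in the native scale of the charted metric
  label: "CPU saturation"
  color: "RED"          # assumed enum value (state color)
  direction: "ABOVE"    # assumed enum value
  targetAxis: "Y1"      # assumed enum value
```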

-

mosaicLayout.tiles[].widget.text.style

+

mosaicLayout.tiles[].widget.xyChart.timeshiftDuration

Optional

-

object

-

{% verbatim %}How the text is styled{% endverbatim %}

+

string

+

{% verbatim %}The duration used to display a comparison chart. A comparison chart simultaneously shows values from two similar-length time periods (e.g., week-over-week metrics). The duration must be positive, and it can only be applied to charts with data sets of LINE plot type.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.style.backgroundColor

+

mosaicLayout.tiles[].widget.xyChart.xAxis

Optional

-

string

-

{% verbatim %}The background color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

+

object

+

{% verbatim %}The properties applied to the x-axis.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.style.fontSize

+

mosaicLayout.tiles[].widget.xyChart.xAxis.label

Optional

string

-

{% verbatim %}Font sizes for both the title and content. The title will still be larger relative to the content.{% endverbatim %}

+

{% verbatim %}The label of the axis.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.style.horizontalAlignment

+

mosaicLayout.tiles[].widget.xyChart.xAxis.scale

Optional

string

-

{% verbatim %}The horizontal alignment of both the title and content{% endverbatim %}

+

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.style.padding

+

mosaicLayout.tiles[].widget.xyChart.yAxis

Optional

-

string

-

{% verbatim %}The amount of padding around the widget{% endverbatim %}

+

object

+

{% verbatim %}The properties applied to the y-axis.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.style.pointerLocation

+

mosaicLayout.tiles[].widget.xyChart.yAxis.label

Optional

string

-

{% verbatim %}The pointer location for this widget (also sometimes called a "tail"){% endverbatim %}

+

{% verbatim %}The label of the axis.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.style.textColor

+

mosaicLayout.tiles[].widget.xyChart.yAxis.scale

Optional

string

-

{% verbatim %}The text color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

+

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

-

mosaicLayout.tiles[].widget.text.style.verticalAlignment

+

mosaicLayout.tiles[].width

Optional

-

string

-

{% verbatim %}The vertical alignment of both the title and content{% endverbatim %}

+

integer

+

{% verbatim %}The width of the tile, measured in grid blocks. Tiles must have a minimum width of 1.{% endverbatim %}

-

mosaicLayout.tiles[].widget.title

+

mosaicLayout.tiles[].xPos

Optional

-

string

-

{% verbatim %}Optional. The title of the widget.{% endverbatim %}

+

integer

+

{% verbatim %}The zero-indexed position of the tile in grid blocks relative to the left edge of the grid. Tiles must be contained within the specified number of columns. `x_pos` cannot be negative.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart

+

mosaicLayout.tiles[].yPos

Optional

-

object

-

{% verbatim %}A chart of time series data.{% endverbatim %}

+

integer

+

{% verbatim %}The zero-indexed position of the tile in grid blocks relative to the top edge of the grid. `y_pos` cannot be negative.{% endverbatim %}
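
The axis, timeshift, and tile-placement fields above (xAxis/yAxis, timeshiftDuration, width, xPos, yPos) come together as in this fragment of `spec.mosaicLayout`. The `columns` and `height` fields are assumed from the wider schema documented elsewhere in this reference, and the scale, mode, filter, and aligner values are illustrative.

```yaml
# Fragment of spec.mosaicLayout; illustrative values only.
columns: 12              # assumed field (documented elsewhere in this reference)
tiles:
- xPos: 0
  yPos: 0
  width: 6
  height: 4              # assumed field (documented elsewhere in this reference)
  widget:
    title: "Request latency, week over week"
    xyChart:
      timeshiftDuration: "604800s"   # one-week comparison window; LINE plot types only
      chartOptions:
        mode: "COLOR"                # assumed enum value
      xAxis:
        label: "Time"
        scale: "LINEAR"              # assumed enum value
      yAxis:
        label: "Latency (ms)"
        scale: "LINEAR"
      dataSets:                      # required; see the dataSets fragment sketched earlier
      - plotType: "LINE"
        timeSeriesQuery:
          timeSeriesFilter:
            filter: 'metric.type="loadbalancing.googleapis.com/https/total_latencies"'
            aggregation:
              alignmentPeriod: "60s"
              perSeriesAligner: "ALIGN_PERCENTILE_99"   # assumed aligner value
```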

-

mosaicLayout.tiles[].widget.xyChart.chartOptions

-

Optional

+

projectRef

+

Required

object

-

{% verbatim %}Display options for the chart.{% endverbatim %}

+

{% verbatim %}Immutable. The Project that this resource belongs to.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.chartOptions.mode

+

projectRef.external

Optional

string

-

{% verbatim %}The chart mode.{% endverbatim %}

+

{% verbatim %}The `projectID` field of a project, when not managed by KCC.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets

-

Required*

+

projectRef.kind

+

Optional

-

list (object)

-

{% verbatim %}Required. The data displayed in this chart.{% endverbatim %}

+

string

+

{% verbatim %}The kind of the Project resource; optional but must be `Project` if provided.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[]

-

Required*

+

projectRef.name

+

Optional

-

object

-

{% verbatim %}{% endverbatim %}

+

string

+

{% verbatim %}The `name` field of a `Project` resource.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].legendTemplate

+

projectRef.namespace

Optional

string

-

{% verbatim %}A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value.{% endverbatim %}

+

{% verbatim %}The `namespace` field of a `Project` resource.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].minAlignmentPeriod

+

resourceID

Optional

string

-

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

+

{% verbatim %}Immutable. Optional. The name of the resource. Used for creation and acquisition. When unset, the value of `metadata.name` is used as the default.{% endverbatim %}
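
As with other Config Connector references, `projectRef` above takes either an `external` project ID or the `name`/`namespace` of a `Project` resource managed in the cluster, and `resourceID` overrides the dashboard ID that otherwise defaults to `metadata.name`. A hedged sketch of the two variants, with hypothetical project names:

```yaml
# Top-level spec fields; illustrative values only.
projectRef:
  external: "my-project-id"        # hypothetical project, not managed by Config Connector

# Alternatively, reference a Project resource managed in the cluster:
# projectRef:
#   kind: Project                  # optional; must be Project if set
#   name: my-project               # hypothetical Project resource name
#   namespace: config-control      # hypothetical namespace

resourceID: "custom-dashboard-id"  # optional; defaults to metadata.name
```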

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].plotType

+

rowLayout

Optional

-

string

-

{% verbatim %}How this data should be plotted on the chart.{% endverbatim %}

+

object

+

{% verbatim %}The content is divided into equally spaced rows and the widgets are arranged horizontally.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery

-

Required*

+

rowLayout.rows

+

Optional

-

object

-

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

+

list (object)

+

{% verbatim %}The rows of content to display.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.outputFullDuration

+

rowLayout.rows[]

Optional

-

boolean

-

{% verbatim %}Optional. If set, Cloud Monitoring will treat the full query duration as - the alignment period so that there will be only 1 output value. - - *Note: This could override the configured alignment period except for - the cases where a series of data points are expected, like - - XyChart - - Scorecard's spark chart{% endverbatim %}

+

object

+

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.prometheusQuery

+

rowLayout.rows[].weight

Optional

-

string

-

{% verbatim %}A query used to fetch time series with PromQL.{% endverbatim %}

+

integer

+

{% verbatim %}The relative weight of this row. The row weight is used to adjust the height of rows on the screen (relative to peers). The greater the weight, the greater the height of the row on the screen. If omitted, a value of 1 is used while rendering.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter

+

rowLayout.rows[].widgets

Optional

-

object

-

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

+

list (object)

+

{% verbatim %}The display widgets arranged horizontally in this row.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

+

rowLayout.rows[].widgets[]

Optional

object

-

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].alertChart

Optional

-

string

-

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+

object

+

{% verbatim %}A chart of alert policy data.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

-

Optional

+

rowLayout.rows[].widgets[].alertChart.alertPolicyRef

+

Required*

-

string

-

{% verbatim %}The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned.{% endverbatim %}

+

object

+

{% verbatim %}Required. A reference to the MonitoringAlertPolicy.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

rowLayout.rows[].widgets[].alertChart.alertPolicyRef.external

Optional

-

list (string)

-

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+

string

+

{% verbatim %}The MonitoringAlertPolicy link in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", when not managed by KCC.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

rowLayout.rows[].widgets[].alertChart.alertPolicyRef.name

Optional

string

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}The `name` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].alertChart.alertPolicyRef.namespace

Optional

string

-

{% verbatim %}An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned.{% endverbatim %}

+

{% verbatim %}The `namespace` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

-

Required*

+

rowLayout.rows[].widgets[].blank

+

Optional

-

string

-

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+

object

+

{% verbatim %}A blank space.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

rowLayout.rows[].widgets[].collapsibleGroup

Optional

object

-

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+

{% verbatim %}A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

rowLayout.rows[].widgets[].collapsibleGroup.collapsed

Optional

-

string

-

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+

boolean

+

{% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}
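
The `rowLayout` fields above arrange widgets into weighted rows; `alertChart` only needs an `alertPolicyRef`, while `blank` and `collapsibleGroup` are layout helpers. A sketch of `spec.rowLayout`, with hypothetical policy and widget identifiers:

```yaml
# Fragment of spec; illustrative values only.
rowLayout:
  rows:
  - weight: 2                        # relative row height; defaults to 1 when omitted
    widgets:
    - id: "error-rate-alert"         # optional widget id
      alertChart:
        alertPolicyRef:
          name: monitoringalertpolicy-sample   # hypothetical MonitoringAlertPolicy resource
          # When the policy is not managed by Config Connector, use the documented link form:
          # external: "projects/my-project/alertPolicies/1234567890"
  - weight: 1
    widgets:
    - blank: {}                      # an empty spacer cell
    - collapsibleGroup:
        collapsed: true              # start collapsed on first page load
```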

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

rowLayout.rows[].widgets[].errorReportingPanel

Optional

-

integer

-

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+

object

+

{% verbatim %}A widget that displays a list of error groups.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs

Optional

-

string

-

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+

list (object)

+

{% verbatim %}The projects from which to gather errors.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs[]

Optional

object

-

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

+

{% verbatim %}The Project that this resource belongs to.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs[].external

Optional

string

-

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+

{% verbatim %}The `projectID` field of a project, when not managed by KCC.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs[].kind

Optional

string

-

{% verbatim %}The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned.{% endverbatim %}

+

{% verbatim %}The kind of the Project resource; optional but must be `Project` if provided.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs[].name

Optional

-

list (string)

-

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+

string

+

{% verbatim %}The `name` field of a `Project` resource.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs[].namespace

Optional

string

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}The `namespace` field of a `Project` resource.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].errorReportingPanel.services

Optional

-

string

-

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+

list (string)

+

{% verbatim %}An identifier of the service, such as the name of the executable, job, or Google App Engine service name. This field is expected to have a low number of values that are relatively stable over time, as opposed to `version`, which can be changed whenever new code is deployed. Contains the service name for error reports extracted from Google App Engine logs or `default` if the App Engine default service is used.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

+

rowLayout.rows[].widgets[].errorReportingPanel.services[]

Optional

-

object

-

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

+

string

+

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

+

rowLayout.rows[].widgets[].errorReportingPanel.versions

Optional

-

object

-

{% verbatim %}The denominator of the ratio.{% endverbatim %}

+

list (string)

+

{% verbatim %}Represents the source code version that the developer provided, which could represent a version label or a Git SHA-1 hash, for example. For App Engine standard environment, the version is set to the version of the app.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

rowLayout.rows[].widgets[].errorReportingPanel.versions[]

Optional

-

object

-

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+

string

+

{% verbatim %}{% endverbatim %}
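
The `errorReportingPanel` fields above scope the panel to a set of projects, services, and versions. A fragment of `rowLayout.rows[].widgets[]`, with hypothetical identifiers:

```yaml
# Fragment of rowLayout.rows[].widgets[]; illustrative values only.
- errorReportingPanel:
    projectRefs:
    - external: "my-project-id"      # hypothetical project; a name/namespace reference also works
    services:
    - "checkout-service"             # hypothetical service identifier
    versions:
    - "v1.4.2"                       # hypothetical version label or Git SHA
```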

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].id

Optional

string

-

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+

{% verbatim %}Optional. The widget id. Ids may be made up of alphanumerics, dashes and underscores. Widget ids are optional.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[].logsPanel

Optional

-

string

-

{% verbatim %}The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned.{% endverbatim %}

+

object

+

{% verbatim %}A widget that shows a stream of logs.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

rowLayout.rows[].widgets[].logsPanel.filter

Optional

-

list (string)

-

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+

string

+

{% verbatim %}A filter that chooses which log entries to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

rowLayout.rows[].widgets[].logsPanel.resourceNames

Optional

-

string

-

{% verbatim %}{% endverbatim %}

+

list (object)

+

{% verbatim %}The names of logging resources to collect logs for. Currently only projects are supported. If empty, the widget will default to the host project.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].logsPanel.resourceNames[]

Optional

-

string

-

{% verbatim %}An `Aligner` describes how to bring the data points in a single - time series into temporal alignment. Except for `ALIGN_NONE`, all - alignments cause all the data points in an `alignment_period` to be - mathematically grouped together, resulting in a single data point for - each `alignment_period` with end timestamp at the end of the period. - - Not all alignment operations may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` of the original time - series. Alignment can change the `metric_kind` or the `value_type` of - the time series. - - Time series data must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; otherwise, an error is - returned.{% endverbatim %}

+

object

+

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

-

Required*

+

rowLayout.rows[].widgets[].logsPanel.resourceNames[].external

+

Optional

string

-

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+

{% verbatim %}The external name of the referenced resource{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

+

rowLayout.rows[].widgets[].logsPanel.resourceNames[].kind

Optional

-

object

-

{% verbatim %}The numerator of the ratio.{% endverbatim %}

+

string

+

{% verbatim %}Kind of the referent.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

rowLayout.rows[].widgets[].logsPanel.resourceNames[].name

Optional

-

object

-

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

+

string

+

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].logsPanel.resourceNames[].namespace

Optional

string

-

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used - to divide the data in all the - [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - time. This will be done before the per-series aligner can be applied to - the data. - - The value must be at least 60 seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required or an error is returned. - If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - - The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

+

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}
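
The `logsPanel` fields above take an advanced logs query plus the logging resources (currently projects) to read from. A hedged fragment; the filter syntax and the `projects/...` external form are assumptions about the Logging query language and resource-name format:

```yaml
# Fragment of rowLayout.rows[].widgets[]; illustrative values only.
- logsPanel:
    filter: 'severity>=ERROR AND resource.type="k8s_container"'   # assumed query syntax
    resourceNames:
    - external: "projects/my-project"   # assumed external format; hypothetical project
```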

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[].pieChart

Optional

+ +

object

+

{% verbatim %}A widget that displays timeseries data as a pie chart.{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].pieChart.chartType

+

Required*

+

string

-

{% verbatim %}The reduction operation to be used to combine time series into a single - time series, where the value of each data point in the resulting series is - a function of all the already aligned values in the input time series. - - Not all reducer operations can be applied to all time series. The valid - choices depend on the `metric_kind` and the `value_type` of the original - time series. Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input time series. - - Time series data must first be aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - error is returned.{% endverbatim %}

+

{% verbatim %}Required. Indicates the visualization type for the PieChart.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

-

Optional

+

rowLayout.rows[].widgets[].pieChart.dataSets

+

Required*

-

list (string)

-

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

+

list (object)

+

{% verbatim %}Required. The queries for the chart's data.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

-

Optional

+

rowLayout.rows[].widgets[].pieChart.dataSets[]

+

Required*

-

string

+

object

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].pieChart.dataSets[].minAlignmentPeriod

Optional

string

-

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period.

Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series.

Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

+

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

-

Required*

+

rowLayout.rows[].widgets[].pieChart.dataSets[].sliceNameTemplate

+

Optional

string

-

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

+

{% verbatim %}Optional. A template for the name of the slice. This name will be displayed in the legend and the tooltip of the pie chart. It replaces the auto-generated names for the slices. For example, if the template is set to `${resource.labels.zone}`, the zone's value will be used for the name instead of the default name.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

-

Optional

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery

+

Required*

object

-

{% verbatim %}Ranking based time series filter.{% endverbatim %}

+

{% verbatim %}Required. The query for the PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.outputFullDuration

Optional

-

string

-

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

+

boolean

+

{% verbatim %}Optional. If set, Cloud Monitoring will treat the full query duration as the alignment period so that there will be only 1 output value.

*Note: This could override the configured alignment period except for the cases where a series of data points are expected, like
- XyChart
- Scorecard's spark chart{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.prometheusQuery

Optional

-

integer

-

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

+

string

+

{% verbatim %}A query used to fetch time series with PromQL.{% endverbatim %}
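For comparison, a data set can also be driven by PromQL through `prometheusQuery` instead of a monitoring filter. A hedged sketch of just the data-set fragment (the PromQL expression and unit are placeholders, not taken from this patch):

```yaml
# Illustrative pieChart.dataSets[] fragment; the PromQL expression is a placeholder.
- timeSeriesQuery:
    prometheusQuery: 'sum by (zone) (rate(http_requests_total[5m]))'
    unitOverride: "1"
```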

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter

Optional

-

string

-

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

+

object

+

{% verbatim %}Filter parameters to fetch time series.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

Optional

object

-

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

@@ -10028,7 +13566,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

@@ -10051,7 +13589,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

Optional

@@ -10061,7 +13599,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

Optional

@@ -10071,7 +13609,7 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

@@ -10096,673 +13634,760 @@ rowLayout: -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

-

Optional

- - -

string

-

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.dataSets[].timeSeriesQuery.unitOverride

-

Optional

- - -

string

-

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.thresholds

-

Optional

- - -

list (object)

-

{% verbatim %}Threshold lines drawn horizontally across the chart.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.thresholds[]

-

Optional

- - -

object

-

{% verbatim %}{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.thresholds[].color

-

Optional

- - -

string

-

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.thresholds[].direction

-

Optional

- - -

string

-

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.thresholds[].label

-

Optional

- - -

string

-

{% verbatim %}A label for the threshold.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.thresholds[].targetAxis

-

Optional

- - -

string

-

{% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}

- - - - -

mosaicLayout.tiles[].widget.xyChart.thresholds[].value

-

Optional

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

+

Required*

-

float

-

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

+

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.timeshiftDuration

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

Optional

-

string

-

{% verbatim %}The duration used to display a comparison chart. A comparison chart simultaneously shows values from two similar-length time periods (e.g., week-over-week metrics). The duration must be positive, and it can only be applied to charts with data sets of LINE plot type.{% endverbatim %}

+

object

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.xAxis

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

Optional

-

object

-

{% verbatim %}The properties applied to the x-axis.{% endverbatim %}

+

string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.xAxis.label

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

Optional

-

string

-

{% verbatim %}The label of the axis.{% endverbatim %}

+

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.xAxis.scale

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

Optional

string

-

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.yAxis

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

Optional

object

-

{% verbatim %}The properties applied to the y-axis.{% endverbatim %}

+

{% verbatim %}Apply a second aggregation after `aggregation` is applied.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.yAxis.label

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

string

-

{% verbatim %}The label of the axis.{% endverbatim %}

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.

The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.

The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

mosaicLayout.tiles[].widget.xyChart.yAxis.scale

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

string

-

{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.

Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series.

Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

-

mosaicLayout.tiles[].width

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

Optional

-

integer

-

{% verbatim %}The width of the tile, measured in grid blocks. Tiles must have a minimum width of 1.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

mosaicLayout.tiles[].xPos

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

Optional

-

integer

-

{% verbatim %}The zero-indexed position of the tile in grid blocks relative to the left edge of the grid. Tiles must be contained within the specified number of columns. `x_pos` cannot be negative.{% endverbatim %}

+

string

+

{% verbatim %}{% endverbatim %}

-

mosaicLayout.tiles[].yPos

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

-

integer

-

{% verbatim %}The zero-indexed position of the tile in grid blocks relative to the top edge of the grid. `y_pos` cannot be negative.{% endverbatim %}

- - - - -

projectRef

-

Required

- - -

object

-

{% verbatim %}Immutable. The Project that this resource belongs to.{% endverbatim %}

+

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period.

Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series.

Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}
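Read together, the `pieChart` data-set fields above map onto a nested block under `rowLayout`. The following is a hypothetical, minimal sketch of a MonitoringDashboard manifest; the metric type, alignment settings, and enum values are illustrative placeholders, not taken from this patch:

```yaml
# Illustrative sketch only; field names follow the reference above, values are placeholders.
apiVersion: monitoring.cnrm.cloud.google.com/v1beta1
kind: MonitoringDashboard
metadata:
  name: monitoringdashboard-piechart-sample
spec:
  displayName: "Pie chart sample"
  rowLayout:
    rows:
    - weight: 1
      widgets:
      - title: "CPU usage by zone"
        pieChart:
          chartType: "PIE"
          dataSets:
          - sliceNameTemplate: "${resource.labels.zone}"
            timeSeriesQuery:
              timeSeriesFilter:
                # Required: a monitoring filter selecting the metric and resource type.
                filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
                aggregation:
                  alignmentPeriod: "60s"
                  perSeriesAligner: "ALIGN_MEAN"
                  crossSeriesReducer: "REDUCE_SUM"
                  groupByFields:
                  - "resource.labels.zone"
```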

-

projectRef.external

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

Optional

-

string

-

{% verbatim %}The `projectID` field of a project, when not managed by KCC.{% endverbatim %}

+

object

+

{% verbatim %}Parameters to fetch a ratio between two time series filters.{% endverbatim %}

-

projectRef.kind

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

Optional

-

string

-

{% verbatim %}The kind of the Project resource; optional but must be `Project` if provided.{% endverbatim %}

+

object

+

{% verbatim %}The denominator of the ratio.{% endverbatim %}

-

projectRef.name

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

Optional

-

string

-

{% verbatim %}The `name` field of a `Project` resource.{% endverbatim %}

+

object

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

-

projectRef.namespace

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

Optional

string

-

{% verbatim %}The `namespace` field of a `Project` resource.{% endverbatim %}

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.

The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.

The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

resourceID

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

string

-

{% verbatim %}Immutable. Optional. The name of the resource. Used for creation and acquisition. When unset, the value of `metadata.name` is used as the default.{% endverbatim %}

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.

Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series.

Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

-

rowLayout

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

Optional

-

object

-

{% verbatim %}The content is divided into equally spaced rows and the widgets are arranged horizontally.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

rowLayout.rows

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

Optional

-

list (object)

-

{% verbatim %}The rows of content to display.{% endverbatim %}

+

string

+

{% verbatim %}{% endverbatim %}

-

rowLayout.rows[]

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

-

object

-

{% verbatim %}{% endverbatim %}

+

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period.

Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series.

Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

-

rowLayout.rows[].weight

-

Optional

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

Required*

-

integer

-

{% verbatim %}The relative weight of this row. The row weight is used to adjust the height of rows on the screen (relative to peers). Greater the weight, greater the height of the row on the screen. If omitted, a value of 1 is used while rendering.{% endverbatim %}

+

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

-

rowLayout.rows[].widgets

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

Optional

-

list (object)

-

{% verbatim %}The display widgets arranged horizontally in this row.{% endverbatim %}

+

object

+

{% verbatim %}The numerator of the ratio.{% endverbatim %}

-

rowLayout.rows[].widgets[]

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

Optional

object

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.{% endverbatim %}

-

rowLayout.rows[].widgets[].alertChart

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

-

object

-

{% verbatim %}A chart of alert policy data.{% endverbatim %}

+

string

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.

The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.

The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

rowLayout.rows[].widgets[].alertChart.alertPolicyRef

-

Required*

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

Optional

-

object

-

{% verbatim %}Required. A reference to the MonitoringAlertPolicy.{% endverbatim %}

+

string

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.

Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series.

Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

-

rowLayout.rows[].widgets[].alertChart.alertPolicyRef.external

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

Optional

-

string

-

{% verbatim %}The MonitoringAlertPolicy link in the form "projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]", when not managed by KCC.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

rowLayout.rows[].widgets[].alertChart.alertPolicyRef.name

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

Optional

string

-

{% verbatim %}The `name` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

rowLayout.rows[].widgets[].alertChart.alertPolicyRef.namespace

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

string

-

{% verbatim %}The `namespace` field of a `MonitoringAlertPolicy` resource.{% endverbatim %}

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period.

Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series.

Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}

-

rowLayout.rows[].widgets[].blank

-

Optional

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

Required*

-

object

-

{% verbatim %}A blank space.{% endverbatim %}

+

string

+

{% verbatim %}Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.{% endverbatim %}

-

rowLayout.rows[].widgets[].collapsibleGroup

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

Optional

object

-

{% verbatim %}A widget that groups the other widgets. All widgets that are within the area spanned by the grouping widget are considered member widgets.{% endverbatim %}

+

{% verbatim %}Ranking based time series filter.{% endverbatim %}

-

rowLayout.rows[].widgets[].collapsibleGroup.collapsed

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

Optional

-

boolean

-

{% verbatim %}The collapsed state of the widget on first page load.{% endverbatim %}

+

string

+

{% verbatim %}How to use the ranking to select time series that pass through the filter.{% endverbatim %}

-

rowLayout.rows[].widgets[].errorReportingPanel

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

Optional

-

object

-

{% verbatim %}A widget that displays a list of error groups.{% endverbatim %}

+

integer

+

{% verbatim %}How many time series to allow to pass through the filter.{% endverbatim %}

-

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

Optional

-

list (object)

-

{% verbatim %}The projects from which to gather errors.{% endverbatim %}

+

string

+

{% verbatim %}`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series.{% endverbatim %}

-

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs[]

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

Optional

object

-

{% verbatim %}The Project that this resource belongs to.{% endverbatim %}

+

{% verbatim %}Apply a second aggregation after the ratio is computed.{% endverbatim %}

-

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs[].external

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

Optional

string

-

{% verbatim %}The `projectID` field of a project, when not managed by KCC.{% endverbatim %}

+

{% verbatim %}The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.

The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.

The maximum value of the `alignment_period` is 2 years, or 104 weeks.{% endverbatim %}

-

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs[].kind

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

Optional

string

-

{% verbatim %}The kind of the Project resource; optional but must be `Project` if provided.{% endverbatim %}

+

{% verbatim %}The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.

Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series.

Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned.{% endverbatim %}

-

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs[].name

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

Optional

-

string

-

{% verbatim %}The `name` field of a `Project` resource.{% endverbatim %}

+

list (string)

+

{% verbatim %}The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.{% endverbatim %}

-

rowLayout.rows[].widgets[].errorReportingPanel.projectRefs[].namespace

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

Optional

string

-

{% verbatim %}The `namespace` field of a `Project` resource.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

rowLayout.rows[].widgets[].errorReportingPanel.services

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

Optional

-

list (string)

-

{% verbatim %}An identifier of the service, such as the name of the executable, job, or Google App Engine service name. This field is expected to have a low number of values that are relatively stable over time, as opposed to `version`, which can be changed whenever new code is deployed.

Contains the service name for error reports extracted from Google App Engine logs or `default` if the App Engine default service is used.{% endverbatim %}

+

string

+

{% verbatim %}An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period.

Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series.

Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.{% endverbatim %}
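Putting the `timeSeriesFilterRatio` fields above together, a minimal illustrative `timeSeriesQuery` fragment; the load-balancer metric type and label value are placeholders, not taken from this patch:

```yaml
# Illustrative timeSeriesQuery fragment: ratio of 5xx responses to all responses.
timeSeriesQuery:
  timeSeriesFilterRatio:
    numerator:
      filter: 'metric.type="loadbalancing.googleapis.com/https/request_count" metric.labels.response_code_class="500"'
      aggregation:
        alignmentPeriod: "300s"
        perSeriesAligner: "ALIGN_RATE"
    denominator:
      filter: 'metric.type="loadbalancing.googleapis.com/https/request_count"'
      aggregation:
        alignmentPeriod: "300s"
        perSeriesAligner: "ALIGN_RATE"
    secondaryAggregation:
      crossSeriesReducer: "REDUCE_MEAN"
```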

-

rowLayout.rows[].widgets[].errorReportingPanel.services[]

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

Optional

string

-

{% verbatim %}{% endverbatim %}

- - - - -

rowLayout.rows[].widgets[].errorReportingPanel.versions

-

Optional

- - -

list (string)

-

{% verbatim %}Represents the source code version that the developer provided, which could represent a version label or a Git SHA-1 hash, for example. For App Engine standard environment, the version is set to the version of the app.{% endverbatim %}

+

{% verbatim %}A query used to fetch time series with MQL.{% endverbatim %}

-

rowLayout.rows[].widgets[].errorReportingPanel.versions[]

+

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.unitOverride

Optional

string

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.{% endverbatim %}

-

rowLayout.rows[].widgets[].id

+

rowLayout.rows[].widgets[].pieChart.showLabels

Optional

-

string

-

{% verbatim %}Optional. The widget id. Ids may be made up of alphanumerics, dashes and underscores. Widget ids are optional.{% endverbatim %}

+

boolean

+

{% verbatim %}Optional. Indicates whether or not the pie chart should show slices' labels{% endverbatim %}

-

rowLayout.rows[].widgets[].logsPanel

+

rowLayout.rows[].widgets[].scorecard

Optional

object

-

{% verbatim %}A widget that shows a stream of logs.{% endverbatim %}

+

{% verbatim %}A scorecard summarizing time series data.{% endverbatim %}

-

rowLayout.rows[].widgets[].logsPanel.filter

+

rowLayout.rows[].widgets[].scorecard.gaugeView

Optional

-

string

-

{% verbatim %}A filter that chooses which log entries to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries.{% endverbatim %}

+

object

+

{% verbatim %}Will cause the scorecard to show a gauge chart.{% endverbatim %}

-

rowLayout.rows[].widgets[].logsPanel.resourceNames

+

rowLayout.rows[].widgets[].scorecard.gaugeView.lowerBound

Optional

-

list (object)

-

{% verbatim %}The names of logging resources to collect logs for. Currently only projects are supported. If empty, the widget will default to the host project.{% endverbatim %}

+

float

+

{% verbatim %}The lower bound for this gauge chart. The value of the chart should always be greater than or equal to this.{% endverbatim %}

-

rowLayout.rows[].widgets[].logsPanel.resourceNames[]

+

rowLayout.rows[].widgets[].scorecard.gaugeView.upperBound

Optional

-

object

-

{% verbatim %}{% endverbatim %}

+

float

+

{% verbatim %}The upper bound for this gauge chart. The value of the chart should always be less than or equal to this.{% endverbatim %}

-

rowLayout.rows[].widgets[].logsPanel.resourceNames[].external

+

rowLayout.rows[].widgets[].scorecard.sparkChartView

Optional

-

string

-

{% verbatim %}The external name of the referenced resource{% endverbatim %}

+

object

+

{% verbatim %}Will cause the scorecard to show a spark chart.{% endverbatim %}

-

rowLayout.rows[].widgets[].logsPanel.resourceNames[].kind

+

rowLayout.rows[].widgets[].scorecard.sparkChartView.minAlignmentPeriod

Optional

string

-

{% verbatim %}Kind of the referent.{% endverbatim %}

+

{% verbatim %}The lower bound on data point frequency in the chart implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes it would not make sense to fetch and align data at one minute intervals. This field is optional and exists only as a hint.{% endverbatim %}

-

rowLayout.rows[].widgets[].logsPanel.resourceNames[].name

-

Optional

+

rowLayout.rows[].widgets[].scorecard.sparkChartView.sparkChartType

+

Required*

string

-

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

+

{% verbatim %}Required. The type of sparkchart to show in this chartView.{% endverbatim %}

-

rowLayout.rows[].widgets[].logsPanel.resourceNames[].namespace

+

rowLayout.rows[].widgets[].scorecard.thresholds

Optional

-

string

-

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}

+

list (object)

+

{% verbatim %}The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.)

As an example, consider a scorecard with the following four thresholds:

```
{
  value: 90,
  category: 'DANGER',
  trigger: 'ABOVE',
},
{
  value: 70,
  category: 'WARNING',
  trigger: 'ABOVE',
},
{
  value: 10,
  category: 'DANGER',
  trigger: 'BELOW',
},
{
  value: 20,
  category: 'WARNING',
  trigger: 'BELOW',
}
```

Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.{% endverbatim %}
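As a concrete illustration of the threshold semantics above, a hypothetical scorecard widget fragment; the metric, bounds, and enum spellings are illustrative assumptions, not taken from this patch:

```yaml
# Illustrative rowLayout.rows[].widgets[] entry with a gauge and two thresholds.
- title: "Error budget burn"
  scorecard:
    gaugeView:
      lowerBound: 0.0
      upperBound: 100.0
    thresholds:
    - value: 90.0
      color: "RED"        # danger threshold that triggers above
      direction: "ABOVE"
      label: "Critical"
    - value: 70.0
      color: "YELLOW"     # warning threshold that triggers above
      direction: "ABOVE"
      label: "Warning"
    timeSeriesQuery:
      timeSeriesFilter:
        filter: 'metric.type="custom.googleapis.com/error_budget_burn_percent"'
        aggregation:
          alignmentPeriod: "60s"
          perSeriesAligner: "ALIGN_MEAN"
```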

-

rowLayout.rows[].widgets[].pieChart

+

rowLayout.rows[].widgets[].scorecard.thresholds[]

Optional

object

-

{% verbatim %}A widget that displays timeseries data as a pie chart.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

rowLayout.rows[].widgets[].pieChart.chartType

-

Required*

+

rowLayout.rows[].widgets[].scorecard.thresholds[].color

+

Optional

string

-

{% verbatim %}Required. Indicates the visualization type for the PieChart.{% endverbatim %}

+

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

-

rowLayout.rows[].widgets[].pieChart.dataSets

-

Required*

+

rowLayout.rows[].widgets[].scorecard.thresholds[].direction

+

Optional

-

list (object)

-

{% verbatim %}Required. The queries for the chart's data.{% endverbatim %}

+

string

+

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

-

rowLayout.rows[].widgets[].pieChart.dataSets[]

-

Required*

+

rowLayout.rows[].widgets[].scorecard.thresholds[].label

+

Optional

-

object

-

{% verbatim %}{% endverbatim %}

+

string

+

{% verbatim %}A label for the threshold.{% endverbatim %}

-

rowLayout.rows[].widgets[].pieChart.dataSets[].minAlignmentPeriod

+

rowLayout.rows[].widgets[].scorecard.thresholds[].targetAxis

Optional

string

-

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.{% endverbatim %}

+

{% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}

-

rowLayout.rows[].widgets[].pieChart.dataSets[].sliceNameTemplate

+

rowLayout.rows[].widgets[].scorecard.thresholds[].value

Optional

-

string

-

{% verbatim %}Optional. A template for the name of the slice. This name will be displayed in the legend and the tooltip of the pie chart. It replaces the auto-generated names for the slices. For example, if the template is set to `${resource.labels.zone}`, the zone's value will be used for the name instead of the default name.{% endverbatim %}

+

float

+

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

-

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery

Required*

object

-

{% verbatim %}Required. The query for the PieChart. See, `google.monitoring.dashboard.v1.TimeSeriesQuery`.{% endverbatim %}

+

{% verbatim %}Required. Fields for querying time series data from the Stackdriver metrics API.{% endverbatim %}

-

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.outputFullDuration

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.outputFullDuration

Optional

@@ -10778,7 +14403,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.prometheusQuery

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.prometheusQuery

Optional

@@ -10788,7 +14413,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter

Optional

@@ -10798,7 +14423,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation

Optional

@@ -10808,7 +14433,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

@@ -10829,7 +14454,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

@@ -10852,7 +14477,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

Optional

@@ -10862,7 +14487,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

Optional

@@ -10872,7 +14497,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

@@ -10897,7 +14522,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.filter

Required*

@@ -10907,7 +14532,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

Optional

@@ -10917,7 +14542,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

Optional

@@ -10927,7 +14552,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -10937,7 +14562,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

Optional

@@ -10947,7 +14572,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation

Optional

@@ -10957,7 +14582,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

@@ -10978,7 +14603,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

@@ -11001,7 +14626,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

Optional

@@ -11011,7 +14636,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

Optional

@@ -11021,7 +14646,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

@@ -11046,7 +14671,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio

Optional

@@ -11056,7 +14681,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator

Optional

@@ -11066,7 +14691,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

Optional

@@ -11076,7 +14701,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

Optional

@@ -11097,7 +14722,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

@@ -11120,7 +14745,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

Optional

@@ -11130,7 +14755,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

Optional

@@ -11140,7 +14765,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

@@ -11165,7 +14790,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

Required*

@@ -11175,7 +14800,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator

Optional

@@ -11185,7 +14810,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

Optional

@@ -11195,7 +14820,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

@@ -11216,7 +14841,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

Optional

@@ -11239,7 +14864,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

Optional

@@ -11249,7 +14874,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

Optional

@@ -11259,7 +14884,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

@@ -11284,7 +14909,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

Required*

@@ -11294,7 +14919,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

Optional

@@ -11304,7 +14929,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

Optional

@@ -11314,7 +14939,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -11324,7 +14949,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

Optional

@@ -11334,7 +14959,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

Optional

@@ -11344,7 +14969,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

Optional

@@ -11365,7 +14990,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

Optional

@@ -11388,7 +15013,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

Optional

@@ -11398,7 +15023,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

Optional

@@ -11408,7 +15033,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

Optional

@@ -11433,7 +15058,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesQueryLanguage

Optional

@@ -11443,7 +15068,7 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.dataSets[].timeSeriesQuery.unitOverride

+

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.unitOverride

Optional

@@ -11453,135 +15078,218 @@ rowLayout: -

rowLayout.rows[].widgets[].pieChart.showLabels

+

rowLayout.rows[].widgets[].sectionHeader

+

Optional

+ + +

object

+

{% verbatim %}A widget that defines a section header for easier navigation of the dashboard.{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].sectionHeader.dividerBelow

Optional

boolean

-

{% verbatim %}Optional. Indicates whether or not the pie chart should show slices' labels{% endverbatim %}

+

{% verbatim %}Whether to insert a divider below the section in the table of contents{% endverbatim %}

-

rowLayout.rows[].widgets[].scorecard

+

rowLayout.rows[].widgets[].sectionHeader.subtitle

+

Optional

+ + +

string

+

{% verbatim %}The subtitle of the section{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].singleViewGroup

Optional

object

-

{% verbatim %}A scorecard summarizing time series data.{% endverbatim %}

+

{% verbatim %}A widget that groups the other widgets by using a dropdown menu.{% endverbatim %}

-

rowLayout.rows[].widgets[].scorecard.gaugeView

+

rowLayout.rows[].widgets[].text

Optional

object

-

{% verbatim %}Will cause the scorecard to show a gauge chart.{% endverbatim %}

+

{% verbatim %}A raw string or markdown displaying textual content.{% endverbatim %}

-

rowLayout.rows[].widgets[].scorecard.gaugeView.lowerBound

+

rowLayout.rows[].widgets[].text.content

Optional

-

float

-

{% verbatim %}The lower bound for this gauge chart. The value of the chart should always be greater than or equal to this.{% endverbatim %}

+

string

+

{% verbatim %}The text content to be displayed.{% endverbatim %}

-

rowLayout.rows[].widgets[].scorecard.gaugeView.upperBound

+

rowLayout.rows[].widgets[].text.format

Optional

-

float

-

{% verbatim %}The upper bound for this gauge chart. The value of the chart should always be less than or equal to this.{% endverbatim %}

+

string

+

{% verbatim %}How the text content is formatted.{% endverbatim %}

-

rowLayout.rows[].widgets[].scorecard.sparkChartView

+

rowLayout.rows[].widgets[].text.style

Optional

object

-

{% verbatim %}Will cause the scorecard to show a spark chart.{% endverbatim %}

+

{% verbatim %}How the text is styled{% endverbatim %}

-

rowLayout.rows[].widgets[].scorecard.sparkChartView.minAlignmentPeriod

+

rowLayout.rows[].widgets[].text.style.backgroundColor

Optional

string

-

{% verbatim %}The lower bound on data point frequency in the chart, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, it would not make sense to fetch and align data at one-minute intervals. This field is optional and exists only as a hint.{% endverbatim %}

+

{% verbatim %}The background color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

-

rowLayout.rows[].widgets[].scorecard.sparkChartView.sparkChartType

+

rowLayout.rows[].widgets[].text.style.fontSize

+

Optional

+ + +

string

+

{% verbatim %}Font sizes for both the title and content. The title will still be larger relative to the content.{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].text.style.horizontalAlignment

+

Optional

+ + +

string

+

{% verbatim %}The horizontal alignment of both the title and content{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].text.style.padding

+

Optional

+ + +

string

+

{% verbatim %}The amount of padding around the widget{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].text.style.pointerLocation

+

Optional

+ + +

string

+

{% verbatim %}The pointer location for this widget (also sometimes called a "tail"){% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].text.style.textColor

+

Optional

+ + +

string

+

{% verbatim %}The text color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].text.style.verticalAlignment

+

Optional

+ + +

string

+

{% verbatim %}The vertical alignment of both the title and content{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].timeSeriesTable

+

Optional

+ + +

object

+

{% verbatim %}A widget that displays time series data in a tabular format.{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].timeSeriesTable.columnSettings

+

Optional

+ + +

list (object)

+

{% verbatim %}Optional. The list of the persistent column settings for the table.{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].timeSeriesTable.columnSettings[]

+

Optional

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].timeSeriesTable.columnSettings[].column

Required*

string

-

{% verbatim %}Required. The type of sparkchart to show in this chartView.{% endverbatim %}

+

{% verbatim %}Required. The id of the column.{% endverbatim %}

+ + + + +

rowLayout.rows[].widgets[].timeSeriesTable.columnSettings[].visible

+

Required*

+ + +

boolean

+

{% verbatim %}Required. Whether the column should be visible on page load.{% endverbatim %}

-

rowLayout.rows[].widgets[].scorecard.thresholds

-

Optional

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets

+

Required*

list (object)

-

{% verbatim %}The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.)

As an example, consider a scorecard with the following four thresholds:

```
{
  value: 90,
  category: 'DANGER',
  trigger: 'ABOVE',
},
{
  value: 70,
  category: 'WARNING',
  trigger: 'ABOVE',
},
{
  value: 10,
  category: 'DANGER',
  trigger: 'BELOW',
},
{
  value: 20,
  category: 'WARNING',
  trigger: 'BELOW',
}
```

Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.{% endverbatim %}

+

{% verbatim %}Required. The data displayed in this table.{% endverbatim %}

-

rowLayout.rows[].widgets[].scorecard.thresholds[]

-

Optional

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[]

+

Required*

object

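For orientation, the sectionHeader, singleViewGroup, and text rows added above all describe widgets that sit under a dashboard's rowLayout. The fragment below is a minimal sketch only, assuming these rows come from the MonitoringDashboard resource reference and that the table's field paths map directly onto spec fields; the resource name, text content, and color values are illustrative assumptions, not taken from this patch.

```yaml
# Hypothetical MonitoringDashboard showing the sectionHeader and text widgets
# documented above. Field paths follow the table; concrete values are
# illustrative assumptions.
apiVersion: monitoring.cnrm.cloud.google.com/v1beta1
kind: MonitoringDashboard
metadata:
  name: monitoringdashboard-widgets-sample   # assumed name
spec:
  displayName: "Widget examples"
  rowLayout:
    rows:
    - widgets:
      - sectionHeader:
          subtitle: "Frontend services"      # shown under the section title
          dividerBelow: true                 # divider in the table of contents
      - text:
          content: "## Latency overview"
          format: "MARKDOWN"                 # assumed format value
          style:
            backgroundColor: "#FFFFFF"       # "#RRGGBB" or "#RGB", per the table
            textColor: "#212121"
```

The same rows[].widgets[] list accepts the other widget types documented in this table (scorecard, pieChart, timeSeriesTable, and so on).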
@@ -11590,58 +15298,58 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.thresholds[].color

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].minAlignmentPeriod

Optional

string

-

{% verbatim %}The state color for this threshold. Color is not allowed in a XyChart.{% endverbatim %}

+

{% verbatim %}Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one-minute intervals.{% endverbatim %}

-

rowLayout.rows[].widgets[].scorecard.thresholds[].direction

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].tableDisplayOptions

Optional

-

string

-

{% verbatim %}The direction for the current threshold. Direction is not allowed in a XyChart.{% endverbatim %}

+

object

+

{% verbatim %}Optional. Table display options for configuring how the table is rendered.{% endverbatim %}

-

rowLayout.rows[].widgets[].scorecard.thresholds[].label

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].tableDisplayOptions.shownColumns

Optional

-

string

-

{% verbatim %}A label for the threshold.{% endverbatim %}

+

list (string)

+

{% verbatim %}Optional. This field is unused and has been replaced by TimeSeriesTable.column_settings{% endverbatim %}

-

rowLayout.rows[].widgets[].scorecard.thresholds[].targetAxis

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].tableDisplayOptions.shownColumns[]

Optional

string

-

{% verbatim %}The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard.{% endverbatim %}

+

{% verbatim %}{% endverbatim %}

-

rowLayout.rows[].widgets[].scorecard.thresholds[].value

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].tableTemplate

Optional

-

float

-

{% verbatim %}The value of the threshold. The value should be defined in the native scale of the metric.{% endverbatim %}

+

string

+

{% verbatim %}Optional. A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value, e.g. "${resource.labels.project_id}".{% endverbatim %}

-

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery

-

Required*

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery

+

Optional

object

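The scorecard rows relocated by these hunks include the threshold semantics spelled out above: a value at or past a threshold that triggers ABOVE or BELOW moves the scorecard into a warning or danger state, with danger taking precedence. As a hedged illustration only, a scorecard widget with a gauge view and two thresholds might look like the fragment below; the metric filter, bounds, colors, and aligner/reducer values are assumptions, not taken from this patch.

```yaml
# Fragment of spec.rowLayout.rows[].widgets[]; all concrete values are
# illustrative assumptions.
widgets:
- title: "CPU utilization"                   # assumed widget title
  scorecard:
    timeSeriesQuery:
      timeSeriesFilter:
        filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
        aggregation:
          alignmentPeriod: "60s"
          perSeriesAligner: "ALIGN_MEAN"     # assumed aligner value
          crossSeriesReducer: "REDUCE_MEAN"  # assumed reducer value
      unitOverride: "ratio"
    gaugeView:
      lowerBound: 0.0
      upperBound: 1.0
    thresholds:
    - value: 0.7
      direction: "ABOVE"                     # warning at or above 0.7
      color: "YELLOW"                        # assumed warning color value
    - value: 0.9
      direction: "ABOVE"                     # danger at or above 0.9
      color: "RED"                           # assumed danger color value
```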
@@ -11650,7 +15358,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.outputFullDuration

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.outputFullDuration

Optional

@@ -11666,7 +15374,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.prometheusQuery

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.prometheusQuery

Optional

@@ -11676,7 +15384,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter

Optional

@@ -11686,7 +15394,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation

Optional

@@ -11696,7 +15404,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.alignmentPeriod

Optional

@@ -11717,7 +15425,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.crossSeriesReducer

Optional

@@ -11740,7 +15448,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields

Optional

@@ -11750,7 +15458,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.groupByFields[]

Optional

@@ -11760,7 +15468,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.aggregation.perSeriesAligner

Optional

@@ -11785,7 +15493,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.filter

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.filter

Required*

@@ -11795,7 +15503,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter

Optional

@@ -11805,7 +15513,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.direction

Optional

@@ -11815,7 +15523,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -11825,7 +15533,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.pickTimeSeriesFilter.rankingMethod

Optional

@@ -11835,7 +15543,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation

Optional

@@ -11845,7 +15553,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.alignmentPeriod

Optional

@@ -11866,7 +15574,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.crossSeriesReducer

Optional

@@ -11889,7 +15597,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields

Optional

@@ -11899,7 +15607,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.groupByFields[]

Optional

@@ -11909,7 +15617,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilter.secondaryAggregation.perSeriesAligner

Optional

@@ -11934,7 +15642,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio

Optional

@@ -11944,7 +15652,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator

Optional

@@ -11954,7 +15662,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation

Optional

@@ -11964,7 +15672,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.alignmentPeriod

Optional

@@ -11985,7 +15693,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.crossSeriesReducer

Optional

@@ -12008,7 +15716,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields

Optional

@@ -12018,7 +15726,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.groupByFields[]

Optional

@@ -12028,7 +15736,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.aggregation.perSeriesAligner

Optional

@@ -12053,7 +15761,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.denominator.filter

Required*

@@ -12063,7 +15771,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator

Optional

@@ -12073,7 +15781,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation

Optional

@@ -12083,7 +15791,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.alignmentPeriod

Optional

@@ -12104,7 +15812,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.crossSeriesReducer

Optional

@@ -12127,7 +15835,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields

Optional

@@ -12137,7 +15845,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.groupByFields[]

Optional

@@ -12147,7 +15855,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.aggregation.perSeriesAligner

Optional

@@ -12172,7 +15880,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.numerator.filter

Required*

@@ -12182,7 +15890,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter

Optional

@@ -12192,7 +15900,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.direction

Optional

@@ -12202,7 +15910,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.numTimeSeries

Optional

@@ -12212,7 +15920,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.pickTimeSeriesFilter.rankingMethod

Optional

@@ -12222,7 +15930,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation

Optional

@@ -12232,7 +15940,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.alignmentPeriod

Optional

@@ -12253,7 +15961,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.crossSeriesReducer

Optional

@@ -12276,7 +15984,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields

Optional

@@ -12286,7 +15994,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.groupByFields[]

Optional

@@ -12296,7 +16004,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesFilterRatio.secondaryAggregation.perSeriesAligner

Optional

@@ -12321,7 +16029,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.timeSeriesQueryLanguage

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.timeSeriesQueryLanguage

Optional

@@ -12331,7 +16039,7 @@ rowLayout: -

rowLayout.rows[].widgets[].scorecard.timeSeriesQuery.unitOverride

+

rowLayout.rows[].widgets[].timeSeriesTable.dataSets[].timeSeriesQuery.unitOverride

Optional

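The long run of renamed rows above documents the query surface of timeSeriesTable.dataSets[]: each data set carries a timeSeriesQuery (a filter or filter ratio plus aggregation), an optional minAlignmentPeriod, and an optional tableTemplate, while columnSettings controls which columns are shown. The sketch below shows how those pieces fit together; the filter string, column id, and aligner/reducer values are assumptions for illustration only.

```yaml
# Fragment of spec.rowLayout.rows[].widgets[]; all concrete values are
# illustrative assumptions.
widgets:
- timeSeriesTable:
    columnSettings:
    - column: "value"                        # assumed column id
      visible: true
    dataSets:
    - minAlignmentPeriod: "600s"             # data assumed to arrive every 10 minutes
      tableTemplate: "${resource.labels.project_id}"
      timeSeriesQuery:
        unitOverride: "ratio"
        timeSeriesFilter:
          filter: 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"'
          aggregation:
            alignmentPeriod: "600s"
            perSeriesAligner: "ALIGN_MEAN"     # assumed aligner value
            crossSeriesReducer: "REDUCE_MEAN"  # assumed reducer value
            groupByFields:
            - "resource.labels.project_id"
```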
@@ -12341,152 +16049,12 @@ rowLayout: -

rowLayout.rows[].widgets[].sectionHeader

-

Optional

- - -

object

-

{% verbatim %}A widget that defines a section header for easier navigation of the dashboard.{% endverbatim %}

- - - - -

rowLayout.rows[].widgets[].sectionHeader.dividerBelow

-

Optional

- - -

boolean

-

{% verbatim %}Whether to insert a divider below the section in the table of contents{% endverbatim %}

- - - - -

rowLayout.rows[].widgets[].sectionHeader.subtitle

-

Optional

- - -

string

-

{% verbatim %}The subtitle of the section{% endverbatim %}

- - - - -

rowLayout.rows[].widgets[].singleViewGroup

-

Optional

- - -

object

-

{% verbatim %}A widget that groups the other widgets by using a dropdown menu.{% endverbatim %}

- - - - -

rowLayout.rows[].widgets[].text

-

Optional

- - -

object

-

{% verbatim %}A raw string or markdown displaying textual content.{% endverbatim %}

- - - - -

rowLayout.rows[].widgets[].text.content

-

Optional

- - -

string

-

{% verbatim %}The text content to be displayed.{% endverbatim %}

- - - - -

rowLayout.rows[].widgets[].text.format

-

Optional

- - -

string

-

{% verbatim %}How the text content is formatted.{% endverbatim %}

- - - - -

rowLayout.rows[].widgets[].text.style

-

Optional

- - -

object

-

{% verbatim %}How the text is styled{% endverbatim %}

- - - - -

rowLayout.rows[].widgets[].text.style.backgroundColor

-

Optional

- - -

string

-

{% verbatim %}The background color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

- - - - -

rowLayout.rows[].widgets[].text.style.fontSize

-

Optional

- - -

string

-

{% verbatim %}Font sizes for both the title and content. The title will still be larger relative to the content.{% endverbatim %}

- - - - -

rowLayout.rows[].widgets[].text.style.horizontalAlignment

-

Optional

- - -

string

-

{% verbatim %}The horizontal alignment of both the title and content{% endverbatim %}

- - - - -

rowLayout.rows[].widgets[].text.style.padding

-

Optional

- - -

string

-

{% verbatim %}The amount of padding around the widget{% endverbatim %}

- - - - -

rowLayout.rows[].widgets[].text.style.pointerLocation

-

Optional

- - -

string

-

{% verbatim %}The pointer location for this widget (also sometimes called a "tail"){% endverbatim %}

- - - - -

rowLayout.rows[].widgets[].text.style.textColor

-

Optional

- - -

string

-

{% verbatim %}The text color as a hex string. "#RRGGBB" or "#RGB"{% endverbatim %}

- - - - -

rowLayout.rows[].widgets[].text.style.verticalAlignment

+

rowLayout.rows[].widgets[].timeSeriesTable.metricVisualization

Optional

string

-

{% verbatim %}The vertical alignment of both the title and content{% endverbatim %}

+

{% verbatim %}Optional. Store rendering strategy{% endverbatim %}

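This file's diff ends with the new timeSeriesTable.metricVisualization row ("Optional. Store rendering strategy"). As a rough end-to-end sketch, and again assuming the documented field paths map directly onto the MonitoringDashboard spec, a complete resource with one table widget driven by a PromQL query could look like the following; the dashboard name, the query, and the NUMBER value are assumptions, not taken from this patch.

```yaml
# Hypothetical end-to-end example: one row containing a timeSeriesTable driven
# by a PromQL query. Names, query, and enum-style values are assumptions.
apiVersion: monitoring.cnrm.cloud.google.com/v1beta1
kind: MonitoringDashboard
metadata:
  name: monitoringdashboard-table-sample     # assumed name
spec:
  displayName: "PromQL table example"
  rowLayout:
    rows:
    - widgets:
      - title: "Target availability"
        timeSeriesTable:
          metricVisualization: "NUMBER"      # assumed rendering-strategy value
          dataSets:
          - timeSeriesQuery:
              prometheusQuery: 'up{job="my-job"}'   # assumed PromQL query
              unitOverride: "1"
```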
From 818dc97eed479818a35f370c0f5735a9e148cc8e Mon Sep 17 00:00:00 2001 From: justinsb Date: Wed, 26 Jun 2024 18:31:24 -0400 Subject: [PATCH 099/101] tests: create scenario tests for IAM policy --- .../scenarios/iam_add_remove/_http00.log | 140 +++++++++++++ .../scenarios/iam_add_remove/_http01.log | 140 +++++++++++++ .../scenarios/iam_add_remove/_http02.log | 196 ++++++++++++++++++ .../scenarios/iam_add_remove/_http03.log | 60 ++++++ .../scenarios/iam_add_remove/_http04.log | 78 +++++++ .../scenarios/iam_add_remove/_http05.log | 92 ++++++++ .../scenarios/iam_add_remove/_http06.log | 74 +++++++ .../scenarios/iam_add_remove/_http07.log | 67 ++++++ .../scenarios/iam_add_remove/_http08.log | 58 ++++++ .../scenarios/iam_add_remove/_object00.yaml | 28 +++ .../scenarios/iam_add_remove/_object01.yaml | 28 +++ .../scenarios/iam_add_remove/_object02.yaml | 27 +++ .../scenarios/iam_add_remove/_object03.yaml | 29 +++ .../scenarios/iam_add_remove/_object04.yaml | 29 +++ .../scenarios/iam_add_remove/_object05.yaml | 29 +++ .../scenarios/iam_add_remove/script.yaml | 116 +++++++++++ 16 files changed, 1191 insertions(+) create mode 100644 tests/e2e/testdata/scenarios/iam_add_remove/_http00.log create mode 100644 tests/e2e/testdata/scenarios/iam_add_remove/_http01.log create mode 100644 tests/e2e/testdata/scenarios/iam_add_remove/_http02.log create mode 100644 tests/e2e/testdata/scenarios/iam_add_remove/_http03.log create mode 100644 tests/e2e/testdata/scenarios/iam_add_remove/_http04.log create mode 100644 tests/e2e/testdata/scenarios/iam_add_remove/_http05.log create mode 100644 tests/e2e/testdata/scenarios/iam_add_remove/_http06.log create mode 100644 tests/e2e/testdata/scenarios/iam_add_remove/_http07.log create mode 100644 tests/e2e/testdata/scenarios/iam_add_remove/_http08.log create mode 100644 tests/e2e/testdata/scenarios/iam_add_remove/_object00.yaml create mode 100644 tests/e2e/testdata/scenarios/iam_add_remove/_object01.yaml create mode 100644 tests/e2e/testdata/scenarios/iam_add_remove/_object02.yaml create mode 100644 tests/e2e/testdata/scenarios/iam_add_remove/_object03.yaml create mode 100644 tests/e2e/testdata/scenarios/iam_add_remove/_object04.yaml create mode 100644 tests/e2e/testdata/scenarios/iam_add_remove/_object05.yaml create mode 100644 tests/e2e/testdata/scenarios/iam_add_remove/script.yaml diff --git a/tests/e2e/testdata/scenarios/iam_add_remove/_http00.log b/tests/e2e/testdata/scenarios/iam_add_remove/_http00.log new file mode 100644 index 0000000000..fcf63c838a --- /dev/null +++ b/tests/e2e/testdata/scenarios/iam_add_remove/_http00.log @@ -0,0 +1,140 @@ +GET https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts/sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +404 Not Found +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "error": { + "code": 404, + "errors": [ + { + "domain": "global", + "message": "Unknown service account", + "reason": "notFound" + } + ], + "message": "Unknown service account", + "status": "NOT_FOUND" + } +} + +--- + +POST https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts?alt=json&prettyPrint=false +Content-Type: application/json +User-Agent: google-api-go-client/0.5 
Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +{ + "accountId": "sa1-${uniqueId}", + "serviceAccount": { + "displayName": "sa1" + } +} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "displayName": "sa1", + "email": "sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/serviceAccounts/sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "oauth2ClientId": "888888888888888888888", + "projectId": "${projectId}", + "uniqueId": "111111111111111111111" +} + +--- + +GET https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts/sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "displayName": "sa1", + "email": "sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/serviceAccounts/sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "oauth2ClientId": "888888888888888888888", + "projectId": "${projectId}", + "uniqueId": "111111111111111111111" +} + +--- + +GET https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts/sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "displayName": "sa1", + "email": "sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/serviceAccounts/sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "oauth2ClientId": "888888888888888888888", + "projectId": "${projectId}", + "uniqueId": "111111111111111111111" +} + +--- + +GET https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts/sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "displayName": "sa1", + "email": "sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/serviceAccounts/sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "oauth2ClientId": "888888888888888888888", + "projectId": "${projectId}", + "uniqueId": "111111111111111111111" +} \ No newline at end of file diff --git a/tests/e2e/testdata/scenarios/iam_add_remove/_http01.log 
b/tests/e2e/testdata/scenarios/iam_add_remove/_http01.log new file mode 100644 index 0000000000..d8fc28699e --- /dev/null +++ b/tests/e2e/testdata/scenarios/iam_add_remove/_http01.log @@ -0,0 +1,140 @@ +GET https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts/sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +404 Not Found +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "error": { + "code": 404, + "errors": [ + { + "domain": "global", + "message": "Unknown service account", + "reason": "notFound" + } + ], + "message": "Unknown service account", + "status": "NOT_FOUND" + } +} + +--- + +POST https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts?alt=json&prettyPrint=false +Content-Type: application/json +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +{ + "accountId": "sa2-${uniqueId}", + "serviceAccount": { + "displayName": "sa2" + } +} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "displayName": "sa2", + "email": "sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/serviceAccounts/sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "oauth2ClientId": "888888888888888888888", + "projectId": "${projectId}", + "uniqueId": "111111111111111111111" +} + +--- + +GET https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts/sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "displayName": "sa2", + "email": "sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/serviceAccounts/sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "oauth2ClientId": "888888888888888888888", + "projectId": "${projectId}", + "uniqueId": "111111111111111111111" +} + +--- + +GET https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts/sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "displayName": "sa2", + "email": "sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "etag": "abcdef0123A=", + "name": 
"projects/${projectId}/serviceAccounts/sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "oauth2ClientId": "888888888888888888888", + "projectId": "${projectId}", + "uniqueId": "111111111111111111111" +} + +--- + +GET https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts/sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com?alt=json&prettyPrint=false +User-Agent: google-api-go-client/0.5 Terraform/ (+https://www.terraform.io) Terraform-Plugin-SDK/2.10.1 terraform-provider-google-beta/kcc/controller-manager + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "displayName": "sa2", + "email": "sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "etag": "abcdef0123A=", + "name": "projects/${projectId}/serviceAccounts/sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "oauth2ClientId": "888888888888888888888", + "projectId": "${projectId}", + "uniqueId": "111111111111111111111" +} \ No newline at end of file diff --git a/tests/e2e/testdata/scenarios/iam_add_remove/_http02.log b/tests/e2e/testdata/scenarios/iam_add_remove/_http02.log new file mode 100644 index 0000000000..0b1a032f4c --- /dev/null +++ b/tests/e2e/testdata/scenarios/iam_add_remove/_http02.log @@ -0,0 +1,196 @@ +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +404 Not Found +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "error": { + "code": 404, + "message": "Resource 'projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}' was not found", + "status": "NOT_FOUND" + } +} + +--- + +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +404 Not Found +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "error": { + "code": 404, + "message": "Resource 'projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}' was not found", + "status": "NOT_FOUND" + } +} + +--- + +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +404 Not Found +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "error": { + "code": 404, + "message": "Resource 'projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}' was not found", + "status": "NOT_FOUND" + } +} + +--- + +POST https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools?alt=json&caPoolId=capool-${uniqueId} +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +{ + "labels": { + "managed-by-cnrm": 
"true" + }, + "name": "projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}", + "tier": "ENTERPRISE" +} + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "metadata": { + "@type": "type.googleapis.com/google.cloud.security.privateca.v1.OperationMetadata", + "apiVersion": "v1", + "createTime": "2024-04-01T12:34:56.123456Z", + "target": "projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}", + "verb": "create" + }, + "name": "projects/${projectId}/locations/us-central1/operations/${operationID}" +} + +--- + +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/operations/${operationID}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "done": true, + "metadata": { + "@type": "type.googleapis.com/google.cloud.security.privateca.v1.OperationMetadata", + "apiVersion": "v1", + "createTime": "2024-04-01T12:34:56.123456Z", + "endTime": "2024-04-01T12:34:56.123456Z", + "target": "projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}", + "verb": "create" + }, + "name": "projects/${projectId}/locations/us-central1/operations/${operationID}", + "response": { + "@type": "type.googleapis.com/google.cloud.security.privateca.v1.CaPool", + "labels": { + "managed-by-cnrm": "true" + }, + "name": "projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}", + "tier": "ENTERPRISE" + } +} + +--- + +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "labels": { + "managed-by-cnrm": "true" + }, + "name": "projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}", + "tier": "ENTERPRISE" +} + +--- + +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}?alt=json +Content-Type: application/json +User-Agent: kcc/controller-manager DeclarativeClientLib/0.0.1 + +200 OK +Cache-Control: private +Content-Type: application/json; charset=UTF-8 +Server: ESF +Vary: Origin +Vary: X-Origin +Vary: Referer +X-Content-Type-Options: nosniff +X-Frame-Options: SAMEORIGIN +X-Xss-Protection: 0 + +{ + "labels": { + "managed-by-cnrm": "true" + }, + "name": "projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}", + "tier": "ENTERPRISE" +} \ No newline at end of file diff --git a/tests/e2e/testdata/scenarios/iam_add_remove/_http03.log b/tests/e2e/testdata/scenarios/iam_add_remove/_http03.log new file mode 100644 index 0000000000..fbc6899da6 --- /dev/null +++ b/tests/e2e/testdata/scenarios/iam_add_remove/_http03.log @@ -0,0 +1,60 @@ +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}:getIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json 
+x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fcapool-${uniqueId} + + + +{ + "etag": "abcdef0123A=", + "version": 3 +} + +--- + +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}:getIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fcapool-${uniqueId} + + + +{ + "etag": "abcdef0123A=", + "version": 3 +} + +--- + +POST https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}:setIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fcapool-${uniqueId} + +{ + "policy": { + "bindings": [ + { + "members": [ + "serviceAccount:sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.admin" + } + ], + "etag": "bRNUHSzt1iCSHUGuzpwB0w==", + "version": 3 + }, + "resource": "projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}" +} + + + +{ + "bindings": [ + { + "members": [ + "serviceAccount:sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.admin" + } + ], + "etag": "abcdef0123A=", + "version": 3 +} \ No newline at end of file diff --git a/tests/e2e/testdata/scenarios/iam_add_remove/_http04.log b/tests/e2e/testdata/scenarios/iam_add_remove/_http04.log new file mode 100644 index 0000000000..e7faf74c1e --- /dev/null +++ b/tests/e2e/testdata/scenarios/iam_add_remove/_http04.log @@ -0,0 +1,78 @@ +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}:getIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fcapool-${uniqueId} + + + +{ + "bindings": [ + { + "members": [ + "serviceAccount:sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.admin" + } + ], + "etag": "abcdef0123A=", + "version": 3 +} + +--- + +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}:getIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fcapool-${uniqueId} + + + +{ + "bindings": [ + { + "members": [ + "serviceAccount:sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.admin" + } + ], + "etag": "abcdef0123A=", + "version": 3 +} + +--- + +POST https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}:setIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fcapool-${uniqueId} + +{ + "policy": { + "bindings": [ + { + "members": [ + "serviceAccount:sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "serviceAccount:sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.admin" + } + ], + "etag": "LI3QTynmBpaKesycJhSiiw==", + "version": 3 + }, + "resource": "projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}" +} + + + +{ + "bindings": [ + { + "members": [ + "serviceAccount:sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com", + 
"serviceAccount:sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.admin" + } + ], + "etag": "abcdef0123A=", + "version": 3 +} \ No newline at end of file diff --git a/tests/e2e/testdata/scenarios/iam_add_remove/_http05.log b/tests/e2e/testdata/scenarios/iam_add_remove/_http05.log new file mode 100644 index 0000000000..e9455c13dc --- /dev/null +++ b/tests/e2e/testdata/scenarios/iam_add_remove/_http05.log @@ -0,0 +1,92 @@ +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}:getIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fcapool-${uniqueId} + + + +{ + "bindings": [ + { + "members": [ + "serviceAccount:sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "serviceAccount:sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.admin" + } + ], + "etag": "abcdef0123A=", + "version": 3 +} + +--- + +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}:getIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fcapool-${uniqueId} + + + +{ + "bindings": [ + { + "members": [ + "serviceAccount:sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "serviceAccount:sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.admin" + } + ], + "etag": "abcdef0123A=", + "version": 3 +} + +--- + +POST https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}:setIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fcapool-${uniqueId} + +{ + "policy": { + "bindings": [ + { + "members": [ + "serviceAccount:sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "serviceAccount:sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.admin" + }, + { + "members": [ + "serviceAccount:sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.auditor" + } + ], + "etag": "U5pfaSq/FRHO2MZ24uAe4w==", + "version": 3 + }, + "resource": "projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}" +} + + + +{ + "bindings": [ + { + "members": [ + "serviceAccount:sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "serviceAccount:sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.admin" + }, + { + "members": [ + "serviceAccount:sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.auditor" + } + ], + "etag": "abcdef0123A=", + "version": 3 +} \ No newline at end of file diff --git a/tests/e2e/testdata/scenarios/iam_add_remove/_http06.log b/tests/e2e/testdata/scenarios/iam_add_remove/_http06.log new file mode 100644 index 0000000000..743bcfb212 --- /dev/null +++ b/tests/e2e/testdata/scenarios/iam_add_remove/_http06.log @@ -0,0 +1,74 @@ +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}:getIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fcapool-${uniqueId} + + + +{ + "bindings": [ + { + "members": [ + 
"serviceAccount:sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com", + "serviceAccount:sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.admin" + }, + { + "members": [ + "serviceAccount:sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.auditor" + } + ], + "etag": "abcdef0123A=", + "version": 3 +} + +--- + +POST https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}:setIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fcapool-${uniqueId} + +{ + "policy": { + "bindings": [ + { + "members": [ + "serviceAccount:sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.admin" + }, + { + "members": [ + "serviceAccount:sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.auditor" + } + ], + "etag": "7i4pdFTLXWtbIAUIv6WZxQ==", + "version": 3 + }, + "resource": "projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}" +} + + + +{ + "bindings": [ + { + "members": [ + "serviceAccount:sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.admin" + }, + { + "members": [ + "serviceAccount:sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.auditor" + } + ], + "etag": "abcdef0123A=", + "version": 3 +} \ No newline at end of file diff --git a/tests/e2e/testdata/scenarios/iam_add_remove/_http07.log b/tests/e2e/testdata/scenarios/iam_add_remove/_http07.log new file mode 100644 index 0000000000..fe2f63cefc --- /dev/null +++ b/tests/e2e/testdata/scenarios/iam_add_remove/_http07.log @@ -0,0 +1,67 @@ +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}:getIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fcapool-${uniqueId} + + + +{ + "bindings": [ + { + "members": [ + "serviceAccount:sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.admin" + }, + { + "members": [ + "serviceAccount:sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.auditor" + } + ], + "etag": "abcdef0123A=", + "version": 3 +} + +--- + +POST https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}:setIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fcapool-${uniqueId} + +{ + "policy": { + "bindings": [ + { + "role": "roles/privateca.admin" + }, + { + "members": [ + "serviceAccount:sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.auditor" + } + ], + "etag": "9PV0OUST/G0nKsXZvXJV9Q==", + "version": 3 + }, + "resource": "projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}" +} + + + +{ + "bindings": [ + { + "role": "roles/privateca.admin" + }, + { + "members": [ + "serviceAccount:sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.auditor" + } + ], + "etag": "abcdef0123A=", + "version": 3 +} \ No newline at end of file diff --git a/tests/e2e/testdata/scenarios/iam_add_remove/_http08.log b/tests/e2e/testdata/scenarios/iam_add_remove/_http08.log new file mode 100644 
index 0000000000..a557df60b5 --- /dev/null +++ b/tests/e2e/testdata/scenarios/iam_add_remove/_http08.log @@ -0,0 +1,58 @@ +GET https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}:getIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fcapool-${uniqueId} + + + +{ + "bindings": [ + { + "role": "roles/privateca.admin" + }, + { + "members": [ + "serviceAccount:sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com" + ], + "role": "roles/privateca.auditor" + } + ], + "etag": "abcdef0123A=", + "version": 3 +} + +--- + +POST https://privateca.googleapis.com/v1/projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}:setIamPolicy?%24alt=json%3Benum-encoding%3Dint +Content-Type: application/json +x-goog-request-params: resource=projects%2F${projectId}%2Flocations%2Fus-central1%2FcaPools%2Fcapool-${uniqueId} + +{ + "policy": { + "bindings": [ + { + "role": "roles/privateca.admin" + }, + { + "role": "roles/privateca.auditor" + } + ], + "etag": "H6Lv7qQP+HNJ0J0EG/qAXQ==", + "version": 3 + }, + "resource": "projects/${projectId}/locations/us-central1/caPools/capool-${uniqueId}" +} + + + +{ + "bindings": [ + { + "role": "roles/privateca.admin" + }, + { + "role": "roles/privateca.auditor" + } + ], + "etag": "abcdef0123A=", + "version": 3 +} \ No newline at end of file diff --git a/tests/e2e/testdata/scenarios/iam_add_remove/_object00.yaml b/tests/e2e/testdata/scenarios/iam_add_remove/_object00.yaml new file mode 100644 index 0000000000..d1fa25a549 --- /dev/null +++ b/tests/e2e/testdata/scenarios/iam_add_remove/_object00.yaml @@ -0,0 +1,28 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMServiceAccount +metadata: + annotations: + cnrm.cloud.google.com/management-conflict-prevention-policy: none + cnrm.cloud.google.com/project-id: ${projectId} + cnrm.cloud.google.com/state-into-spec: merge + finalizers: + - cnrm.cloud.google.com/finalizer + - cnrm.cloud.google.com/deletion-defender + generation: 1 + name: sa1 + namespace: ${projectId} +spec: + displayName: sa1 + resourceID: sa1-${uniqueId} +status: + conditions: + - lastTransitionTime: "1970-01-01T00:00:00Z" + message: The resource is up to date + reason: UpToDate + status: "True" + type: Ready + email: sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com + member: serviceAccount:sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com + name: projects/${projectId}/serviceAccounts/sa1-${uniqueId}@${projectId}.iam.gserviceaccount.com + observedGeneration: 1 + uniqueId: "12345678" diff --git a/tests/e2e/testdata/scenarios/iam_add_remove/_object01.yaml b/tests/e2e/testdata/scenarios/iam_add_remove/_object01.yaml new file mode 100644 index 0000000000..391bffb801 --- /dev/null +++ b/tests/e2e/testdata/scenarios/iam_add_remove/_object01.yaml @@ -0,0 +1,28 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMServiceAccount +metadata: + annotations: + cnrm.cloud.google.com/management-conflict-prevention-policy: none + cnrm.cloud.google.com/project-id: ${projectId} + cnrm.cloud.google.com/state-into-spec: merge + finalizers: + - cnrm.cloud.google.com/finalizer + - cnrm.cloud.google.com/deletion-defender + generation: 1 + name: sa2 + namespace: ${projectId} +spec: + displayName: sa2 + resourceID: sa2-${uniqueId} +status: + conditions: + - lastTransitionTime: "1970-01-01T00:00:00Z" + message: The resource is up to date + reason: UpToDate + status: "True" + type: Ready + 
email: sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com + member: serviceAccount:sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com + name: projects/${projectId}/serviceAccounts/sa2-${uniqueId}@${projectId}.iam.gserviceaccount.com + observedGeneration: 1 + uniqueId: "12345678" diff --git a/tests/e2e/testdata/scenarios/iam_add_remove/_object02.yaml b/tests/e2e/testdata/scenarios/iam_add_remove/_object02.yaml new file mode 100644 index 0000000000..93b83bc7bf --- /dev/null +++ b/tests/e2e/testdata/scenarios/iam_add_remove/_object02.yaml @@ -0,0 +1,27 @@ +apiVersion: privateca.cnrm.cloud.google.com/v1beta1 +kind: PrivateCACAPool +metadata: + annotations: + cnrm.cloud.google.com/management-conflict-prevention-policy: none + cnrm.cloud.google.com/project-id: ${projectId} + cnrm.cloud.google.com/state-into-spec: merge + finalizers: + - cnrm.cloud.google.com/finalizer + - cnrm.cloud.google.com/deletion-defender + generation: 1 + name: privatecacapool + namespace: ${projectId} +spec: + location: us-central1 + projectRef: + external: projects/${projectId} + resourceID: capool-${uniqueId} + tier: ENTERPRISE +status: + conditions: + - lastTransitionTime: "1970-01-01T00:00:00Z" + message: The resource is up to date + reason: UpToDate + status: "True" + type: Ready + observedGeneration: 1 diff --git a/tests/e2e/testdata/scenarios/iam_add_remove/_object03.yaml b/tests/e2e/testdata/scenarios/iam_add_remove/_object03.yaml new file mode 100644 index 0000000000..053d74aa7f --- /dev/null +++ b/tests/e2e/testdata/scenarios/iam_add_remove/_object03.yaml @@ -0,0 +1,29 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +metadata: + annotations: + cnrm.cloud.google.com/project-id: ${projectId} + cnrm.cloud.google.com/state-into-spec: merge + finalizers: + - cnrm.cloud.google.com/finalizer + - cnrm.cloud.google.com/deletion-defender + generation: 1 + name: admin-sa1 + namespace: ${projectId} +spec: + memberFrom: + serviceAccountRef: + name: sa1 + resourceRef: + apiVersion: privateca.cnrm.cloud.google.com/v1beta1 + kind: PrivateCACAPool + name: privatecacapool + role: roles/privateca.admin +status: + conditions: + - lastTransitionTime: "1970-01-01T00:00:00Z" + message: The resource is up to date + reason: UpToDate + status: "True" + type: Ready + observedGeneration: 1 diff --git a/tests/e2e/testdata/scenarios/iam_add_remove/_object04.yaml b/tests/e2e/testdata/scenarios/iam_add_remove/_object04.yaml new file mode 100644 index 0000000000..f91708ec6a --- /dev/null +++ b/tests/e2e/testdata/scenarios/iam_add_remove/_object04.yaml @@ -0,0 +1,29 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +metadata: + annotations: + cnrm.cloud.google.com/project-id: ${projectId} + cnrm.cloud.google.com/state-into-spec: merge + finalizers: + - cnrm.cloud.google.com/finalizer + - cnrm.cloud.google.com/deletion-defender + generation: 1 + name: admin-sa2 + namespace: ${projectId} +spec: + memberFrom: + serviceAccountRef: + name: sa2 + resourceRef: + apiVersion: privateca.cnrm.cloud.google.com/v1beta1 + kind: PrivateCACAPool + name: privatecacapool + role: roles/privateca.admin +status: + conditions: + - lastTransitionTime: "1970-01-01T00:00:00Z" + message: The resource is up to date + reason: UpToDate + status: "True" + type: Ready + observedGeneration: 1 diff --git a/tests/e2e/testdata/scenarios/iam_add_remove/_object05.yaml b/tests/e2e/testdata/scenarios/iam_add_remove/_object05.yaml new file mode 100644 index 0000000000..dd283677aa --- /dev/null +++ 
b/tests/e2e/testdata/scenarios/iam_add_remove/_object05.yaml @@ -0,0 +1,29 @@ +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +metadata: + annotations: + cnrm.cloud.google.com/project-id: ${projectId} + cnrm.cloud.google.com/state-into-spec: merge + finalizers: + - cnrm.cloud.google.com/finalizer + - cnrm.cloud.google.com/deletion-defender + generation: 1 + name: auditor-sa2 + namespace: ${projectId} +spec: + memberFrom: + serviceAccountRef: + name: sa2 + resourceRef: + apiVersion: privateca.cnrm.cloud.google.com/v1beta1 + kind: PrivateCACAPool + name: privatecacapool + role: roles/privateca.auditor +status: + conditions: + - lastTransitionTime: "1970-01-01T00:00:00Z" + message: The resource is up to date + reason: UpToDate + status: "True" + type: Ready + observedGeneration: 1 diff --git a/tests/e2e/testdata/scenarios/iam_add_remove/script.yaml b/tests/e2e/testdata/scenarios/iam_add_remove/script.yaml new file mode 100644 index 0000000000..a5d9e01070 --- /dev/null +++ b/tests/e2e/testdata/scenarios/iam_add_remove/script.yaml @@ -0,0 +1,116 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMServiceAccount +metadata: + name: sa1 +spec: + displayName: sa1 + resourceID: sa1-${uniqueId} + +--- + +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMServiceAccount +metadata: + name: sa2 +spec: + displayName: sa2 + resourceID: sa2-${uniqueId} + +--- + +apiVersion: privateca.cnrm.cloud.google.com/v1beta1 +kind: PrivateCACAPool +metadata: + name: privatecacapool +spec: + projectRef: + external: projects/${projectId} + location: "us-central1" + tier: ENTERPRISE + resourceID: capool-${uniqueId} + +--- + +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +metadata: + name: admin-sa1 +spec: + memberFrom: + serviceAccountRef: + name: sa1 + role: roles/privateca.admin + resourceRef: + apiVersion: privateca.cnrm.cloud.google.com/v1beta1 + kind: PrivateCACAPool + name: privatecacapool + +--- + +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +metadata: + name: admin-sa2 +spec: + memberFrom: + serviceAccountRef: + name: sa2 + role: roles/privateca.admin + resourceRef: + apiVersion: privateca.cnrm.cloud.google.com/v1beta1 + kind: PrivateCACAPool + name: privatecacapool + +--- + +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +metadata: + name: auditor-sa2 +spec: + memberFrom: + serviceAccountRef: + name: sa2 + role: roles/privateca.auditor + resourceRef: + apiVersion: privateca.cnrm.cloud.google.com/v1beta1 + kind: PrivateCACAPool + name: privatecacapool + +--- + +TEST: DELETE +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +metadata: + name: admin-sa2 + +--- + +TEST: DELETE +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +metadata: + name: admin-sa1 + +--- + +TEST: DELETE +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +metadata: + name: auditor-sa2 From 
a1408163580183bb275f057aee0780d2bd640b39 Mon Sep 17 00:00:00 2001 From: justinsb Date: Fri, 28 Jun 2024 16:08:35 -0400 Subject: [PATCH 100/101] monitoringresource: support incidentList --- .../v1beta1/monitoringdashboard_types.go | 21 +- .../v1beta1/zz_generated.deepcopy.go | 39 ++- ...ards.monitoring.cnrm.cloud.google.com.yaml | 166 ++++++++++ dev/tools/proto-to-mapper/main.go | 77 +++-- go.mod | 2 +- .../v1beta1/monitoringdashboard_types.go | 28 ++ .../v1beta1/zz_generated.deepcopy.go | 66 ++++ .../dashboard_generated.mappings.go | 46 +-- .../direct/monitoring/dashboard_mappings.go | 20 ++ ...ated_export_monitoringdashboardfull.golden | 11 + ...object_monitoringdashboardfull.golden.yaml | 11 + .../monitoringdashboardfull/_http.log | 198 +++++++++++ .../monitoringdashboardfull/create.yaml | 11 + .../monitoring/monitoringdashboard.md | 308 ++++++++++++++++++ 14 files changed, 944 insertions(+), 60 deletions(-) diff --git a/apis/monitoring/v1beta1/monitoringdashboard_types.go b/apis/monitoring/v1beta1/monitoringdashboard_types.go index 89b4259351..146cf98e95 100644 --- a/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -453,10 +453,8 @@ type Widget struct { // A widget that shows a stream of logs. LogsPanel *LogsPanel `json:"logsPanel,omitempty"` - /*NOTYET // A widget that shows list of incidents. IncidentList *IncidentList `json:"incidentList,omitempty"` - */ // A widget that displays timeseries data as a pie chart. PieChart *PieChart `json:"pieChart,omitempty"` @@ -605,7 +603,7 @@ type IncidentList struct { // The resource doesn't need to be fully specified. That is, you can specify // the resource type but not the values of the resource labels. // The resource type and labels are used for filtering. - MonitoredResources []string `json:"monitoredResources,omitempty"` + MonitoredResources []MonitoredResource `json:"monitoredResources,omitempty"` // Optional. A list of alert policy names to filter the incident list by. // Don't include the project ID prefix in the policy name. For @@ -613,6 +611,23 @@ type IncidentList struct { PolicyNames []string `json:"policyNames,omitempty"` } +// +kcc:proto=google.api.MonitoredResource +type MonitoredResource struct { + // Required. The monitored resource type. This field must match + // the `type` field of a + // [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] + // object. For example, the type of a Compute Engine VM instance is + // `gce_instance`. Some descriptors include the service name in the type; for + // example, the type of a Datastream stream is + // `datastream.googleapis.com/Stream`. + Type *string `json:"type,omitempty"` + + // Required. Values for all of the labels listed in the associated monitored + // resource descriptor. For example, Compute Engine VM instances use the + // labels `"project_id"`, `"instance_id"`, and `"zone"`. + Labels map[string]string `json:"labels,omitempty"` +} + // +kcc:proto=google.monitoring.dashboard.v1.TableDisplayOptions type TableDisplayOptions struct { // Optional. 
This field is unused and has been replaced by diff --git a/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/apis/monitoring/v1beta1/zz_generated.deepcopy.go index 2ed8807085..c673669d0a 100644 --- a/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -290,8 +290,10 @@ func (in *IncidentList) DeepCopyInto(out *IncidentList) { *out = *in if in.MonitoredResources != nil { in, out := &in.MonitoredResources, &out.MonitoredResources - *out = make([]string, len(*in)) - copy(*out, *in) + *out = make([]MonitoredResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } if in.PolicyNames != nil { in, out := &in.PolicyNames, &out.PolicyNames @@ -337,6 +339,34 @@ func (in *LogsPanel) DeepCopy() *LogsPanel { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoredResource) DeepCopyInto(out *MonitoredResource) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoredResource. +func (in *MonitoredResource) DeepCopy() *MonitoredResource { + if in == nil { + return nil + } + out := new(MonitoredResource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MonitoringDashboard) DeepCopyInto(out *MonitoringDashboard) { *out = *in @@ -1277,6 +1307,11 @@ func (in *Widget) DeepCopyInto(out *Widget) { *out = new(LogsPanel) (*in).DeepCopyInto(*out) } + if in.IncidentList != nil { + in, out := &in.IncidentList, &out.IncidentList + *out = new(IncidentList) + (*in).DeepCopyInto(*out) + } if in.PieChart != nil { in, out := &in.PieChart, &out.PieChart *out = new(PieChart) diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml index 97a9224a5a..a75d989f6e 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml @@ -203,6 +203,48 @@ spec: up of alphanumerics, dashes and underscores. Widget ids are optional. type: string + incidentList: + description: A widget that shows list of incidents. + properties: + monitoredResources: + description: Optional. The monitored resource + for which incidents are listed. The resource + doesn't need to be fully specified. That is, + you can specify the resource type but not the + values of the resource labels. The resource + type and labels are used for filtering. + items: + properties: + labels: + additionalProperties: + type: string + description: Required. Values for all of + the labels listed in the associated monitored + resource descriptor. For example, Compute + Engine VM instances use the labels `"project_id"`, + `"instance_id"`, and `"zone"`. + type: object + type: + description: Required. The monitored resource + type. 
This field must match the `type` + field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] + object. For example, the type of a Compute + Engine VM instance is `gce_instance`. + Some descriptors include the service name + in the type; for example, the type of + a Datastream stream is `datastream.googleapis.com/Stream`. + type: string + type: object + type: array + policyNames: + description: Optional. A list of alert policy + names to filter the incident list by. Don't + include the project ID prefix in the policy + name. For example, use `alertPolicies/utilization`. + items: + type: string + type: array + type: object logsPanel: description: A widget that shows a stream of logs. properties: @@ -3080,6 +3122,46 @@ spec: of alphanumerics, dashes and underscores. Widget ids are optional. type: string + incidentList: + description: A widget that shows list of incidents. + properties: + monitoredResources: + description: Optional. The monitored resource for which + incidents are listed. The resource doesn't need to + be fully specified. That is, you can specify the resource + type but not the values of the resource labels. The + resource type and labels are used for filtering. + items: + properties: + labels: + additionalProperties: + type: string + description: Required. Values for all of the labels + listed in the associated monitored resource + descriptor. For example, Compute Engine VM instances + use the labels `"project_id"`, `"instance_id"`, + and `"zone"`. + type: object + type: + description: Required. The monitored resource + type. This field must match the `type` field + of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] + object. For example, the type of a Compute Engine + VM instance is `gce_instance`. Some descriptors + include the service name in the type; for example, + the type of a Datastream stream is `datastream.googleapis.com/Stream`. + type: string + type: object + type: array + policyNames: + description: Optional. A list of alert policy names + to filter the incident list by. Don't include the + project ID prefix in the policy name. For example, + use `alertPolicies/utilization`. + items: + type: string + type: array + type: object logsPanel: description: A widget that shows a stream of logs. properties: @@ -5810,6 +5892,48 @@ spec: up of alphanumerics, dashes and underscores. Widget ids are optional. type: string + incidentList: + description: A widget that shows list of incidents. + properties: + monitoredResources: + description: Optional. The monitored resource for + which incidents are listed. The resource doesn't + need to be fully specified. That is, you can specify + the resource type but not the values of the resource + labels. The resource type and labels are used + for filtering. + items: + properties: + labels: + additionalProperties: + type: string + description: Required. Values for all of the + labels listed in the associated monitored + resource descriptor. For example, Compute + Engine VM instances use the labels `"project_id"`, + `"instance_id"`, and `"zone"`. + type: object + type: + description: Required. The monitored resource + type. This field must match the `type` field + of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] + object. For example, the type of a Compute + Engine VM instance is `gce_instance`. Some + descriptors include the service name in + the type; for example, the type of a Datastream + stream is `datastream.googleapis.com/Stream`. 
+ type: string + type: object + type: array + policyNames: + description: Optional. A list of alert policy names + to filter the incident list by. Don't include + the project ID prefix in the policy name. For + example, use `alertPolicies/utilization`. + items: + type: string + type: array + type: object logsPanel: description: A widget that shows a stream of logs. properties: @@ -8689,6 +8813,48 @@ spec: up of alphanumerics, dashes and underscores. Widget ids are optional. type: string + incidentList: + description: A widget that shows list of incidents. + properties: + monitoredResources: + description: Optional. The monitored resource + for which incidents are listed. The resource + doesn't need to be fully specified. That is, + you can specify the resource type but not the + values of the resource labels. The resource + type and labels are used for filtering. + items: + properties: + labels: + additionalProperties: + type: string + description: Required. Values for all of + the labels listed in the associated monitored + resource descriptor. For example, Compute + Engine VM instances use the labels `"project_id"`, + `"instance_id"`, and `"zone"`. + type: object + type: + description: Required. The monitored resource + type. This field must match the `type` + field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] + object. For example, the type of a Compute + Engine VM instance is `gce_instance`. + Some descriptors include the service name + in the type; for example, the type of + a Datastream stream is `datastream.googleapis.com/Stream`. + type: string + type: object + type: array + policyNames: + description: Optional. A list of alert policy + names to filter the incident list by. Don't + include the project ID prefix in the policy + name. For example, use `alertPolicies/utilization`. + items: + type: string + type: array + type: object logsPanel: description: A widget that shows a stream of logs. 
properties: diff --git a/dev/tools/proto-to-mapper/main.go b/dev/tools/proto-to-mapper/main.go index 77a2d72162..b0ea7a29fe 100644 --- a/dev/tools/proto-to-mapper/main.go +++ b/dev/tools/proto-to-mapper/main.go @@ -258,51 +258,64 @@ func (v *visitor) writeTypes(out io.Writer, msg protoreflect.MessageDescriptor) jsonName := field.JSONName() goType := "" - switch field.Kind() { - case protoreflect.MessageKind: - goType = protoNameForType(field.Message()) + if field.IsMap() { + entryMsg := field.Message() + keyKind := entryMsg.Fields().ByName("key").Kind() + valueKind := entryMsg.Fields().ByName("value").Kind() + if keyKind == protoreflect.StringKind && valueKind == protoreflect.StringKind { + goType = "map[string]string" + } else if keyKind == protoreflect.StringKind && valueKind == protoreflect.Int64Kind { + goType = "map[string]int64" + } else { + fmt.Fprintf(out, "// TODO: map type %v %v\n", keyKind, valueKind) + } + } else { + switch field.Kind() { + case protoreflect.MessageKind: + goType = protoNameForType(field.Message()) - case protoreflect.EnumKind: - goType = "string" //string(field.Enum().Name()) + case protoreflect.EnumKind: + goType = "string" //string(field.Enum().Name()) - case protoreflect.StringKind: - goType = "string" + case protoreflect.StringKind: + goType = "string" - case protoreflect.Int32Kind: - goType = "int32" + case protoreflect.Int32Kind: + goType = "int32" - case protoreflect.Int64Kind: - goType = "int64" + case protoreflect.Int64Kind: + goType = "int64" - case protoreflect.Uint32Kind: - goType = "uint32" + case protoreflect.Uint32Kind: + goType = "uint32" - case protoreflect.Uint64Kind: - goType = "uint64" + case protoreflect.Uint64Kind: + goType = "uint64" - case protoreflect.Fixed64Kind: - goType = "uint64" + case protoreflect.Fixed64Kind: + goType = "uint64" - case protoreflect.BoolKind: - goType = "bool" + case protoreflect.BoolKind: + goType = "bool" - case protoreflect.DoubleKind: - goType = "float64" + case protoreflect.DoubleKind: + goType = "float64" - case protoreflect.FloatKind: - goType = "float32" + case protoreflect.FloatKind: + goType = "float32" - case protoreflect.BytesKind: - goType = "[]byte" + case protoreflect.BytesKind: + goType = "[]byte" - default: - klog.Fatalf("unhandled kind %q for field %v", field.Kind(), field) - } + default: + klog.Fatalf("unhandled kind %q for field %v", field.Kind(), field) + } - if field.Cardinality() == protoreflect.Repeated { - goType = "[]" + goType - } else { - goType = "*" + goType + if field.Cardinality() == protoreflect.Repeated { + goType = "[]" + goType + } else { + goType = "*" + goType + } } // Blank line between fields for readability diff --git a/go.mod b/go.mod index c656ecae7b..384cc5e431 100644 --- a/go.mod +++ b/go.mod @@ -53,6 +53,7 @@ require ( golang.org/x/sync v0.7.0 golang.org/x/time v0.5.0 google.golang.org/api v0.185.0 + google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 google.golang.org/grpc v1.64.0 google.golang.org/protobuf v1.34.2 gopkg.in/dnaeon/go-vcr.v3 v3.2.0 @@ -213,7 +214,6 @@ require ( gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git 
a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go index 90b41711e5..6b820025af 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -202,6 +202,16 @@ type DashboardGridLayout struct { Widgets []DashboardWidgets `json:"widgets,omitempty"` } +type DashboardIncidentList struct { + /* Optional. The monitored resource for which incidents are listed. The resource doesn't need to be fully specified. That is, you can specify the resource type but not the values of the resource labels. The resource type and labels are used for filtering. */ + // +optional + MonitoredResources []DashboardMonitoredResources `json:"monitoredResources,omitempty"` + + /* Optional. A list of alert policy names to filter the incident list by. Don't include the project ID prefix in the policy name. For example, use `alertPolicies/utilization`. */ + // +optional + PolicyNames []string `json:"policyNames,omitempty"` +} + type DashboardLogsPanel struct { /* A filter that chooses which log entries to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries. */ // +optional @@ -212,6 +222,16 @@ type DashboardLogsPanel struct { ResourceNames []DashboardResourceNames `json:"resourceNames,omitempty"` } +type DashboardMonitoredResources struct { + /* Required. Values for all of the labels listed in the associated monitored resource descriptor. For example, Compute Engine VM instances use the labels `"project_id"`, `"instance_id"`, and `"zone"`. */ + // +optional + Labels map[string]string `json:"labels,omitempty"` + + /* Required. The monitored resource type. This field must match the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For example, the type of a Compute Engine VM instance is `gce_instance`. Some descriptors include the service name in the type; for example, the type of a Datastream stream is `datastream.googleapis.com/Stream`. */ + // +optional + Type *string `json:"type,omitempty"` +} + type DashboardMosaicLayout struct { /* The number of columns in the mosaic grid. The number of columns must be between 1 and 12, inclusive. */ // +optional @@ -637,6 +657,10 @@ type DashboardWidget struct { // +optional Id *string `json:"id,omitempty"` + /* A widget that shows list of incidents. */ + // +optional + IncidentList *DashboardIncidentList `json:"incidentList,omitempty"` + /* A widget that shows a stream of logs. */ // +optional LogsPanel *DashboardLogsPanel `json:"logsPanel,omitempty"` @@ -695,6 +719,10 @@ type DashboardWidgets struct { // +optional Id *string `json:"id,omitempty"` + /* A widget that shows list of incidents. */ + // +optional + IncidentList *DashboardIncidentList `json:"incidentList,omitempty"` + /* A widget that shows a stream of logs. 
*/ // +optional LogsPanel *DashboardLogsPanel `json:"logsPanel,omitempty"` diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go index 0170a30462..ee47d9cde8 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -824,6 +824,34 @@ func (in *DashboardGridLayout) DeepCopy() *DashboardGridLayout { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardIncidentList) DeepCopyInto(out *DashboardIncidentList) { + *out = *in + if in.MonitoredResources != nil { + in, out := &in.MonitoredResources, &out.MonitoredResources + *out = make([]DashboardMonitoredResources, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PolicyNames != nil { + in, out := &in.PolicyNames, &out.PolicyNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardIncidentList. +func (in *DashboardIncidentList) DeepCopy() *DashboardIncidentList { + if in == nil { + return nil + } + out := new(DashboardIncidentList) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DashboardLogsPanel) DeepCopyInto(out *DashboardLogsPanel) { *out = *in @@ -852,6 +880,34 @@ func (in *DashboardLogsPanel) DeepCopy() *DashboardLogsPanel { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardMonitoredResources) DeepCopyInto(out *DashboardMonitoredResources) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardMonitoredResources. +func (in *DashboardMonitoredResources) DeepCopy() *DashboardMonitoredResources { + if in == nil { + return nil + } + out := new(DashboardMonitoredResources) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DashboardMosaicLayout) DeepCopyInto(out *DashboardMosaicLayout) { *out = *in @@ -1575,6 +1631,11 @@ func (in *DashboardWidget) DeepCopyInto(out *DashboardWidget) { *out = new(string) **out = **in } + if in.IncidentList != nil { + in, out := &in.IncidentList, &out.IncidentList + *out = new(DashboardIncidentList) + (*in).DeepCopyInto(*out) + } if in.LogsPanel != nil { in, out := &in.LogsPanel, &out.LogsPanel *out = new(DashboardLogsPanel) @@ -1661,6 +1722,11 @@ func (in *DashboardWidgets) DeepCopyInto(out *DashboardWidgets) { *out = new(string) **out = **in } + if in.IncidentList != nil { + in, out := &in.IncidentList, &out.IncidentList + *out = new(DashboardIncidentList) + (*in).DeepCopyInto(*out) + } if in.LogsPanel != nil { in, out := &in.LogsPanel, &out.LogsPanel *out = new(DashboardLogsPanel) diff --git a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go index 83df3d984f..9ed61ce43b 100644 --- a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go @@ -154,26 +154,24 @@ func GridLayout_ToProto(mapCtx *MapContext, in *krm.GridLayout) *pb.GridLayout { out.Widgets = Slice_ToProto(mapCtx, in.Widgets, Widget_ToProto) return out } - -// func IncidentList_FromProto(mapCtx *MapContext, in *pb.IncidentList) *krm.IncidentList { -// if in == nil { -// return nil -// } -// out := &krm.IncidentList{} -// out.MonitoredResources = Slice_FromProto(mapCtx, in.MonitoredResources, string_FromProto) -// out.PolicyNames = in.PolicyNames -// return out -// } -// -// func IncidentList_ToProto(mapCtx *MapContext, in *krm.IncidentList) *pb.IncidentList { -// if in == nil { -// return nil -// } -// out := &pb.IncidentList{} -// out.MonitoredResources = Slice_ToProto(mapCtx, in.MonitoredResources, string_ToProto) -// out.PolicyNames = in.PolicyNames -// return out -// } +func IncidentList_FromProto(mapCtx *MapContext, in *pb.IncidentList) *krm.IncidentList { + if in == nil { + return nil + } + out := &krm.IncidentList{} + out.MonitoredResources = Slice_FromProto(mapCtx, in.MonitoredResources, MonitoredResource_FromProto) + out.PolicyNames = in.PolicyNames + return out +} +func IncidentList_ToProto(mapCtx *MapContext, in *krm.IncidentList) *pb.IncidentList { + if in == nil { + return nil + } + out := &pb.IncidentList{} + out.MonitoredResources = Slice_ToProto(mapCtx, in.MonitoredResources, MonitoredResource_ToProto) + out.PolicyNames = in.PolicyNames + return out +} func LogsPanel_FromProto(mapCtx *MapContext, in *pb.LogsPanel) *krm.LogsPanel { if in == nil { return nil @@ -193,6 +191,7 @@ func LogsPanel_ToProto(mapCtx *MapContext, in *krm.LogsPanel) *pb.LogsPanel { out.ResourceNames = LogsPanel_ResourceNames_ToProto(mapCtx, in.ResourceNames) return out } + func MonitoringDashboardSpec_FromProto(mapCtx *MapContext, in *pb.Dashboard) *krm.MonitoringDashboardSpec { if in == nil { return nil @@ -777,7 +776,7 @@ func Widget_FromProto(mapCtx *MapContext, in *pb.Widget) *krm.Widget { out.TimeSeriesTable = TimeSeriesTable_FromProto(mapCtx, in.GetTimeSeriesTable()) out.CollapsibleGroup = CollapsibleGroup_FromProto(mapCtx, in.GetCollapsibleGroup()) out.LogsPanel = LogsPanel_FromProto(mapCtx, in.GetLogsPanel()) - // MISSING: IncidentList + out.IncidentList = IncidentList_FromProto(mapCtx, in.GetIncidentList()) out.PieChart = PieChart_FromProto(mapCtx, in.GetPieChart()) out.ErrorReportingPanel = ErrorReportingPanel_FromProto(mapCtx, 
in.GetErrorReportingPanel()) out.SectionHeader = SectionHeader_FromProto(mapCtx, in.GetSectionHeader()) @@ -815,7 +814,9 @@ func Widget_ToProto(mapCtx *MapContext, in *krm.Widget) *pb.Widget { if oneof := LogsPanel_ToProto(mapCtx, in.LogsPanel); oneof != nil { out.Content = &pb.Widget_LogsPanel{LogsPanel: oneof} } - // MISSING: IncidentList + if oneof := IncidentList_ToProto(mapCtx, in.IncidentList); oneof != nil { + out.Content = &pb.Widget_IncidentList{IncidentList: oneof} + } if oneof := PieChart_ToProto(mapCtx, in.PieChart); oneof != nil { out.Content = &pb.Widget_PieChart{PieChart: oneof} } @@ -831,6 +832,7 @@ func Widget_ToProto(mapCtx *MapContext, in *krm.Widget) *pb.Widget { out.Id = ValueOf(in.Id) return out } + func XyChart_FromProto(mapCtx *MapContext, in *pb.XyChart) *krm.XyChart { if in == nil { return nil diff --git a/pkg/controller/direct/monitoring/dashboard_mappings.go b/pkg/controller/direct/monitoring/dashboard_mappings.go index fede68452a..03ddb3d2a1 100644 --- a/pkg/controller/direct/monitoring/dashboard_mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_mappings.go @@ -23,6 +23,8 @@ import ( pb "cloud.google.com/go/monitoring/dashboard/apiv1/dashboardpb" krm "github.com/GoogleCloudPlatform/k8s-config-connector/apis/monitoring/v1beta1" refs "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" ) @@ -217,6 +219,24 @@ func TimeSeriesTable_ColumnSettings_FromProto(mapCtx *MapContext, in *pb.TimeSer // We want to always output the visible field, i.e. `visible: false` // We probably can automate this, because the visible field is required. out.Visible = PtrTo(in.GetVisible()) + return out +} +func MonitoredResource_FromProto(mapCtx *MapContext, in *monitoredres.MonitoredResource) *krm.MonitoredResource { + if in == nil { + return nil + } + out := &krm.MonitoredResource{} + out.Type = LazyPtr(in.GetType()) + out.Labels = in.Labels + return out +} +func MonitoredResource_ToProto(mapCtx *MapContext, in *krm.MonitoredResource) *monitoredres.MonitoredResource { + if in == nil { + return nil + } + out := &monitoredres.MonitoredResource{} + out.Type = ValueOf(in.Type) + out.Labels = in.Labels return out } diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden index 110ddc9fc7..217d7da053 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden @@ -120,6 +120,17 @@ spec: resource.type="gce_instance" metricVisualization: NUMBER title: TimeSeriesTable Widget + - incidentList: + monitoredResources: + - labels: + instance_id: "12345678901234" + project_id: my-project + zone: us-central1-a + type: gce_instance + policyNames: + - foo + - bar + title: IncidentList Widget displayName: monitoringdashboard-full projectRef: external: ${projectId} \ No newline at end of file diff --git 
a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml index 6cbe8ca4ef..3419e0601c 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml @@ -128,6 +128,17 @@ spec: resource.type="gce_instance" metricVisualization: NUMBER title: TimeSeriesTable Widget + - incidentList: + monitoredResources: + - labels: + instance_id: "12345678901234" + project_id: my-project + zone: us-central1-a + type: gce_instance + policyNames: + - foo + - bar + title: IncidentList Widget displayName: monitoringdashboard-full projectRef: external: ${projectId} diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log index 39fdcda1fb..1e9b7c4108 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log @@ -379,6 +379,25 @@ x-goog-request-params: parent=projects%2F${projectId} "metricVisualization": 1 }, "title": "TimeSeriesTable Widget" + }, + { + "incidentList": { + "monitoredResources": [ + { + "labels": { + "instance_id": "12345678901234", + "project_id": "my-project", + "zone": "us-central1-a" + }, + "type": "gce_instance" + } + ], + "policyNames": [ + "foo", + "bar" + ] + }, + "title": "IncidentList Widget" } ] } @@ -589,6 +608,25 @@ X-Xss-Protection: 0 "metricVisualization": "NUMBER" }, "title": "TimeSeriesTable Widget" + }, + { + "incidentList": { + "monitoredResources": [ + { + "labels": { + "instance_id": "12345678901234", + "project_id": "my-project", + "zone": "us-central1-a" + }, + "type": "gce_instance" + } + ], + "policyNames": [ + "foo", + "bar" + ] + }, + "title": "IncidentList Widget" } ] } @@ -807,6 +845,25 @@ X-Xss-Protection: 0 "metricVisualization": "NUMBER" }, "title": "TimeSeriesTable Widget" + }, + { + "incidentList": { + "monitoredResources": [ + { + "labels": { + "instance_id": "12345678901234", + "project_id": "my-project", + "zone": "us-central1-a" + }, + "type": "gce_instance" + } + ], + "policyNames": [ + "foo", + "bar" + ] + }, + "title": "IncidentList Widget" } ] } @@ -984,6 +1041,53 @@ x-goog-request-params: dashboard.name=projects%2F${projectId}%2Fdashboards%2Fmon ] }, "title": "ErrorReporting Widget" + }, + { + "timeSeriesTable": { + "columnSettings": [ + { + "column": "column1", + "visible": true + }, + { + "column": "column2" + } + ], + "dataSets": [ + { + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": 2 + }, + "filter": "metric.type=\"compute.googleapis.com/instance/disk/read_bytes_count\" resource.type=\"gce_instance\"" + } + } + } + ], + "metricVisualization": 1 + }, + "title": "TimeSeriesTable Widget" + }, + { + "incidentList": { + "monitoredResources": [ + { + "labels": { + "instance_id": "12345678901234", + 
"project_id": "my-project", + "zone": "us-central1-a" + }, + "type": "gce_instance" + } + ], + "policyNames": [ + "foo", + "bar" + ] + }, + "title": "IncidentList Widget" } ] } @@ -1166,6 +1270,53 @@ X-Xss-Protection: 0 ] }, "title": "ErrorReporting Widget" + }, + { + "timeSeriesTable": { + "columnSettings": [ + { + "column": "column1", + "visible": true + }, + { + "column": "column2" + } + ], + "dataSets": [ + { + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"compute.googleapis.com/instance/disk/read_bytes_count\" resource.type=\"gce_instance\"" + } + } + } + ], + "metricVisualization": "NUMBER" + }, + "title": "TimeSeriesTable Widget" + }, + { + "incidentList": { + "monitoredResources": [ + { + "labels": { + "instance_id": "12345678901234", + "project_id": "my-project", + "zone": "us-central1-a" + }, + "type": "gce_instance" + } + ], + "policyNames": [ + "foo", + "bar" + ] + }, + "title": "IncidentList Widget" } ] } @@ -1356,6 +1507,53 @@ X-Xss-Protection: 0 ] }, "title": "ErrorReporting Widget" + }, + { + "timeSeriesTable": { + "columnSettings": [ + { + "column": "column1", + "visible": true + }, + { + "column": "column2" + } + ], + "dataSets": [ + { + "timeSeriesQuery": { + "timeSeriesFilter": { + "aggregation": { + "alignmentPeriod": "60s", + "perSeriesAligner": "ALIGN_RATE" + }, + "filter": "metric.type=\"compute.googleapis.com/instance/disk/read_bytes_count\" resource.type=\"gce_instance\"" + } + } + } + ], + "metricVisualization": "NUMBER" + }, + "title": "TimeSeriesTable Widget" + }, + { + "incidentList": { + "monitoredResources": [ + { + "labels": { + "instance_id": "12345678901234", + "project_id": "my-project", + "zone": "us-central1-a" + }, + "type": "gce_instance" + } + ], + "policyNames": [ + "foo", + "bar" + ] + }, + "title": "IncidentList Widget" } ] } diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml index 41a923a623..dd0bb817d4 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml @@ -141,3 +141,14 @@ spec: aggregation: alignmentPeriod: "60s" perSeriesAligner: "ALIGN_RATE" + - title: "IncidentList Widget" + incidentList: + monitoredResources: + - type: "gce_instance" + labels: + project_id: my-project + instance_id: "12345678901234" + zone: "us-central1-a" + policyNames: + - foo + - bar diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md index 961a9533c5..71e301c5e5 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md @@ -100,6 +100,13 @@ columnLayout: versions: - string id: string + incidentList: + monitoredResources: + - labels: + string: string + type: string + policyNames: + - string logsPanel: filter: string resourceNames: @@ -396,6 +403,13 @@ gridLayout: versions: - string id: string + incidentList: + monitoredResources: 
+ - labels: + string: string + type: string + policyNames: + - string logsPanel: filter: string resourceNames: @@ -693,6 +707,13 @@ mosaicLayout: versions: - string id: string + incidentList: + monitoredResources: + - labels: + string: string + type: string + policyNames: + - string logsPanel: filter: string resourceNames: @@ -998,6 +1019,13 @@ rowLayout: versions: - string id: string + incidentList: + monitoredResources: + - labels: + string: string + type: string + policyNames: + - string logsPanel: filter: string resourceNames: @@ -1546,6 +1574,76 @@ rowLayout:

{% verbatim %}Optional. The widget id. Ids may be made up of alphanumerics, dashes and underscores. Widget ids are optional.{% endverbatim %}

+`columnLayout.columns[].widgets[].incidentList` | Optional | object | {% verbatim %}A widget that shows list of incidents.{% endverbatim %}
+`columnLayout.columns[].widgets[].incidentList.monitoredResources` | Optional | list (object) | {% verbatim %}Optional. The monitored resource for which incidents are listed. The resource doesn't need to be fully specified. That is, you can specify the resource type but not the values of the resource labels. The resource type and labels are used for filtering.{% endverbatim %}
+`columnLayout.columns[].widgets[].incidentList.monitoredResources[]` | Optional | object | {% verbatim %}{% endverbatim %}
+`columnLayout.columns[].widgets[].incidentList.monitoredResources[].labels` | Optional | map (key: string, value: string) | {% verbatim %}Required. Values for all of the labels listed in the associated monitored resource descriptor. For example, Compute Engine VM instances use the labels `"project_id"`, `"instance_id"`, and `"zone"`.{% endverbatim %}
+`columnLayout.columns[].widgets[].incidentList.monitoredResources[].type` | Optional | string | {% verbatim %}Required. The monitored resource type. This field must match the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For example, the type of a Compute Engine VM instance is `gce_instance`. Some descriptors include the service name in the type; for example, the type of a Datastream stream is `datastream.googleapis.com/Stream`.{% endverbatim %}
+`columnLayout.columns[].widgets[].incidentList.policyNames` | Optional | list (string) | {% verbatim %}Optional. A list of alert policy names to filter the incident list by. Don't include the project ID prefix in the policy name. For example, use `alertPolicies/utilization`.{% endverbatim %}
+`columnLayout.columns[].widgets[].incidentList.policyNames[]` | Optional | string | {% verbatim %}{% endverbatim %}

columnLayout.columns[].widgets[].logsPanel

@@ -5443,6 +5541,76 @@ rowLayout:

{% verbatim %}Optional. The widget id. Ids may be made up of alphanumerics, dashes and underscores. Widget ids are optional.{% endverbatim %}

+`gridLayout.widgets[].incidentList` | Optional | object | {% verbatim %}A widget that shows list of incidents.{% endverbatim %}
+`gridLayout.widgets[].incidentList.monitoredResources` | Optional | list (object) | {% verbatim %}Optional. The monitored resource for which incidents are listed. The resource doesn't need to be fully specified. That is, you can specify the resource type but not the values of the resource labels. The resource type and labels are used for filtering.{% endverbatim %}
+`gridLayout.widgets[].incidentList.monitoredResources[]` | Optional | object | {% verbatim %}{% endverbatim %}
+`gridLayout.widgets[].incidentList.monitoredResources[].labels` | Optional | map (key: string, value: string) | {% verbatim %}Required. Values for all of the labels listed in the associated monitored resource descriptor. For example, Compute Engine VM instances use the labels `"project_id"`, `"instance_id"`, and `"zone"`.{% endverbatim %}
+`gridLayout.widgets[].incidentList.monitoredResources[].type` | Optional | string | {% verbatim %}Required. The monitored resource type. This field must match the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For example, the type of a Compute Engine VM instance is `gce_instance`. Some descriptors include the service name in the type; for example, the type of a Datastream stream is `datastream.googleapis.com/Stream`.{% endverbatim %}
+`gridLayout.widgets[].incidentList.policyNames` | Optional | list (string) | {% verbatim %}Optional. A list of alert policy names to filter the incident list by. Don't include the project ID prefix in the policy name. For example, use `alertPolicies/utilization`.{% endverbatim %}
+`gridLayout.widgets[].incidentList.policyNames[]` | Optional | string | {% verbatim %}{% endverbatim %}

gridLayout.widgets[].logsPanel

@@ -9350,6 +9518,76 @@ rowLayout:

{% verbatim %}Optional. The widget id. Ids may be made up of alphanumerics, dashes and underscores. Widget ids are optional.{% endverbatim %}

+`mosaicLayout.tiles[].widget.incidentList` | Optional | object | {% verbatim %}A widget that shows list of incidents.{% endverbatim %}
+`mosaicLayout.tiles[].widget.incidentList.monitoredResources` | Optional | list (object) | {% verbatim %}Optional. The monitored resource for which incidents are listed. The resource doesn't need to be fully specified. That is, you can specify the resource type but not the values of the resource labels. The resource type and labels are used for filtering.{% endverbatim %}
+`mosaicLayout.tiles[].widget.incidentList.monitoredResources[]` | Optional | object | {% verbatim %}{% endverbatim %}
+`mosaicLayout.tiles[].widget.incidentList.monitoredResources[].labels` | Optional | map (key: string, value: string) | {% verbatim %}Required. Values for all of the labels listed in the associated monitored resource descriptor. For example, Compute Engine VM instances use the labels `"project_id"`, `"instance_id"`, and `"zone"`.{% endverbatim %}
+`mosaicLayout.tiles[].widget.incidentList.monitoredResources[].type` | Optional | string | {% verbatim %}Required. The monitored resource type. This field must match the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For example, the type of a Compute Engine VM instance is `gce_instance`. Some descriptors include the service name in the type; for example, the type of a Datastream stream is `datastream.googleapis.com/Stream`.{% endverbatim %}
+`mosaicLayout.tiles[].widget.incidentList.policyNames` | Optional | list (string) | {% verbatim %}Optional. A list of alert policy names to filter the incident list by. Don't include the project ID prefix in the policy name. For example, use `alertPolicies/utilization`.{% endverbatim %}
+`mosaicLayout.tiles[].widget.incidentList.policyNames[]` | Optional | string | {% verbatim %}{% endverbatim %}

mosaicLayout.tiles[].widget.logsPanel

@@ -13347,6 +13585,76 @@ rowLayout:

{% verbatim %}Optional. The widget id. Ids may be made up of alphanumerics, dashes and underscores. Widget ids are optional.{% endverbatim %}

+`rowLayout.rows[].widgets[].incidentList` | Optional | object | {% verbatim %}A widget that shows list of incidents.{% endverbatim %}
+`rowLayout.rows[].widgets[].incidentList.monitoredResources` | Optional | list (object) | {% verbatim %}Optional. The monitored resource for which incidents are listed. The resource doesn't need to be fully specified. That is, you can specify the resource type but not the values of the resource labels. The resource type and labels are used for filtering.{% endverbatim %}
+`rowLayout.rows[].widgets[].incidentList.monitoredResources[]` | Optional | object | {% verbatim %}{% endverbatim %}
+`rowLayout.rows[].widgets[].incidentList.monitoredResources[].labels` | Optional | map (key: string, value: string) | {% verbatim %}Required. Values for all of the labels listed in the associated monitored resource descriptor. For example, Compute Engine VM instances use the labels `"project_id"`, `"instance_id"`, and `"zone"`.{% endverbatim %}
+`rowLayout.rows[].widgets[].incidentList.monitoredResources[].type` | Optional | string | {% verbatim %}Required. The monitored resource type. This field must match the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For example, the type of a Compute Engine VM instance is `gce_instance`. Some descriptors include the service name in the type; for example, the type of a Datastream stream is `datastream.googleapis.com/Stream`.{% endverbatim %}
+`rowLayout.rows[].widgets[].incidentList.policyNames` | Optional | list (string) | {% verbatim %}Optional. A list of alert policy names to filter the incident list by. Don't include the project ID prefix in the policy name. For example, use `alertPolicies/utilization`.{% endverbatim %}
+`rowLayout.rows[].widgets[].incidentList.policyNames[]` | Optional | string | {% verbatim %}{% endverbatim %}

rowLayout.rows[].widgets[].logsPanel
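Taken together, the reference rows above map onto a dashboard spec such as the following minimal sketch. It is assembled from the `monitoringdashboardfull` test fixture added in this patch; the resource name, the use of `gridLayout`, and the label and policy values are illustrative placeholders rather than required values.

```yaml
apiVersion: monitoring.cnrm.cloud.google.com/v1beta1
kind: MonitoringDashboard
metadata:
  name: monitoringdashboard-incidentlist-example  # placeholder name
spec:
  displayName: incidentlist-example
  projectRef:
    external: ${projectId}
  gridLayout:
    widgets:
    - title: "IncidentList Widget"
      incidentList:
        # Optional: limit the listed incidents to a monitored resource type and labels.
        monitoredResources:
        - type: "gce_instance"
          labels:
            project_id: my-project
            instance_id: "12345678901234"
            zone: "us-central1-a"
        # Optional: filter by alert policy names (no project ID prefix).
        policyNames:
        - foo
        - bar
```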

From 6914da3c71e93bf4b66eab266b48587be5f5cc20 Mon Sep 17 00:00:00 2001 From: justinsb Date: Fri, 28 Jun 2024 13:33:56 -0400 Subject: [PATCH 101/101] monitoringdashboard: add dashboardFilters --- .../v1beta1/monitoringdashboard_types.go | 2 +- .../v1beta1/zz_generated.deepcopy.go | 7 ++ ...ards.monitoring.cnrm.cloud.google.com.yaml | 23 ++++++ docs/releasenotes/release-1.120.md | 1 + .../v1beta1/monitoringdashboard_types.go | 21 ++++++ .../v1beta1/zz_generated.deepcopy.go | 38 ++++++++++ .../dashboard_generated.mappings.go | 55 +++++++------- .../direct/monitoring/dashboard_mappings.go | 9 +++ ...ated_export_monitoringdashboardfull.golden | 7 ++ ...object_monitoringdashboardfull.golden.yaml | 7 ++ .../monitoringdashboardfull/_http.log | 72 +++++++++++++++++++ .../monitoringdashboardfull/create.yaml | 7 ++ .../monitoring/monitoringdashboard.md | 65 +++++++++++++++++ 13 files changed, 284 insertions(+), 30 deletions(-) diff --git a/apis/monitoring/v1beta1/monitoringdashboard_types.go b/apis/monitoring/v1beta1/monitoringdashboard_types.go index 146cf98e95..d08a70712c 100644 --- a/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -825,10 +825,10 @@ type MonitoringDashboardSpec struct { // arranged vertically. ColumnLayout *ColumnLayout `json:"columnLayout,omitempty"` - /*NOTYET // Filters to reduce the amount of data charted based on the filter criteria. DashboardFilters []DashboardFilter `json:"dashboardFilters,omitempty"` + /*NOTYET // Labels applied to the dashboard Labels []Dashboard_LabelsEntry `json:"labels,omitempty"` */ diff --git a/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/apis/monitoring/v1beta1/zz_generated.deepcopy.go index c673669d0a..82139e7806 100644 --- a/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -462,6 +462,13 @@ func (in *MonitoringDashboardSpec) DeepCopyInto(out *MonitoringDashboardSpec) { *out = new(ColumnLayout) (*in).DeepCopyInto(*out) } + if in.DashboardFilters != nil { + in, out := &in.DashboardFilters, &out.DashboardFilters + *out = make([]DashboardFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml index a75d989f6e..dd684086a2 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml @@ -2984,6 +2984,29 @@ spec: type: object type: array type: object + dashboardFilters: + description: Filters to reduce the amount of data charted based on + the filter criteria. + items: + properties: + filterType: + description: The specified filter type + type: string + labelKey: + description: Required. The key for the label + type: string + stringValue: + description: A variable-length string value. + type: string + templateVariable: + description: The placeholder text that can be referenced in + a filter string or MQL query. If omitted, the dashboard filter + will be applied to all relevant widgets in the dashboard. + type: string + required: + - labelKey + type: object + type: array displayName: description: Required. 
The mutable, human-readable name. type: string diff --git a/docs/releasenotes/release-1.120.md b/docs/releasenotes/release-1.120.md index 335ac51e31..fc17ac5062 100644 --- a/docs/releasenotes/release-1.120.md +++ b/docs/releasenotes/release-1.120.md @@ -29,6 +29,7 @@ output fields from GCP APIs are in `status.observedState.*` * `MonitoringDashboard` + * Added `dashboardFilters` support. * Added `alertChart` widgets. * Added `collapsibleGroup` widgets. * Added `pieChart` widgets. diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go index 6b820025af..df6198a2cb 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -136,6 +136,23 @@ type DashboardColumns struct { Widgets []DashboardWidgets `json:"widgets,omitempty"` } +type DashboardDashboardFilters struct { + /* The specified filter type */ + // +optional + FilterType *string `json:"filterType,omitempty"` + + /* Required. The key for the label */ + LabelKey string `json:"labelKey"` + + /* A variable-length string value. */ + // +optional + StringValue *string `json:"stringValue,omitempty"` + + /* The placeholder text that can be referenced in a filter string or MQL query. If omitted, the dashboard filter will be applied to all relevant widgets in the dashboard. */ + // +optional + TemplateVariable *string `json:"templateVariable,omitempty"` +} + type DashboardDataSets struct { /* A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value. */ // +optional @@ -810,6 +827,10 @@ type MonitoringDashboardSpec struct { // +optional ColumnLayout *DashboardColumnLayout `json:"columnLayout,omitempty"` + /* Filters to reduce the amount of data charted based on the filter criteria. */ + // +optional + DashboardFilters []DashboardDashboardFilters `json:"dashboardFilters,omitempty"` + /* Required. The mutable, human-readable name. */ DisplayName string `json:"displayName"` diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go index ee47d9cde8..540aea3ee8 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -684,6 +684,37 @@ func (in *DashboardColumns) DeepCopy() *DashboardColumns { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardDashboardFilters) DeepCopyInto(out *DashboardDashboardFilters) { + *out = *in + if in.FilterType != nil { + in, out := &in.FilterType, &out.FilterType + *out = new(string) + **out = **in + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } + if in.TemplateVariable != nil { + in, out := &in.TemplateVariable, &out.TemplateVariable + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardDashboardFilters. 
+func (in *DashboardDashboardFilters) DeepCopy() *DashboardDashboardFilters { + if in == nil { + return nil + } + out := new(DashboardDashboardFilters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DashboardDataSets) DeepCopyInto(out *DashboardDataSets) { *out = *in @@ -2170,6 +2201,13 @@ func (in *MonitoringDashboardSpec) DeepCopyInto(out *MonitoringDashboardSpec) { *out = new(DashboardColumnLayout) (*in).DeepCopyInto(*out) } + if in.DashboardFilters != nil { + in, out := &in.DashboardFilters, &out.DashboardFilters + *out = make([]DashboardDashboardFilters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.GridLayout != nil { in, out := &in.GridLayout, &out.GridLayout *out = new(DashboardGridLayout) diff --git a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go index 9ed61ce43b..c5553ff97f 100644 --- a/pkg/controller/direct/monitoring/dashboard_generated.mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_generated.mappings.go @@ -109,33 +109,30 @@ func ColumnLayout_Column_ToProto(mapCtx *MapContext, in *krm.ColumnLayout_Column out.Widgets = Slice_ToProto(mapCtx, in.Widgets, Widget_ToProto) return out } - -// func DashboardFilter_FromProto(mapCtx *MapContext, in *pb.DashboardFilter) *krm.DashboardFilter { -// if in == nil { -// return nil -// } -// out := &krm.DashboardFilter{} -// out.LabelKey = LazyPtr(in.GetLabelKey()) -// out.TemplateVariable = LazyPtr(in.GetTemplateVariable()) -// out.StringValue = LazyPtr(in.GetStringValue()) -// out.FilterType = Enum_FromProto(mapCtx, in.FilterType) -// return out -// } - -// func DashboardFilter_ToProto(mapCtx *MapContext, in *krm.DashboardFilter) *pb.DashboardFilter { -// if in == nil { -// return nil -// } -// out := &pb.DashboardFilter{} -// out.LabelKey = ValueOf(in.LabelKey) -// out.TemplateVariable = ValueOf(in.TemplateVariable) -// if oneof := DashboardFilter_StringValue_ToProto(mapCtx, in.StringValue); oneof != nil { -// out.DefaultValue = oneof -// } -// out.FilterType = Enum_ToProto[pb.DashboardFilter_FilterType](mapCtx, in.FilterType) -// return out -// } - +func DashboardFilter_FromProto(mapCtx *MapContext, in *pb.DashboardFilter) *krm.DashboardFilter { + if in == nil { + return nil + } + out := &krm.DashboardFilter{} + out.LabelKey = LazyPtr(in.GetLabelKey()) + out.TemplateVariable = LazyPtr(in.GetTemplateVariable()) + out.StringValue = LazyPtr(in.GetStringValue()) + out.FilterType = Enum_FromProto(mapCtx, in.FilterType) + return out +} +func DashboardFilter_ToProto(mapCtx *MapContext, in *krm.DashboardFilter) *pb.DashboardFilter { + if in == nil { + return nil + } + out := &pb.DashboardFilter{} + out.LabelKey = ValueOf(in.LabelKey) + out.TemplateVariable = ValueOf(in.TemplateVariable) + if oneof := DashboardFilter_StringValue_ToProto(mapCtx, in.StringValue); oneof != nil { + out.DefaultValue = oneof + } + out.FilterType = Enum_ToProto[pb.DashboardFilter_FilterType](mapCtx, in.FilterType) + return out +} func GridLayout_FromProto(mapCtx *MapContext, in *pb.GridLayout) *krm.GridLayout { if in == nil { return nil @@ -204,7 +201,7 @@ func MonitoringDashboardSpec_FromProto(mapCtx *MapContext, in *pb.Dashboard) *kr out.MosaicLayout = MosaicLayout_FromProto(mapCtx, in.GetMosaicLayout()) out.RowLayout = RowLayout_FromProto(mapCtx, in.GetRowLayout()) out.ColumnLayout = ColumnLayout_FromProto(mapCtx, 
in.GetColumnLayout()) - // MISSING: DashboardFilters + out.DashboardFilters = Slice_FromProto(mapCtx, in.DashboardFilters, DashboardFilter_FromProto) // MISSING: Labels return out } @@ -228,7 +225,7 @@ func MonitoringDashboardSpec_ToProto(mapCtx *MapContext, in *krm.MonitoringDashb if oneof := ColumnLayout_ToProto(mapCtx, in.ColumnLayout); oneof != nil { out.Layout = &pb.Dashboard_ColumnLayout{ColumnLayout: oneof} } - // MISSING: DashboardFilters + out.DashboardFilters = Slice_ToProto(mapCtx, in.DashboardFilters, DashboardFilter_ToProto) // MISSING: Labels return out } diff --git a/pkg/controller/direct/monitoring/dashboard_mappings.go b/pkg/controller/direct/monitoring/dashboard_mappings.go index 03ddb3d2a1..5f4266b8f7 100644 --- a/pkg/controller/direct/monitoring/dashboard_mappings.go +++ b/pkg/controller/direct/monitoring/dashboard_mappings.go @@ -240,3 +240,12 @@ func MonitoredResource_ToProto(mapCtx *MapContext, in *krm.MonitoredResource) *m out.Labels = in.Labels return out } + +func DashboardFilter_StringValue_ToProto(mapCtx *MapContext, in *string) *pb.DashboardFilter_StringValue { + if in == nil { + return nil + } + out := &pb.DashboardFilter_StringValue{} + out.StringValue = *in + return out +} diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden index 217d7da053..95456d1ce5 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_export_monitoringdashboardfull.golden @@ -131,6 +131,13 @@ spec: - foo - bar title: IncidentList Widget + dashboardFilters: + - filterType: RESOURCE_LABEL + labelKey: instance_id + stringValue: "3133577226154888113" + templateVariable: iid + - filterType: RESOURCE_LABEL + labelKey: zone displayName: monitoringdashboard-full projectRef: external: ${projectId} \ No newline at end of file diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml index 3419e0601c..a960fe2da2 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_generated_object_monitoringdashboardfull.golden.yaml @@ -139,6 +139,13 @@ spec: - foo - bar title: IncidentList Widget + dashboardFilters: + - filterType: RESOURCE_LABEL + labelKey: instance_id + stringValue: "3133577226154888113" + templateVariable: iid + - filterType: RESOURCE_LABEL + labelKey: zone displayName: monitoringdashboard-full projectRef: external: ${projectId} diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log index 1e9b7c4108..3d280ff1e4 100644 --- 
a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/_http.log @@ -403,6 +403,18 @@ x-goog-request-params: parent=projects%2F${projectId} } ] }, + "dashboardFilters": [ + { + "filterType": 1, + "labelKey": "instance_id", + "stringValue": "3133577226154888113", + "templateVariable": "iid" + }, + { + "filterType": 1, + "labelKey": "zone" + } + ], "displayName": "monitoringdashboard-full", "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" } @@ -632,6 +644,18 @@ X-Xss-Protection: 0 } ] }, + "dashboardFilters": [ + { + "filterType": "RESOURCE_LABEL", + "labelKey": "instance_id", + "stringValue": "3133577226154888113", + "templateVariable": "iid" + }, + { + "filterType": "RESOURCE_LABEL", + "labelKey": "zone" + } + ], "displayName": "monitoringdashboard-full", "etag": "abcdef0123A=", "name": "projects/${projectNumber}/dashboards/monitoringdashboard-${uniqueId}" @@ -869,6 +893,18 @@ X-Xss-Protection: 0 } ] }, + "dashboardFilters": [ + { + "filterType": "RESOURCE_LABEL", + "labelKey": "instance_id", + "stringValue": "3133577226154888113", + "templateVariable": "iid" + }, + { + "filterType": "RESOURCE_LABEL", + "labelKey": "zone" + } + ], "displayName": "monitoringdashboard-full", "etag": "abcdef0123A=", "name": "projects/${projectNumber}/dashboards/monitoringdashboard-${uniqueId}" @@ -1093,6 +1129,18 @@ x-goog-request-params: dashboard.name=projects%2F${projectId}%2Fdashboards%2Fmon } ] }, + "dashboardFilters": [ + { + "filterType": 1, + "labelKey": "instance_id", + "stringValue": "3133577226154888113", + "templateVariable": "iid" + }, + { + "filterType": 1, + "labelKey": "zone" + } + ], "displayName": "monitoringdashboard-full", "name": "projects/${projectId}/dashboards/monitoringdashboard-${uniqueId}" } @@ -1322,6 +1370,18 @@ X-Xss-Protection: 0 } ] }, + "dashboardFilters": [ + { + "filterType": "RESOURCE_LABEL", + "labelKey": "instance_id", + "stringValue": "3133577226154888113", + "templateVariable": "iid" + }, + { + "filterType": "RESOURCE_LABEL", + "labelKey": "zone" + } + ], "displayName": "monitoringdashboard-full", "etag": "abcdef0123A=", "name": "projects/${projectNumber}/dashboards/monitoringdashboard-${uniqueId}" @@ -1559,6 +1619,18 @@ X-Xss-Protection: 0 } ] }, + "dashboardFilters": [ + { + "filterType": "RESOURCE_LABEL", + "labelKey": "instance_id", + "stringValue": "3133577226154888113", + "templateVariable": "iid" + }, + { + "filterType": "RESOURCE_LABEL", + "labelKey": "zone" + } + ], "displayName": "monitoringdashboard-full", "etag": "abcdef0123A=", "name": "projects/${projectNumber}/dashboards/monitoringdashboard-${uniqueId}" diff --git a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml index dd0bb817d4..a1e560cc1f 100644 --- a/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/monitoring/v1beta1/monitoringdashboard/monitoringdashboardfull/create.yaml @@ -17,6 +17,13 @@ kind: MonitoringDashboard metadata: name: monitoringdashboard-${uniqueId} spec: + dashboardFilters: + - filterType: RESOURCE_LABEL + labelKey: instance_id + stringValue: "3133577226154888113" + templateVariable: iid + - filterType: 
RESOURCE_LABEL + labelKey: zone displayName: "monitoringdashboard-full" columnLayout: columns: diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md index 71e301c5e5..227a57a235 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md @@ -380,6 +380,11 @@ columnLayout: yAxis: label: string scale: string +dashboardFilters: +- filterType: string + labelKey: string + stringValue: string + templateVariable: string displayName: string gridLayout: columns: integer @@ -5285,6 +5290,66 @@ rowLayout:

             <p>{% verbatim %}The axis scale. By default, a linear scale is used.{% endverbatim %}</p>
         </td>
     </tr>
+    <tr>
+        <td>
+            <p><code>dashboardFilters</code></p>
+            <p><i>Optional</i></p>
+        </td>
+        <td>
+            <p><code class="apitype">list (object)</code></p>
+            <p>{% verbatim %}Filters to reduce the amount of data charted based on the filter criteria.{% endverbatim %}</p>
+        </td>
+    </tr>
+    <tr>
+        <td>
+            <p><code>dashboardFilters[]</code></p>
+            <p><i>Optional</i></p>
+        </td>
+        <td>
+            <p><code class="apitype">object</code></p>
+            <p>{% verbatim %}{% endverbatim %}</p>
+        </td>
+    </tr>
+    <tr>
+        <td>
+            <p><code>dashboardFilters[].filterType</code></p>
+            <p><i>Optional</i></p>
+        </td>
+        <td>
+            <p><code class="apitype">string</code></p>
+            <p>{% verbatim %}The specified filter type{% endverbatim %}</p>
+        </td>
+    </tr>
+    <tr>
+        <td>
+            <p><code>dashboardFilters[].labelKey</code></p>
+            <p><i>Required*</i></p>
+        </td>
+        <td>
+            <p><code class="apitype">string</code></p>
+            <p>{% verbatim %}Required. The key for the label{% endverbatim %}</p>
+        </td>
+    </tr>
+    <tr>
+        <td>
+            <p><code>dashboardFilters[].stringValue</code></p>
+            <p><i>Optional</i></p>
+        </td>
+        <td>
+            <p><code class="apitype">string</code></p>
+            <p>{% verbatim %}A variable-length string value.{% endverbatim %}</p>
+        </td>
+    </tr>
+    <tr>
+        <td>
+            <p><code>dashboardFilters[].templateVariable</code></p>
+            <p><i>Optional</i></p>
+        </td>
+        <td>
+            <p><code class="apitype">string</code></p>
+            <p>{% verbatim %}The placeholder text that can be referenced in a filter string or MQL query. If omitted, the dashboard filter will be applied to all relevant widgets in the dashboard.{% endverbatim %}</p>
+        </td>
+    </tr>

displayName