diff --git a/mmv1/api/async.go b/mmv1/api/async.go
index f947c62741b3..b6ea3c7291f6 100644
--- a/mmv1/api/async.go
+++ b/mmv1/api/async.go
@@ -58,6 +58,7 @@ func NewOperation() *Operation {
 	return op
 }
 
+// NewAsync is only used in openapi-generate.
 func NewAsync() *Async {
 	oa := &Async{
 		Actions:   []string{"create", "delete", "update"},
@@ -150,6 +151,10 @@ func (a *Async) UnmarshalYAML(unmarshal func(any) error) error {
 		return err
 	}
 
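+	// Default to "OpAsync", the operation-based async type, when none is specified in the YAML.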
+	if a.Type == "" {
+		a.Type = "OpAsync"
+	}
 	if a.Type == "PollAsync" && a.TargetOccurrences == 0 {
 		a.TargetOccurrences = 1
 	}
diff --git a/mmv1/products/accesscontextmanager/ServicePerimeterResource.yaml b/mmv1/products/accesscontextmanager/ServicePerimeterResource.yaml
index e5fb2f303ba2..de7585e6ae6c 100644
--- a/mmv1/products/accesscontextmanager/ServicePerimeterResource.yaml
+++ b/mmv1/products/accesscontextmanager/ServicePerimeterResource.yaml
@@ -43,7 +43,7 @@ create_verb: 'PATCH'
 update_mask: true
 delete_verb: 'PATCH'
 immutable: true
-mutex: '{{perimeter_name}}'
+mutex: '{{access_policy_id}}'
 import_format:
   - '{{perimeter_name}}/{{resource}}'
 timeouts:
@@ -67,6 +67,7 @@ nested_query:
   is_list_of_ids: true
   modify_by_patch: true
 custom_code:
+  encoder: 'templates/terraform/encoders/access_context_manager_service_perimeter_resource.go.tmpl'
   custom_import: 'templates/terraform/custom_import/access_context_manager_service_perimeter_resource.go.tmpl'
   post_read: 'templates/terraform/post_read/access_context_manager_service_perimeter_resource.go.tmpl'
 exclude_tgc: true
@@ -97,3 +98,10 @@ properties:
       Format: projects/{project_number}
     required: true
     immutable: true
+  - name: 'accessPolicyId'
+    type: String
+    description: |
+      The name of the Access Policy this resource belongs to.
+    ignore_read: true
+    immutable: true
+    output: true
diff --git a/mmv1/products/chronicle/DataAccessLabel.yaml b/mmv1/products/chronicle/DataAccessLabel.yaml
index 36428850ff45..64f27da66e7d 100644
--- a/mmv1/products/chronicle/DataAccessLabel.yaml
+++ b/mmv1/products/chronicle/DataAccessLabel.yaml
@@ -55,9 +55,9 @@ parameters:
     type: String
     description: |-
       Required. The ID to use for the data access label, which will become the label's
-      display name and the final component of the label's resource name. It must
-      only contain ASCII lowercase letters, numbers, and dashes; it must begin
-      with a letter, and it must not exceed 1000 characters.
+      display name and the final component of the label's resource name. It
+      must be at most 63 characters long and must match the resource ID segment
+      pattern defined in AIP-122: https://google.aip.dev/122#resource-id-segments
     immutable: true
     url_param_only: true
     required: true
diff --git a/mmv1/products/chronicle/ReferenceList.yaml b/mmv1/products/chronicle/ReferenceList.yaml
new file mode 100644
index 000000000000..bc79d1796945
--- /dev/null
+++ b/mmv1/products/chronicle/ReferenceList.yaml
@@ -0,0 +1,141 @@
+# Copyright 2025 Google Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+name: ReferenceList
+description: Reference lists are user-defined lists of values that can be used in multiple Rules.
+min_version: 'beta'
+references:
+  guides:
+    'Google SecOps Guides': 'https://cloud.google.com/chronicle/docs/secops/secops-overview'
+  api: 'https://cloud.google.com/chronicle/docs/reference/rest/v1alpha/projects.locations.instances.referenceLists'
+base_url: projects/{{project}}/locations/{{location}}/instances/{{instance}}/referenceLists
+self_link: projects/{{project}}/locations/{{location}}/instances/{{instance}}/referenceLists/{{reference_list_id}}
+create_url: projects/{{project}}/locations/{{location}}/instances/{{instance}}/referenceLists?referenceListId={{reference_list_id}}
+id_format: projects/{{project}}/locations/{{location}}/instances/{{instance}}/referenceLists/{{reference_list_id}}
+import_format:
+  - projects/{{project}}/locations/{{location}}/instances/{{instance}}/referenceLists/{{reference_list_id}}
+update_verb: PATCH
+update_mask: true
+exclude_delete: true
+
+examples:
+  - name: 'chronicle_referencelist_basic'
+    primary_resource_id: 'example'
+    min_version: 'beta'
+    vars:
+      reference_list_id: reference_list_id
+    test_env_vars:
+      chronicle_id: 'CHRONICLE_ID'
+
+parameters:
+  - name: location
+    type: String
+    description: The location of the resource. This is the geographical region where the Chronicle instance resides, such as "us" or "europe-west2".
+    immutable: true
+    url_param_only: true
+    required: true
+  - name: instance
+    type: String
+    description: The unique identifier for the Chronicle instance, which is the same as the customer ID.
+    immutable: true
+    url_param_only: true
+    required: true
+  - name: referenceListId
+    type: String
+    description: |-
+      Required. The ID to use for the reference list. This is also the display name for
+      the reference list. It must satisfy the following requirements:
+      - Starts with a letter.
+      - Contains only letters, numbers, and underscores.
+      - Has length < 256.
+      - Must be unique.
+    immutable: true
+    url_param_only: true
+    required: true
+properties:
+  - name: name
+    type: String
+    description: |-
+      Output only. The resource name of the reference list.
+      Format:
+      projects/{project}/locations/{location}/instances/{instance}/referenceLists/{reference_list}
+    output: true
+  - name: description
+    type: String
+    description: Required. A user-provided description of the reference list.
+    required: true
+  - name: entries
+    type: Array
+    description: |-
+      Required. The entries of the reference list.
+      When listed, they are returned in the order that was specified at creation
+      or update. The combined size of the values of the reference list may not
+      exceed 6MB.
+      This is returned only when the view is REFERENCE_LIST_VIEW_FULL.
+    required: true
+    item_type:
+      type: NestedObject
+      properties:
+        - name: value
+          type: String
+          description: Required. The value of the entry. Maximum length is 512 characters.
+          required: true
+  - name: scopeInfo
+    type: NestedObject
+    output: true
+    description: ScopeInfo specifies the scope info of the reference list.
+    properties:
+      - name: referenceListScope
+        type: NestedObject
+        description: ReferenceListScope specifies the list of scope names of the reference list.
+        required: true
+        properties:
+          - name: scopeNames
+            type: Array
+            description: |-
+              Optional. The list of scope names of the reference list. The scope names should be
+              full resource names and should be of the format:
+              "projects/{project}/locations/{location}/instances/{instance}/dataAccessScopes/{scope_name}".
+            item_type:
+              type: String
+  - name: displayName
+    type: String
+    description: Output only. The unique display name of the reference list.
+    output: true
+  - name: revisionCreateTime
+    type: String
+    description: Output only. The timestamp when the reference list was last updated.
+    output: true
+  - name: rules
+    type: Array
+    description: |-
+      Output only. The resource names for the associated self-authored Rules that use this
+      reference list.
+      This is returned only when the view is REFERENCE_LIST_VIEW_FULL.
+    output: true
+    item_type:
+      type: String
+  - name: syntaxType
+    type: String
+    description: |2-
+
+      Possible values:
+      REFERENCE_LIST_SYNTAX_TYPE_PLAIN_TEXT_STRING
+      REFERENCE_LIST_SYNTAX_TYPE_REGEX
+      REFERENCE_LIST_SYNTAX_TYPE_CIDR
+    required: true
+  - name: ruleAssociationsCount
+    type: Integer
+    description: Output only. The count of self-authored rules using the reference list.
+    output: true
diff --git a/mmv1/products/chronicle/RuleDeployment.yaml b/mmv1/products/chronicle/RuleDeployment.yaml
new file mode 100644
index 000000000000..d21ac17d76de
--- /dev/null
+++ b/mmv1/products/chronicle/RuleDeployment.yaml
@@ -0,0 +1,135 @@
+# Copyright 2025 Google Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+name: RuleDeployment
+description: The RuleDeployment resource represents the deployment state of a Rule.
+min_version: 'beta'
+references:
+  guides:
+    'Google SecOps Guides': 'https://cloud.google.com/chronicle/docs/secops/secops-overview'
+  api: 'https://cloud.google.com/chronicle/docs/reference/rest/v1alpha/RuleDeployment'
+base_url: projects/{{project}}/locations/{{location}}/instances/{{instance}}/rules/{{rule}}/deployments
+self_link: projects/{{project}}/locations/{{location}}/instances/{{instance}}/rules/{{rule}}/deployment
+create_url: projects/{{project}}/locations/{{location}}/instances/{{instance}}/rules/{{rule}}/deployment?updateMask=enabled,alerting,archived,runFrequency
+id_format: projects/{{project}}/locations/{{location}}/instances/{{instance}}/rules/{{rule}}/deployment
+import_format:
+  - projects/{{project}}/locations/{{location}}/instances/{{instance}}/rules/{{rule}}/deployment
+create_verb: PATCH
+update_verb: PATCH
+update_mask: true
+exclude_delete: true
+
+examples:
+  - name: 'chronicle_ruledeployment_basic'
+    primary_resource_id: 'example'
+    min_version: 'beta'
+    test_env_vars:
+      chronicle_id: 'CHRONICLE_ID'
+
+parameters:
+  - name: location
+    type: String
+    description: The location of the resource. This is the geographical region where the Chronicle instance resides, such as "us" or "europe-west2".
+    immutable: true
+    url_param_only: true
+    required: true
+  - name: instance
+    type: String
+    description: The unique identifier for the Chronicle instance, which is the same as the customer ID.
+    immutable: true
+    url_param_only: true
+    required: true
+  - name: rule
+    type: String
+    description: The Rule ID of the rule.
+    immutable: true
+    url_param_only: true
+    required: true
+properties:
+  - name: name
+    type: String
+    description: |-
+      The resource name of the rule deployment.
+      Note that RuleDeployment is a child of the overall Rule, not any individual
+      revision, so the resource ID segment for the Rule resource must not
+      reference a specific revision.
+      Format:
+      projects/{project}/locations/{location}/instances/{instance}/rules/{rule}/deployment
+    output: true
+  - name: enabled
+    type: Boolean
+    description: Whether the rule is currently deployed continuously against incoming data.
+  - name: alerting
+    type: Boolean
+    description: |-
+      Whether detections resulting from this deployment should be considered
+      alerts.
+  - name: archived
+    type: Boolean
+    description: |-
+      The archive state of the rule deployment.
+      Cannot be set to true unless enabled is set to false.
+      If set to true, alerting will automatically be set to false.
+      If currently set to true, enabled, alerting, and run_frequency cannot be
+      updated.
+  - name: archiveTime
+    type: String
+    description: Output only. The timestamp when the rule deployment archive state was last set to true.
+      If the rule deployment's current archive state is not set to true, the field will be empty.
+    output: true
+  - name: runFrequency
+    type: String
+    description: |2-
+
+      The run frequency of the rule deployment.
+      Possible values:
+      LIVE
+      HOURLY
+      DAILY
+  - name: executionState
+    type: String
+    description: |2-
+
+      The execution state of the rule deployment.
+      Possible values:
+      DEFAULT
+      LIMITED
+      PAUSED
+    output: true
+  - name: producerRules
+    type: Array
+    description: |2-
+      Output only. The names of the associated/chained producer rules. Rules are considered
+      producers for this rule if this rule explicitly filters on their ruleid.
+      Format:
+      projects/{project}/locations/{location}/instances/{instance}/rules/{rule}
+    output: true
+    item_type:
+      type: String
+  - name: consumerRules
+    type: Array
+    description: |2-
+      Output only. The names of the associated/chained consumer rules. Rules are considered
+      consumers of this rule if their rule text explicitly filters on this rule's ruleid.
+      Format:
+      projects/{project}/locations/{location}/instances/{instance}/rules/{rule}
+    output: true
+    item_type:
+      type: String
+  - name: lastAlertStatusChangeTime
+    type: String
+    description: Output only. The timestamp when the rule deployment alert state was last changed.
+      This is filled regardless of the current alert state. E.g. if the current alert status is false,
+      this timestamp will be the timestamp when the alert status was changed to false.
+    output: true
diff --git a/mmv1/products/compute/InterconnectAttachment.yaml b/mmv1/products/compute/InterconnectAttachment.yaml
index a31185bfe08b..d3f5945096b7 100644
--- a/mmv1/products/compute/InterconnectAttachment.yaml
+++ b/mmv1/products/compute/InterconnectAttachment.yaml
@@ -130,6 +130,7 @@ properties:
       - 'BPS_10G'
       - 'BPS_20G'
       - 'BPS_50G'
+      - 'BPS_100G'
   - name: 'edgeAvailabilityDomain'
     type: String
     description: |
diff --git a/mmv1/products/compute/UrlMap.yaml b/mmv1/products/compute/UrlMap.yaml
index 36371f2de68d..c1a3e5ca7111 100644
--- a/mmv1/products/compute/UrlMap.yaml
+++ b/mmv1/products/compute/UrlMap.yaml
@@ -19,6 +19,7 @@ description: |
   that you define for the host and path of an incoming URL.
 references:
   guides:
+    'Official Documentation': 'https://cloud.google.com/load-balancing/docs/url-map-concepts'
   api: 'https://cloud.google.com/compute/docs/reference/rest/v1/urlMaps'
 docs:
 base_url: 'projects/{{project}}/global/urlMaps'
diff --git a/mmv1/products/gemini/CodeRepositoryIndex.yaml b/mmv1/products/gemini/CodeRepositoryIndex.yaml
index 62cb5236c845..c2730c35bfe6 100644
--- a/mmv1/products/gemini/CodeRepositoryIndex.yaml
+++ b/mmv1/products/gemini/CodeRepositoryIndex.yaml
@@ -51,9 +51,18 @@ async:
   result:
     resource_inside_response: true
   include_project: false
+custom_code:
+  pre_delete: templates/terraform/pre_delete/code_repository_index_force_delete.go.tmpl
 error_retry_predicates:
   - 'transport_tpg.IsCodeRepositoryIndexUnreadyError'
   - 'transport_tpg.IsRepositoryGroupQueueError'
+virtual_fields:
+  - name: 'force_destroy'
+    description:
+      If set to true, allows deletion of the CodeRepositoryIndex even if there are existing
+      RepositoryGroups for the resource. These RepositoryGroups will also be deleted.
+    type: Boolean
+    default_value: false
 parameters:
   - name: location
     type: String
diff --git a/mmv1/products/parametermanager/Parameter.yaml b/mmv1/products/parametermanager/Parameter.yaml
new file mode 100644
index 000000000000..7851e09d4729
--- /dev/null
+++ b/mmv1/products/parametermanager/Parameter.yaml
@@ -0,0 +1,121 @@
+# Copyright 2024 Google Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+name: 'Parameter'
+description: |
+  A Parameter resource is a logical parameter.
+min_version: 'beta'
+references:
+  guides:
+  api: 'https://cloud.google.com/secret-manager/parameter-manager/docs/reference/rest/v1/projects.locations.parameters'
+docs:
+base_url: 'projects/{{project}}/locations/global/parameters'
+self_link: 'projects/{{project}}/locations/global/parameters/{{parameter_id}}'
+create_url: 'projects/{{project}}/locations/global/parameters?parameter_id={{parameter_id}}'
+update_verb: 'PATCH'
+update_mask: true
+import_format:
+  - 'projects/{{project}}/locations/global/parameters/{{parameter_id}}'
+timeouts:
+  insert_minutes: 20
+  update_minutes: 20
+  delete_minutes: 20
+examples:
+  - name: 'parameter_config_basic'
+    primary_resource_id: 'parameter-basic'
+    min_version: 'beta'
+    vars:
+      parameter_id: 'parameter'
+  - name: 'parameter_with_format'
+    primary_resource_id: 'parameter-with-format'
+    min_version: 'beta'
+    vars:
+      parameter_id: 'parameter'
+  - name: 'parameter_with_labels'
+    primary_resource_id: 'parameter-with-labels'
+    min_version: 'beta'
+    vars:
+      parameter_id: 'parameter'
+parameters:
+  - name: 'parameterId'
+    type: String
+    description: |
+      This must be unique within the project.
+    url_param_only: true
+    required: true
+    immutable: true
+properties:
+  - name: 'name'
+    type: String
+    description: |
+      The resource name of the Parameter. Format:
+      `projects/{{project}}/locations/global/parameters/{{parameter_id}}`
+    output: true
+  - name: 'createTime'
+    type: String
+    description: |
+      The time at which the Parameter was created.
+    output: true
+  - name: 'updateTime'
+    type: String
+    description: |
+      The time at which the Parameter was updated.
+    output: true
+  - name: 'policyMember'
+    type: NestedObject
+    description: |
+      Policy member strings of a Google Cloud resource.
+    output: true
+    properties:
+      - name: 'iamPolicyUidPrincipal'
+        type: String
+        description: |
+          IAM policy binding member referring to a Google Cloud resource by system-assigned unique identifier.
+          If a resource is deleted and recreated with the same name, the binding will not be applicable to the
+          new resource. Format:
+          `principal://parametermanager.googleapis.com/projects/{{project}}/uid/locations/global/parameters/{{uid}}`
+        output: true
+      - name: 'iamPolicyNamePrincipal'
+        type: String
+        description: |
+          IAM policy binding member referring to a Google Cloud resource by user-assigned name. If a
+          resource is deleted and recreated with the same name, the binding will be applicable to the
+          new resource. Format:
+          `principal://parametermanager.googleapis.com/projects/{{project}}/name/locations/global/parameters/{{parameter_id}}`
+        output: true
+  - name: 'labels'
+    type: KeyValueLabels
+    description: |
+      The labels assigned to this Parameter.
+
+      Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes,
+      and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+
+      Label values must be between 0 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes,
+      and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+
+      No more than 64 labels can be assigned to a given resource.
+
+      An object containing a list of "key": value pairs. Example:
+      { "name": "wrench", "mass": "1.3kg", "count": "3" }.
+  - name: 'format'
+    type: Enum
+    description: |
+      The format type of the parameter resource.
+    default_value: 'UNFORMATTED'
+    immutable: true
+    enum_values:
+      - 'UNFORMATTED'
+      - 'YAML'
+      - 'JSON'
diff --git a/mmv1/products/parametermanager/product.yaml b/mmv1/products/parametermanager/product.yaml
new file mode 100644
index 000000000000..4a0184ba77c7
--- /dev/null
+++ b/mmv1/products/parametermanager/product.yaml
@@ -0,0 +1,21 @@
+# Copyright 2024 Google Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+name: 'ParameterManager'
+display_name: 'Parameter Manager'
+versions:
+  - name: 'beta'
+    base_url: 'https://parametermanager.googleapis.com/v1/'
+scopes:
+  - 'https://www.googleapis.com/auth/cloud-platform'
diff --git a/mmv1/products/parametermanagerregional/RegionalParameter.yaml b/mmv1/products/parametermanagerregional/RegionalParameter.yaml
index 8ad83bd80379..1862000ac606 100644
--- a/mmv1/products/parametermanagerregional/RegionalParameter.yaml
+++ b/mmv1/products/parametermanagerregional/RegionalParameter.yaml
@@ -121,6 +121,6 @@ properties:
     description: |
-      The format type of the regional parameter. Default value is UNFORMATTED.
-    default_from_api: true
+      The format type of the regional parameter.
+    default_value: 'UNFORMATTED'
     immutable: true
     enum_values:
       - 'UNFORMATTED'
diff --git a/mmv1/templates/terraform/custom_import/access_context_manager_service_perimeter_resource.go.tmpl b/mmv1/templates/terraform/custom_import/access_context_manager_service_perimeter_resource.go.tmpl
index 77b57820573e..075587c706a0 100644
--- a/mmv1/templates/terraform/custom_import/access_context_manager_service_perimeter_resource.go.tmpl
+++ b/mmv1/templates/terraform/custom_import/access_context_manager_service_perimeter_resource.go.tmpl
@@ -18,6 +18,9 @@
 		return nil, err
 	}
 
+	if err := d.Set("access_policy_id", fmt.Sprintf("accessPolicies/%s", parts["accessPolicy"])); err != nil {
+		return nil, fmt.Errorf("Error setting access_policy_id: %s", err)
+	}
 	if err := d.Set("perimeter_name", fmt.Sprintf("accessPolicies/%s/servicePerimeters/%s", parts["accessPolicy"], parts["perimeter"])); err != nil {
 		return nil, fmt.Errorf("Error setting perimeter_name: %s", err)
 	}
diff --git a/mmv1/templates/terraform/encoders/access_context_manager_service_perimeter_resource.go.tmpl b/mmv1/templates/terraform/encoders/access_context_manager_service_perimeter_resource.go.tmpl
new file mode 100644
index 000000000000..0e38e7e9dcc1
--- /dev/null
+++ b/mmv1/templates/terraform/encoders/access_context_manager_service_perimeter_resource.go.tmpl
@@ -0,0 +1,9 @@
+// Set the access_policy_id field from part of the perimeter_name parameter.
+
+// This logic is inside the encoder since the access_policy_id field is part of
+// the mutex lock and encoders run before the lock is set.
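+// perimeter_name has the format accessPolicies/{policy_id}/servicePerimeters/{perimeter}, so parts[1] is the policy ID.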
+parts := strings.Split(d.Get("perimeter_name").(string), "/")
+d.Set("access_policy_id", fmt.Sprintf("accessPolicies/%s", parts[1]))
+
+return obj, nil
\ No newline at end of file
diff --git a/mmv1/templates/terraform/examples/chronicle_referencelist_basic.tf.tmpl b/mmv1/templates/terraform/examples/chronicle_referencelist_basic.tf.tmpl
new file mode 100644
index 000000000000..5899e5a34ca5
--- /dev/null
+++ b/mmv1/templates/terraform/examples/chronicle_referencelist_basic.tf.tmpl
@@ -0,0 +1,11 @@
+resource "google_chronicle_reference_list" "{{$.PrimaryResourceId}}" {
+ provider = "google-beta"
+ location = "us"
+ instance = "{{index $.TestEnvVars "chronicle_id"}}"
+ reference_list_id = "{{index $.Vars "reference_list_id"}}"
+ description = "referencelist-description"
+ entries {
+  value = "referencelist-entry-value"
+ }
+ syntax_type = "REFERENCE_LIST_SYNTAX_TYPE_PLAIN_TEXT_STRING"
+}
diff --git a/mmv1/templates/terraform/examples/chronicle_ruledeployment_basic.tf.tmpl b/mmv1/templates/terraform/examples/chronicle_ruledeployment_basic.tf.tmpl
new file mode 100644
index 000000000000..26e6a2e4dd93
--- /dev/null
+++ b/mmv1/templates/terraform/examples/chronicle_ruledeployment_basic.tf.tmpl
@@ -0,0 +1,19 @@
+resource "google_chronicle_rule" "my-rule" {
+ provider = "google-beta"
+ location = "us"
+ instance = "{{index $.TestEnvVars "chronicle_id"}}"
+ text = <<-EOT
+             rule test_rule { meta: events:  $userid = $e.principal.user.userid  match: $userid over 10m condition: $e }
+         EOT
+}
+
+resource "google_chronicle_rule_deployment" "{{$.PrimaryResourceId}}" {
+ provider = "google-beta"
+ location = "us"
+ instance = "{{index $.TestEnvVars "chronicle_id"}}"
+ rule = element(split("/", resource.google_chronicle_rule.my-rule.name), length(split("/", resource.google_chronicle_rule.my-rule.name)) - 1)
+ enabled = true
+ alerting = true
+ archived = false
+ run_frequency = "DAILY"
+}
diff --git a/mmv1/templates/terraform/examples/parameter_config_basic.tf.tmpl b/mmv1/templates/terraform/examples/parameter_config_basic.tf.tmpl
new file mode 100644
index 000000000000..b31380eee821
--- /dev/null
+++ b/mmv1/templates/terraform/examples/parameter_config_basic.tf.tmpl
@@ -0,0 +1,4 @@
+resource "google_parameter_manager_parameter" "{{$.PrimaryResourceId}}" {
+  provider = google-beta
+  parameter_id = "{{index $.Vars "parameter_id"}}"
+}
diff --git a/mmv1/templates/terraform/examples/parameter_with_format.tf.tmpl b/mmv1/templates/terraform/examples/parameter_with_format.tf.tmpl
new file mode 100644
index 000000000000..dd78230835a4
--- /dev/null
+++ b/mmv1/templates/terraform/examples/parameter_with_format.tf.tmpl
@@ -0,0 +1,5 @@
+resource "google_parameter_manager_parameter" "{{$.PrimaryResourceId}}" {
+  provider = google-beta
+  parameter_id = "{{index $.Vars "parameter_id"}}"
+  format = "JSON"
+}
diff --git a/mmv1/templates/terraform/examples/parameter_with_labels.tf.tmpl b/mmv1/templates/terraform/examples/parameter_with_labels.tf.tmpl
new file mode 100644
index 000000000000..8d990cd7a1a3
--- /dev/null
+++ b/mmv1/templates/terraform/examples/parameter_with_labels.tf.tmpl
@@ -0,0 +1,12 @@
+resource "google_parameter_manager_parameter" "{{$.PrimaryResourceId}}" {
+  provider = google-beta
+  parameter_id = "{{index $.Vars "parameter_id"}}"
+
+  labels = {
+    key1 = "val1"
+    key2 = "val2"
+    key3 = "val3"
+    key4 = "val4"
+    key5 = "val5"
+  }
+}
diff --git a/mmv1/templates/terraform/pre_delete/code_repository_index_force_delete.go.tmpl b/mmv1/templates/terraform/pre_delete/code_repository_index_force_delete.go.tmpl
new file mode 100644
index 000000000000..a451d1ece932
--- /dev/null
+++ b/mmv1/templates/terraform/pre_delete/code_repository_index_force_delete.go.tmpl
@@ -0,0 +1,9 @@
+{{- if ne $.TargetVersionName "ga" -}}
+obj = make(map[string]interface{})
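+// Only send force=true on the delete request when the user opted in via the force_destroy virtual field.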
+if v, ok := d.GetOk("force_destroy"); ok {
+	if v == true {
+		obj["force"] = true
+	}
+}
+{{- end }}
diff --git a/mmv1/templates/terraform/pre_update/workbench_instance.go.tmpl b/mmv1/templates/terraform/pre_update/workbench_instance.go.tmpl
index f33e2afefa96..c74166b07abe 100644
--- a/mmv1/templates/terraform/pre_update/workbench_instance.go.tmpl
+++ b/mmv1/templates/terraform/pre_update/workbench_instance.go.tmpl
@@ -27,6 +27,11 @@ if d.HasChange("gce_setup.0.metadata") {
 if d.HasChange("effective_labels") {
     newUpdateMask = append(newUpdateMask, "labels")
 }
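+// Changing the container image requires the instance to be stopped, so flag the instance
+// to be stopped before the update is applied.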
+if d.HasChange("gce_setup.0.container_image") {
+    newUpdateMask = append(newUpdateMask, "gce_setup.container_image")
+    stopInstance = true
+}
 updateMask = newUpdateMask
 // Overwrite the previously set mask.
 url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(newUpdateMask, ",")})
diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl
index 73c2d18e467a..7751309e81d4 100644
--- a/mmv1/templates/terraform/resource.go.tmpl
+++ b/mmv1/templates/terraform/resource.go.tmpl
@@ -314,8 +314,8 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{
     d.SetId(id)
 
 {{if and $.GetAsync ($.GetAsync.Allow "Create") -}}
-{{if ($.GetAsync.IsA "OpAsync") -}}
-{{if and $.GetAsync.Result.ResourceInsideResponse $.GetIdentity -}}
+{{  if ($.GetAsync.IsA "OpAsync") -}}
+{{    if and $.GetAsync.Result.ResourceInsideResponse $.GetIdentity -}}
     // Use the resource in the operation response to populate
     // identity fields and d.Id() before read
     var opRes map[string]interface{}
@@ -330,7 +330,7 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{
         // The resource didn't actually create
         d.SetId("")
 
-{{ end -}}
+{{           end -}}
         return fmt.Errorf("Error waiting to create {{ $.Name -}}: %s", err)
     }
 
@@ -390,8 +390,8 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{
     }
 
 {{        end  -}}
-{{      end -}}
-{{    end -}}
+{{      end -}}{{/*if ($.GetAsync.IsA "OpAsync")*/}}
+{{    end -}}{{/*if and $.GetAsync ($.GetAsync.Allow "Create")*/}}
 {{if $.CustomCode.PostCreate -}} 
     {{- $.CustomTemplate $.CustomCode.PostCreate false -}}
 {{- end}}
diff --git a/mmv1/third_party/terraform/.teamcity/USE_CONFIG_WITH_TEAMCITY.md b/mmv1/third_party/terraform/.teamcity/USE_CONFIG_WITH_TEAMCITY.md
index 17c81477f036..6cbd2b4f4b9d 100644
--- a/mmv1/third_party/terraform/.teamcity/USE_CONFIG_WITH_TEAMCITY.md
+++ b/mmv1/third_party/terraform/.teamcity/USE_CONFIG_WITH_TEAMCITY.md
@@ -95,6 +95,9 @@ The next step is provide some input values that the configuration needs to fully
 | org2Ga | Used to set the [GOOGLE_ORG_2](https://github.com/GoogleCloudPlatform/magic-modules/blob/94a3f91d75ee823c521a0d8d3984a1493fa0926a/mmv1/third_party/terraform/envvar/envvar_utils.go#L73-L75) environment variable in acceptance tests - GA specific |
 | org2Beta | Used to set the [GOOGLE_ORG_2](https://github.com/GoogleCloudPlatform/magic-modules/blob/94a3f91d75ee823c521a0d8d3984a1493fa0926a/mmv1/third_party/terraform/envvar/envvar_utils.go#L73-L75) environment variable in acceptance tests - Beta specific |
 | org2Vcr | Used to set the [GOOGLE_ORG_2](https://github.com/GoogleCloudPlatform/magic-modules/blob/94a3f91d75ee823c521a0d8d3984a1493fa0926a/mmv1/third_party/terraform/envvar/envvar_utils.go#L73-L75) environment variable in acceptance tests - VCR specific |
+| chronicleInstanceIdGa | Used to set the [GOOGLE_CHRONICLE_INSTANCE_ID](https://github.com/GoogleCloudPlatform/magic-modules/blob/03a313d13c5d31f8e8fc71cb32d06157bc260f78/mmv1/third_party/terraform/envvar/envvar_utils.go#L107-L112) environment variable in acceptance tests - GA specific |
+| chronicleInstanceIdBeta | Used to set the [GOOGLE_CHRONICLE_INSTANCE_ID](https://github.com/GoogleCloudPlatform/magic-modules/blob/03a313d13c5d31f8e8fc71cb32d06157bc260f78/mmv1/third_party/terraform/envvar/envvar_utils.go#L107-L112) environment variable in acceptance tests - Beta specific |
+| chronicleInstanceIdVcr | Used to set the [GOOGLE_CHRONICLE_INSTANCE_ID](https://github.com/GoogleCloudPlatform/magic-modules/blob/03a313d13c5d31f8e8fc71cb32d06157bc260f78/mmv1/third_party/terraform/envvar/envvar_utils.go#L107-L112) environment variable in acceptance tests - VCR specific |
 | billingAccount | Used to set the [GOOGLE_BILLING_ACCOUNT](https://github.com/GoogleCloudPlatform/magic-modules/blob/94a3f91d75ee823c521a0d8d3984a1493fa0926a/mmv1/third_party/terraform/envvar/envvar_utils.go#L81-L85) ALL environment variable in acceptance tests |
 | billingAccount2 | Used to set the [GOOGLE_BILLING_ACCOUNT_2](https://github.com/GoogleCloudPlatform/magic-modules/blob/94a3f91d75ee823c521a0d8d3984a1493fa0926a/mmv1/third_party/terraform/services/resourcemanager/resource_google_project_test.go#L78-L79) environment variable in ALL acceptance tests |
 | custId | Used to set the [GOOGLE_CUST_ID](https://github.com/GoogleCloudPlatform/magic-modules/blob/94a3f91d75ee823c521a0d8d3984a1493fa0926a/mmv1/third_party/terraform/envvar/envvar_utils.go#L52-L56) environment variable in ALL acceptance tests |
diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt
index 7963d0f398ff..efd68430b38a 100644
--- a/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt
+++ b/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt
@@ -596,6 +596,11 @@ var ServicesListBeta = mapOf(
         "displayName" to "Parallelstore",
         "path" to "./google/services/parallelstore"
     ),
+    "parametermanager" to mapOf(
+        "name" to "parametermanager",
+        "displayName" to "Parametermanager",
+        "path" to "./google-beta/services/parametermanager"
+    ),
     "parametermanagerregional" to mapOf(
         "name" to "parametermanagerregional",
         "displayName" to "Parametermanagerregional",
diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt
index 8bf788776300..efa053fd771e 100644
--- a/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt
+++ b/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt
@@ -591,6 +591,11 @@ var ServicesListGa = mapOf(
         "displayName" to "Parallelstore",
         "path" to "./google/services/parallelstore"
     ),
+    "parametermanager" to mapOf(
+        "name" to "parametermanager",
+        "displayName" to "Parametermanager",
+        "path" to "./google/services/parametermanager"
+    ),
     "parametermanagerregional" to mapOf(
         "name" to "parametermanagerregional",
         "displayName" to "Parametermanagerregional",
diff --git a/mmv1/third_party/terraform/services/chronicle/resource_chronicle_reference_list_test.go.tmpl b/mmv1/third_party/terraform/services/chronicle/resource_chronicle_reference_list_test.go.tmpl
new file mode 100644
index 000000000000..c3896d61594e
--- /dev/null
+++ b/mmv1/third_party/terraform/services/chronicle/resource_chronicle_reference_list_test.go.tmpl
@@ -0,0 +1,79 @@
+package chronicle_test
+
+{{- if ne $.TargetVersionName "ga" }}
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
+
+	"github.com/hashicorp/terraform-provider-google/google/acctest"
+	"github.com/hashicorp/terraform-provider-google/google/envvar"
+)
+
+func TestAccChronicleReferenceList_chronicleReferencelistBasicExample_update(t *testing.T) {
+	t.Parallel()
+
+	context := map[string]interface{}{
+		"chronicle_id":  envvar.GetTestChronicleInstanceIdFromEnv(t),
+		"random_suffix": acctest.RandString(t, 10),
+	}
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccChronicleReferenceList_chronicleReferencelistBasicExample_basic(context),
+			},
+			{
+				ResourceName:            "google_chronicle_reference_list.example",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"instance", "location", "reference_list_id"},
+			},
+			{
+				Config: testAccChronicleReferenceList_chronicleReferencelistBasicExample_update(context),
+			},
+			{
+				ResourceName:            "google_chronicle_reference_list.example",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"instance", "location", "reference_list_id"},
+			},
+		},
+	})
+}
+
+func testAccChronicleReferenceList_chronicleReferencelistBasicExample_basic(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_chronicle_reference_list" "example" {
+ provider = "google-beta"
+ location = "us"
+ instance = "%{chronicle_id}"
+ reference_list_id = "tf_test_reference_list_id%{random_suffix}"
+ description = "referencelist-description"
+ entries {
+  value = "referencelist-entry-value"
+ }
+ syntax_type = "REFERENCE_LIST_SYNTAX_TYPE_PLAIN_TEXT_STRING"
+}
+`, context)
+}
+
+func testAccChronicleReferenceList_chronicleReferencelistBasicExample_update(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_chronicle_reference_list" "example" {
+ provider = "google-beta"
+ location = "us"
+ instance = "%{chronicle_id}"
+ reference_list_id = "tf_test_reference_list_id%{random_suffix}"
+ description = "referencelist-description-updated"
+ entries {
+  value = "referencelist-entry-value-updated"
+ }
+ syntax_type = "REFERENCE_LIST_SYNTAX_TYPE_REGEX"
+}
+`, context)
+}
+{{- end }}
diff --git a/mmv1/third_party/terraform/services/chronicle/resource_chronicle_rule_deployment_test.go.tmpl b/mmv1/third_party/terraform/services/chronicle/resource_chronicle_rule_deployment_test.go.tmpl
new file mode 100644
index 000000000000..f2dd8cb1d059
--- /dev/null
+++ b/mmv1/third_party/terraform/services/chronicle/resource_chronicle_rule_deployment_test.go.tmpl
@@ -0,0 +1,95 @@
+package chronicle_test
+
+{{- if ne $.TargetVersionName "ga" }}
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
+
+	"github.com/hashicorp/terraform-provider-google/google/acctest"
+	"github.com/hashicorp/terraform-provider-google/google/envvar"
+)
+
+func TestAccChronicleRuleDeployment_chronicleRuledeploymentBasicExample_update(t *testing.T) {
+	t.Parallel()
+
+	context := map[string]interface{}{
+		"chronicle_id":  envvar.GetTestChronicleInstanceIdFromEnv(t),
+		"random_suffix": acctest.RandString(t, 10),
+	}
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccChronicleRuleDeployment_chronicleRuledeploymentBasicExample_basic(context),
+			},
+			{
+				ResourceName:            "google_chronicle_rule_deployment.example",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"instance", "location", "rule"},
+			},
+			{
+				Config: testAccChronicleRuleDeployment_chronicleRuledeploymentBasicExample_update(context),
+			},
+			{
+				ResourceName:            "google_chronicle_rule_deployment.example",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"instance", "location", "rule"},
+			},
+		},
+	})
+}
+
+func testAccChronicleRuleDeployment_chronicleRuledeploymentBasicExample_basic(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_chronicle_rule" "my-rule" {
+ provider = "google-beta"
+ location = "us"
+ instance = "%{chronicle_id}"
+ text = <<-EOT
+             rule test_rule { meta: events:  $userid = $e.principal.user.userid  match: $userid over 10m condition: $e }
+         EOT
+}
+
+resource "google_chronicle_rule_deployment" "example" {
+ provider = "google-beta"
+ location = "us"
+ instance = "%{chronicle_id}"
+ rule = element(split("/", resource.google_chronicle_rule.my-rule.name), length(split("/", resource.google_chronicle_rule.my-rule.name)) - 1)
+ enabled = true
+ alerting = true
+ archived = false
+ run_frequency = "DAILY"
+}
+`, context)
+}
+
+func testAccChronicleRuleDeployment_chronicleRuledeploymentBasicExample_update(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_chronicle_rule" "my-rule" {
+ provider = "google-beta"
+ location = "us"
+ instance = "%{chronicle_id}"
+ text = <<-EOT
+             rule test_rule { meta: events:  $userid = $e.principal.user.userid  match: $userid over 10m condition: $e }
+         EOT
+}
+
+resource "google_chronicle_rule_deployment" "example" {
+ provider = "google-beta"
+ location = "us"
+ instance = "%{chronicle_id}"
+ rule = element(split("/", resource.google_chronicle_rule.my-rule.name), length(split("/", resource.google_chronicle_rule.my-rule.name)) - 1)
+ enabled = false
+ alerting = false
+ archived = false
+ run_frequency = "HOURLY"
+}
+`, context)
+}
+{{- end }}
diff --git a/mmv1/third_party/terraform/services/gemini/resource_gemini_code_repository_index_test.go.tmpl b/mmv1/third_party/terraform/services/gemini/resource_gemini_code_repository_index_test.go.tmpl
index ab1520dbc92b..ddbd7c17168f 100644
--- a/mmv1/third_party/terraform/services/gemini/resource_gemini_code_repository_index_test.go.tmpl
+++ b/mmv1/third_party/terraform/services/gemini/resource_gemini_code_repository_index_test.go.tmpl
@@ -2,6 +2,7 @@ package gemini_test
 {{- if ne $.TargetVersionName "ga" }}
 
 import (
+	"fmt"
 	"os"
 	"testing"
 
@@ -44,6 +45,136 @@ func TestAccGeminiCodeRepositoryIndex_update(t *testing.T) {
 	})
 }
 
+// TestAccGeminiCodeRepositoryIndex_delete checks that deleting a CRI together with its child resources succeeds.
+// Note: this is an example of bad usage, where RGs refer to the CRI by string ID rather than by reference; they
+// will be force-removed upon CRI deletion because the CRI is deleted with force_destroy set to true.
+// The plan after the _delete step should not be empty, because the child resource remains in the plan.
+func TestAccGeminiCodeRepositoryIndex_delete(t *testing.T) {
+	bootstrappedKMS := acctest.BootstrapKMSKeyInLocation(t, "us-central1")
+	randomSuffix := acctest.RandString(t, 10)
+	context := map[string]interface{}{
+		"random_suffix": randomSuffix,
+		"project_id": os.Getenv("GOOGLE_PROJECT"),
+		"kms_key": bootstrappedKMS.CryptoKey.Name,
+		"cri_id": fmt.Sprintf("tf-test-cri-index-delete-example-%s", randomSuffix),
+	}
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck: func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccGeminiCodeRepositoryIndex_withChildren_basic(context),
+			},
+			{
+				ResourceName:            "google_gemini_code_repository_index.example",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"code_repository_index_id", "labels", "location", "terraform_labels", "force_destroy"},
+			},
+			{
+				Config:             testAccGeminiCodeRepositoryIndex_withChildren_delete(context),
+				ExpectNonEmptyPlan: true,
+				PlanOnly:           true,
+			},
+		},
+	})
+}
+
+func testAccGeminiCodeRepositoryIndex_withChildren_basic(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_gemini_code_repository_index" "example" {
+  provider = google-beta
+  labels = {"ccfe_debug_note": "terraform_e2e_should_be_deleted"}
+  location = "us-central1"
+  code_repository_index_id = "%{cri_id}"
+  force_destroy = true
+}
+
+resource "google_gemini_repository_group" "example" {
+  provider = google-beta
+  location = "us-central1"
+  code_repository_index = "%{cri_id}"
+  repository_group_id = "tf-test-rg-repository-group-id-%{random_suffix}"
+  repositories {
+    resource = "projects/%{project_id}/locations/us-central1/connections/${google_developer_connect_connection.github_conn.connection_id}/gitRepositoryLinks/${google_developer_connect_git_repository_link.conn.git_repository_link_id}"
+    branch_pattern = "main"
+  }
+  labels = {"label1": "value1"}
+  depends_on = [
+    google_gemini_code_repository_index.example
+  ]
+}
+
+resource "google_developer_connect_git_repository_link" "conn" {
+  provider = google-beta
+  git_repository_link_id = "tf-test-repository-conn-delete"
+  parent_connection = google_developer_connect_connection.github_conn.connection_id
+  clone_uri = "https://github.com/CC-R-github-robot/tf-test.git"
+  location = "us-central1"
+  annotations = {}
+}
+
+resource "google_developer_connect_connection" "github_conn" {
+  provider = google-beta
+  location = "us-central1"
+  connection_id = "tf-test-cloudaicompanion-delete-%{random_suffix}"
+  disabled = false
+
+  github_config {
+    github_app = "DEVELOPER_CONNECT"
+    app_installation_id = 54180648
+
+    authorizer_credential {
+      oauth_token_secret_version = "projects/502367051001/secrets/tf-test-cloudaicompanion-github-oauthtoken-c42e5c/versions/1"
+    }
+  }
+}
+`, context)
+}
+
+// Removed depends_on so the plan-only test step is not broken
+func testAccGeminiCodeRepositoryIndex_withChildren_delete(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_gemini_repository_group" "example" {
+  provider = google-beta
+  location = "us-central1"
+  code_repository_index = "%{cri_id}"
+  repository_group_id = "tf-test-rg-repository-group-id-%{random_suffix}"
+  repositories {
+    resource = "projects/%{project_id}/locations/us-central1/connections/${google_developer_connect_connection.github_conn.connection_id}/gitRepositoryLinks/${google_developer_connect_git_repository_link.conn.git_repository_link_id}"
+    branch_pattern = "main"
+  }
+  labels = {"label1": "value1"}
+}
+
+resource "google_developer_connect_git_repository_link" "conn" {
+  provider = google-beta
+  git_repository_link_id = "tf-test-repository-conn-delete"
+  parent_connection = google_developer_connect_connection.github_conn.connection_id
+  clone_uri = "https://github.com/CC-R-github-robot/tf-test.git"
+  location = "us-central1"
+  annotations = {}
+}
+
+resource "google_developer_connect_connection" "github_conn" {
+  provider = google-beta
+  location = "us-central1"
+  connection_id = "tf-test-cloudaicompanion-delete-%{random_suffix}"
+  disabled = false
+
+  github_config {
+    github_app = "DEVELOPER_CONNECT"
+    app_installation_id = 54180648
+
+    authorizer_credential {
+      oauth_token_secret_version = "projects/502367051001/secrets/tf-test-cloudaicompanion-github-oauthtoken-c42e5c/versions/1"
+    }
+  }
+}
+`, context)
+}
+
 func testAccGeminiCodeRepositoryIndex_basic(context map[string]interface{}) string {
 	return acctest.Nprintf(`
 resource "google_gemini_code_repository_index" "example" {
diff --git a/mmv1/third_party/terraform/services/parametermanager/resource_parameter_manager_parameter_test.go.tmpl b/mmv1/third_party/terraform/services/parametermanager/resource_parameter_manager_parameter_test.go.tmpl
new file mode 100644
index 000000000000..9e35e39f7abe
--- /dev/null
+++ b/mmv1/third_party/terraform/services/parametermanager/resource_parameter_manager_parameter_test.go.tmpl
@@ -0,0 +1,109 @@
+package parametermanager_test
+{{- if ne $.TargetVersionName "ga" }}
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
+	"github.com/hashicorp/terraform-provider-google/google/acctest"
+)
+
+func TestAccParameterManagerParameter_labelsUpdate(t *testing.T) {
+	t.Parallel()
+
+	context := map[string]interface{}{
+		"random_suffix": acctest.RandString(t, 10),
+	}
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t),
+		CheckDestroy:             testAccCheckParameterManagerParameterDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccParameterManagerParameter_withoutLabels(context),
+			},
+			{
+				ResourceName:            "google_parameter_manager_parameter.parameter-with-labels",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"labels", "parameter_id", "terraform_labels"},
+			},
+			{
+				Config: testAccParameterManagerParameter_labelsUpdate(context),
+			},
+			{
+				ResourceName:            "google_parameter_manager_parameter.parameter-with-labels",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"labels", "parameter_id", "terraform_labels"},
+			},
+			{
+				Config: testAccParameterManagerParameter_labelsUpdateOther(context),
+			},
+			{
+				ResourceName:            "google_parameter_manager_parameter.parameter-with-labels",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"labels", "parameter_id", "terraform_labels"},
+			},
+			{
+				Config: testAccParameterManagerParameter_withoutLabels(context),
+			},
+			{
+				ResourceName:            "google_parameter_manager_parameter.parameter-with-labels",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"labels", "parameter_id", "terraform_labels"},
+			},
+		},
+	})
+}
+
+func testAccParameterManagerParameter_withoutLabels(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_parameter_manager_parameter" "parameter-with-labels" {
+  provider = google-beta
+  parameter_id = "tf_test_parameter%{random_suffix}"
+  format = "JSON"
+}
+`, context)
+}
+
+func testAccParameterManagerParameter_labelsUpdate(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_parameter_manager_parameter" "parameter-with-labels" {
+  provider = google-beta
+  parameter_id = "tf_test_parameter%{random_suffix}"
+  format = "JSON"
+
+  labels = {
+    key1 = "val1"
+    key2 = "val2"
+    key3 = "val3"
+    key4 = "val4"
+    key5 = "val5"
+  }
+}
+`, context)
+}
+
+func testAccParameterManagerParameter_labelsUpdateOther(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_parameter_manager_parameter" "parameter-with-labels" {
+  provider = google-beta
+  parameter_id = "tf_test_parameter%{random_suffix}"
+  format = "JSON"
+
+  labels = {
+    key1 = "val1"
+    key2 = "updateval2"
+    updatekey3 = "val3"
+    updatekey4 = "updateval4"
+    key6 = "val6"
+  }
+}
+`, context)
+}
+
+{{ end }}
diff --git a/mmv1/third_party/terraform/services/sql/data_source_sql_database_instances.go b/mmv1/third_party/terraform/services/sql/data_source_sql_database_instances.go
index 24e4e30f66f5..8f48b4a5df94 100644
--- a/mmv1/third_party/terraform/services/sql/data_source_sql_database_instances.go
+++ b/mmv1/third_party/terraform/services/sql/data_source_sql_database_instances.go
@@ -153,6 +153,7 @@ func flattenDatasourceGoogleDatabaseInstancesList(fetchedInstances []*sqladmin.D
 		}
 
 		instance["replica_configuration"] = flattenReplicaConfigurationforDataSource(rawInstance.ReplicaConfiguration)
+		instance["replication_cluster"] = flattenReplicationClusterForDataSource(rawInstance.ReplicationCluster)
 
 		ipAddresses := flattenIpAddresses(rawInstance.IpAddresses)
 		instance["ip_address"] = ipAddresses
@@ -198,3 +199,19 @@ func flattenReplicaConfigurationforDataSource(replicaConfiguration *sqladmin.Rep
 
 	return rc
 }
+
+// flattenReplicationClusterForDataSource converts cloud SQL backend ReplicationCluster (proto) to
+// terraform replication_cluster. We explicitly allow the case when ReplicationCluster
+// is nil since replication_cluster is computed+optional.
+func flattenReplicationClusterForDataSource(replicationCluster *sqladmin.ReplicationCluster) []map[string]interface{} {
+	data := make(map[string]interface{})
+	data["failover_dr_replica_name"] = ""
+	if replicationCluster != nil && replicationCluster.FailoverDrReplicaName != "" {
+		data["failover_dr_replica_name"] = replicationCluster.FailoverDrReplicaName
+	}
+	data["dr_replica"] = false
+	if replicationCluster != nil {
+		data["dr_replica"] = replicationCluster.DrReplica
+	}
+	return []map[string]interface{}{data}
+}
diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.tmpl b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.tmpl
index 1ce97cf50157..27acb36fb148 100644
--- a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.tmpl
+++ b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.tmpl
@@ -925,6 +925,27 @@ is set to true. Defaults to ZONAL.`,
 				},
 				Description: `The replicas of the instance.`,
 			},
+			"replication_cluster": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Optional: true,
+				MaxItems: 1,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"failover_dr_replica_name": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: `If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. The standard format of this field is "your-project:your-instance". You can also set this field to "your-instance", but the Cloud SQL backend will convert it to the aforementioned standard format.`,
+						},
+						"dr_replica": {
+							Type:        schema.TypeBool,
+							Computed:    true,
+							Description: `Read-only field that indicates whether the replica is a DR replica.`,
+						},
+					},
+				},
+				Description: "A primary instance and disaster recovery replica pair. Applicable to MySQL and PostgreSQL. This field can be set only after both the primary and replica are created.",
+			},
 			"server_ca_cert": {
 				Type:      schema.TypeList,
 				Computed:  true,
@@ -1719,6 +1740,11 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e
 	if err := d.Set("replica_names", instance.ReplicaNames); err != nil {
 		return fmt.Errorf("Error setting replica_names: %w", err)
 	}
+
+	// We always set replication_cluster because it is computed+optional.
+	if err := d.Set("replication_cluster", flattenReplicationCluster(instance.ReplicationCluster, d)); err != nil {
+		return fmt.Errorf("Error setting replication_cluster: %w", err)
+	}
 	ipAddresses := flattenIpAddresses(instance.IpAddresses)
 	if err := d.Set("ip_address", ipAddresses); err != nil {
 		log.Printf("[WARN] Failed to set SQL Database Instance IP Addresses")
@@ -1981,7 +2007,7 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{})
 			ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError},
 		})
 		if err != nil {
-			return fmt.Errorf("Error, failed to promote read replica instance as primary stand-alone %s: %s", instance.Name, err)
+			return fmt.Errorf("Error, failed to promote read replica instance as primary stand-alone %s: %s", d.Get("name"), err)
 		}
 		err = SqlAdminOperationWaitTime(config, op, project, "Promote Instance", userAgent, d.Timeout(schema.TimeoutUpdate))
 		if err != nil {
@@ -2050,6 +2076,14 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{})
 		instance.DatabaseVersion = databaseVersion
 	}
 
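+	// Include replication_cluster in the update request only when a failover DR replica name is configured.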
+	failoverDrReplicaName := d.Get("replication_cluster.0.failover_dr_replica_name").(string)
+	if failoverDrReplicaName != "" {
+		instance.ReplicationCluster = &sqladmin.ReplicationCluster{
+			FailoverDrReplicaName: failoverDrReplicaName,
+		}
+	}
+
 	err = transport_tpg.Retry(transport_tpg.RetryOptions{
 		RetryFunc: func() (rerr error) {
 			op, rerr = config.NewSqlAdminClient(userAgent).Instances.Update(project, d.Get("name").(string), instance).Do()
@@ -2377,6 +2410,22 @@ func flattenDatabaseFlags(databaseFlags []*sqladmin.DatabaseFlags) []map[string]
 	return flags
 }
 
+// flattenReplicationCluster converts cloud SQL backend ReplicationCluster (proto) to
+// terraform replication_cluster. We explicitly allow the case when ReplicationCluster
+// is nil since replication_cluster is computed+optional.
+func flattenReplicationCluster(replicationCluster *sqladmin.ReplicationCluster, d *schema.ResourceData) []map[string]interface{} {
+	data := make(map[string]interface{})
+	data["failover_dr_replica_name"] = ""
+	if replicationCluster != nil && replicationCluster.FailoverDrReplicaName != "" {
+		data["failover_dr_replica_name"] = replicationCluster.FailoverDrReplicaName
+	}
+	data["dr_replica"] = false
+	if replicationCluster != nil {
+		data["dr_replica"] = replicationCluster.DrReplica
+	}
+	return []map[string]interface{}{data}
+}
+
 func flattenIpConfiguration(ipConfiguration *sqladmin.IpConfiguration, d *schema.ResourceData) interface{} {
 	data := map[string]interface{}{
 		"ipv4_enabled":                                  ipConfiguration.Ipv4Enabled,
@@ -2659,11 +2708,6 @@ func isSwitchoverRequested(d *schema.ResourceData) bool {
 	if !slices.Contains(newReplicaNames.([]interface{}), originalPrimaryName) {
 		return false
 	}
-	dbVersion := d.Get("database_version")
-	if !strings.HasPrefix(dbVersion.(string), "SQLSERVER") {
-		log.Printf("[WARN] Switchover is only supported for SQL Server %q", dbVersion)
-		return false
-	}
 	return true
 }
 
@@ -2681,10 +2725,6 @@ func isReplicaPromoteRequested(_ context.Context, oldInstanceType interface{}, n
 // Check if this resource change is the manual update done on old primary after a switchover. If true, no replacement is needed.
 func isSwitchoverFromOldPrimarySide(d *schema.ResourceDiff) bool {
 	dbVersion := d.Get("database_version")
-	if !strings.HasPrefix(dbVersion.(string), "SQLSERVER") {
-		log.Printf("[WARN] Switchover is only supported for SQL Server %q", dbVersion)
-		return false
-	}
 	oldInstanceType, newInstanceType := d.GetChange("instance_type")
 	oldReplicaNames, newReplicaNames := d.GetChange("replica_names")
     _, newMasterInstanceName := d.GetChange("master_instance_name")
@@ -2699,11 +2739,12 @@ func isSwitchoverFromOldPrimarySide(d *schema.ResourceDiff) bool {
 	newMasterInOldReplicaNames := slices.Contains(oldReplicaNames.([]interface{}), newMasterInstanceName)
 	newMasterNotInNewReplicaNames := !slices.Contains(newReplicaNames.([]interface{}), newMasterInstanceName)
 	isCascadableReplica := cascadableReplicaFieldExists && cascadableReplica.(bool)
+	isSQLServer := strings.HasPrefix(dbVersion.(string), "SQLSERVER")
 
 	return newMasterInstanceName != nil &&
 	instanceTypeChangedFromPrimaryToReplica &&
-	newMasterInOldReplicaNames && newMasterNotInNewReplicaNames &&
-	isCascadableReplica
+	newMasterInOldReplicaNames && newMasterNotInNewReplicaNames && (!isSQLServer ||
+	isCascadableReplica)
 }
 
 func checkPromoteConfigurations(d *schema.ResourceData) error {
diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go
index 52e84b781316..d9b5952d6d28 100644
--- a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go
+++ b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go
@@ -2583,6 +2583,158 @@ func TestAccSqlDatabaseInstance_SwitchoverSuccess(t *testing.T) {
 	})
 }
 
+// Switchover for MySQL.
+func TestAccSqlDatabaseInstance_MysqlSwitchoverSuccess(t *testing.T) {
+	t.Parallel()
+	primaryName := "tf-test-mysql-sw-primary-" + acctest.RandString(t, 10)
+	replicaName := "tf-test-mysql-sw-replica-" + acctest.RandString(t, 10)
+	project := envvar.GetTestProjectFromEnv()
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+		CheckDestroy:             testAccSqlDatabaseInstanceDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testGoogleSqlDatabaseInstanceConfig_mysqlEplusWithReplica(project, primaryName, replicaName),
+			},
+			{
+				ResourceName:            "google_sql_database_instance.original-primary",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: ignoredReplicaConfigurationFields,
+			},
+			{
+				ResourceName:            "google_sql_database_instance.original-replica",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: ignoredReplicaConfigurationFields,
+			},
+			// Let's make sure that setting and unsetting failover replica works.
+			{
+				Config: googleSqlDatabaseInstance_mysqlSetFailoverReplica(project, primaryName, replicaName),
+			},
+			{
+				Config: googleSqlDatabaseInstance_mysqlUnsetFailoverReplica(project, primaryName, replicaName),
+			},
+			{
+				Config: googleSqlDatabaseInstance_mysqlSetFailoverReplica(project, primaryName, replicaName),
+			},
+			{
+				// Split into two configs because the current TestStep implementation checks the diff before refreshing.
+				Config: googleSqlDatabaseInstance_mysqlSwitchoverOnReplica(project, primaryName, replicaName),
+				// Original primary needs to be updated at the next step.
+				ExpectNonEmptyPlan: true,
+			},
+			{
+				Config: googleSqlDatabaseInstance_mysqlUpdatePrimaryAfterSwitchover(project, primaryName, replicaName),
+			},
+			{
+				RefreshState: true,
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckTypeSetElemAttr("google_sql_database_instance.original-replica", "replica_names.*", primaryName),
+					checkSwitchoverOriginalReplicaConfigurations("google_sql_database_instance.original-replica"),
+					checkSwitchoverOriginalPrimaryConfigurations("google_sql_database_instance.original-primary", replicaName),
+				),
+			},
+			{
+				ResourceName:            "google_sql_database_instance.original-primary",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: ignoredReplicaConfigurationFields,
+			},
+			{
+				ResourceName:      "google_sql_database_instance.original-replica",
+				ImportState:       true,
+				ImportStateVerify: true,
+				// original-replica is no longer a replica, but replica_configuration is optional+computed and cannot be unset
+				ImportStateVerifyIgnore: []string{"replica_configuration", "deletion_protection", "root_password"},
+			},
+			{
+				// Delete the replica first so PostTestDestroy doesn't fail when deleting instances that have replicas. Switchover behavior has already been validated; the remaining steps are cleanup.
+				Config: googleSqlDatabaseInstance_mysqlDeleteReplicasAfterSwitchover(project, primaryName, replicaName),
+				// We delete the replica but haven't yet updated the primary's replica_names
+				ExpectNonEmptyPlan: true,
+			},
+			{
+				// Remove replica from primary's resource
+				Config: googleSqlDatabaseInstance_mysqlRemoveReplicaFromPrimaryAfterSwitchover(project, replicaName),
+			},
+		},
+	})
+}
+
+// Switchover for PostgreSQL.
+func TestAccSqlDatabaseInstance_PostgresSwitchoverSuccess(t *testing.T) {
+	t.Parallel()
+	primaryName := "tf-test-pg-sw-primary-" + acctest.RandString(t, 10)
+	replicaName := "tf-test-pg-sw-replica-" + acctest.RandString(t, 10)
+	project := envvar.GetTestProjectFromEnv()
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+		CheckDestroy:             testAccSqlDatabaseInstanceDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testGoogleSqlDatabaseInstanceConfig_postgresEplusWithReplica(project, primaryName, replicaName),
+			},
+			{
+				ResourceName:            "google_sql_database_instance.original-primary",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: ignoredReplicaConfigurationFields,
+			},
+			{
+				ResourceName:            "google_sql_database_instance.original-replica",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: ignoredReplicaConfigurationFields,
+			},
+			// Let's make sure that setting and unsetting failover replica works.
+			{
+				Config: googleSqlDatabaseInstance_postgresSetFailoverReplica(project, primaryName, replicaName),
+			},
+			{
+				Config: googleSqlDatabaseInstance_postgresUnsetFailoverReplica(project, primaryName, replicaName),
+			},
+			{
+				Config: googleSqlDatabaseInstance_postgresSetFailoverReplica(project, primaryName, replicaName),
+			},
+			{
+				// Split into two configs because the current TestStep implementation checks the diff before refreshing.
+				Config: googleSqlDatabaseInstance_postgresSwitchoverOnReplica(project, primaryName, replicaName),
+				// Original primary needs to be updated at the next step.
+				ExpectNonEmptyPlan: true,
+			},
+			{
+				Config: googleSqlDatabaseInstance_postgresUpdatePrimaryAfterSwitchover(project, primaryName, replicaName),
+			},
+			{
+				RefreshState: true,
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckTypeSetElemAttr("google_sql_database_instance.original-replica", "replica_names.*", primaryName),
+					checkSwitchoverOriginalReplicaConfigurations("google_sql_database_instance.original-replica"),
+					checkSwitchoverOriginalPrimaryConfigurations("google_sql_database_instance.original-primary", replicaName),
+				),
+			},
+			{
+				ResourceName:            "google_sql_database_instance.original-primary",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: ignoredReplicaConfigurationFields,
+			},
+			{
+				ResourceName:      "google_sql_database_instance.original-replica",
+				ImportState:       true,
+				ImportStateVerify: true,
+				// original-replica is no longer a replica, but replica_configuration is optional+computed and cannot be unset
+				ImportStateVerifyIgnore: []string{"replica_configuration", "deletion_protection", "root_password"},
+			},
+			{
+				// Delete the replica first so PostTestDestroy doesn't fail when deleting instances that have replicas. Switchover behavior has already been validated; the remaining steps are cleanup.
+				Config: googleSqlDatabaseInstance_postgresDeleteReplicasAfterSwitchover(project, primaryName, replicaName),
+				// We delete the replica but haven't yet updated the primary's replica_names
+				ExpectNonEmptyPlan: true,
+			},
+			{
+				// Remove replica from primary's resource
+				Config: googleSqlDatabaseInstance_postgresRemoveReplicaFromPrimaryAfterSwitchover(project, replicaName),
+			},
+		},
+	})
+}
+
 func TestAccSqlDatabaseInstance_updateSslOptionsForPostgreSQL(t *testing.T) {
 	t.Parallel()
 
@@ -3468,6 +3620,554 @@ resource "google_sql_database_instance" "original-replica" {
 `, replicaName)
 }
 
+func testGoogleSqlDatabaseInstanceConfig_mysqlEplusWithReplica(project, primaryName, replicaName string) string {
+	return fmt.Sprintf(`
+resource "google_sql_database_instance" "original-primary" {
+  project             = "%s"
+  name                = "%s"
+  region              = "us-east1"
+  database_version    = "MYSQL_8_0"
+  instance_type       = "CLOUD_SQL_INSTANCE"
+  deletion_protection = false
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled            = true
+      binary_log_enabled = true
+    }
+  }
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "%s"
+  name                 = "%s"
+  region               = "us-west2"
+  database_version     = "MYSQL_8_0"
+  instance_type        = "READ_REPLICA_INSTANCE"
+  master_instance_name = google_sql_database_instance.original-primary.name
+  deletion_protection  = false
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+  }
+}
+`, project, primaryName, project, replicaName)
+}
+
+func googleSqlDatabaseInstance_mysqlSetFailoverReplica(project, primaryName, replicaName string) string {
+	return fmt.Sprintf(`
+resource "google_sql_database_instance" "original-primary" {
+  project             = "%s"
+  name                = "%s"
+  region              = "us-east1"
+  database_version    = "MYSQL_8_0"
+  instance_type       = "CLOUD_SQL_INSTANCE"
+  deletion_protection = false
+
+  replication_cluster {
+    failover_dr_replica_name = "%s:%s"
+  }
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled            = true
+      binary_log_enabled = true
+    }
+  }
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "%s"
+  name                 = "%s"
+  region               = "us-west2"
+  database_version     = "MYSQL_8_0"
+  instance_type        = "READ_REPLICA_INSTANCE"
+  master_instance_name = "%s"
+  deletion_protection  = false
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+  }
+}
+`, project, primaryName, project, replicaName, project, replicaName, primaryName)
+}
+
+func googleSqlDatabaseInstance_mysqlUnsetFailoverReplica(project, primaryName, replicaName string) string {
+	return fmt.Sprintf(`
+resource "google_sql_database_instance" "original-primary" {
+  project             = "%s"
+  name                = "%s"
+  region              = "us-east1"
+  database_version    = "MYSQL_8_0"
+  instance_type       = "CLOUD_SQL_INSTANCE"
+  deletion_protection = false
+
+  replication_cluster {
+    failover_dr_replica_name = ""
+  }
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled            = true
+      binary_log_enabled = true
+    }
+  }
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "%s"
+  name                 = "%s"
+  region               = "us-west2"
+  database_version     = "MYSQL_8_0"
+  instance_type        = "READ_REPLICA_INSTANCE"
+  master_instance_name = "%s"
+  deletion_protection  = false
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+  }
+}
+`, project, primaryName, project, replicaName, primaryName)
+}
+
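+// Invoke switchover on the original replica: it becomes a standalone primary that lists the original primary as its replica and DR replica.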
+func googleSqlDatabaseInstance_mysqlSwitchoverOnReplica(project, primaryName, replicaName string) string {
+	return fmt.Sprintf(`
+resource "google_sql_database_instance" "original-primary" {
+  project             = "%s"
+  name                = "%s"
+  region              = "us-east1"
+  database_version    = "MYSQL_8_0"
+  instance_type       = "CLOUD_SQL_INSTANCE"
+  deletion_protection = false
+
+  replication_cluster {
+    failover_dr_replica_name = "%s:%s"
+  }
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled            = true
+      binary_log_enabled = true
+    }
+  }
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "%s"
+  name                 = "%s"
+  region               = "us-west2"
+  database_version     = "MYSQL_8_0"
+  instance_type        = "CLOUD_SQL_INSTANCE"
+  replica_names        = ["%s"]
+  deletion_protection  = false
+
+  replication_cluster {
+    failover_dr_replica_name = "%s:%s"
+  }
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled            = true
+      binary_log_enabled = true
+    }
+  }
+}
+`, project, primaryName, project, replicaName, project, replicaName, primaryName, project, primaryName)
+}
+
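+// After switchover, reconfigure the original primary as a replica of the new primary and clear its DR replica.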
+func googleSqlDatabaseInstance_mysqlUpdatePrimaryAfterSwitchover(project, primaryName, replicaName string) string {
+	return fmt.Sprintf(`
+resource "google_sql_database_instance" "original-primary" {
+  project              = "%s"
+  name                 = "%s"
+  region               = "us-east1"
+  database_version     = "MYSQL_8_0"
+  instance_type        = "READ_REPLICA_INSTANCE"
+  master_instance_name = "%s"
+  deletion_protection  = false
+
+  replication_cluster {
+    failover_dr_replica_name = ""
+  }
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled            = false
+      binary_log_enabled = true
+    }
+  }
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "%s"
+  name                 = "%s"
+  region               = "us-west2"
+  database_version     = "MYSQL_8_0"
+  instance_type        = "CLOUD_SQL_INSTANCE"
+  replica_names        = ["%s"]
+  deletion_protection  = false
+
+  replication_cluster {
+    failover_dr_replica_name = "%s:%s"
+  }
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled            = true
+      binary_log_enabled = true
+    }
+  }
+}
+`, project, primaryName, replicaName, project, replicaName, primaryName, project, primaryName)
+}
+
+// After a switchover, the original-primary is now the replica and must be removed first.
+func googleSqlDatabaseInstance_mysqlDeleteReplicasAfterSwitchover(project, primaryName, replicaName string) string {
+	return fmt.Sprintf(`
+resource "google_sql_database_instance" "original-replica" {
+  project              = "%s"
+  name                 = "%s"
+  region               = "us-west2"
+  database_version     = "MYSQL_8_0"
+  instance_type        = "CLOUD_SQL_INSTANCE"
+  replica_names        = ["%s"]
+  deletion_protection  = false
+
+  replication_cluster {
+    failover_dr_replica_name = "%s:%s"
+  }
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled            = true
+      binary_log_enabled = true
+    }
+  }
+}
+`, project, replicaName, primaryName, project, primaryName)
+}
+
+// Update original-replica's replica_names after deleting original-primary.
+func googleSqlDatabaseInstance_mysqlRemoveReplicaFromPrimaryAfterSwitchover(project, replicaName string) string {
+	return fmt.Sprintf(`
+resource "google_sql_database_instance" "original-replica" {
+  project              = "%s"
+  name                 = "%s"
+  region               = "us-west2"
+  database_version     = "MYSQL_8_0"
+  instance_type        = "CLOUD_SQL_INSTANCE"
+  replica_names        = []
+  deletion_protection  = false
+
+  replication_cluster {
+    failover_dr_replica_name = ""
+  }
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled            = true
+      binary_log_enabled = true
+    }
+  }
+}
+`, project, replicaName)
+}
+
+func testGoogleSqlDatabaseInstanceConfig_postgresEplusWithReplica(project, primaryName, replicaName string) string {
+	return fmt.Sprintf(`
+resource "google_sql_database_instance" "original-primary" {
+  project             = "%s"
+  name                = "%s"
+  region              = "us-east1"
+  database_version    = "POSTGRES_12"
+  instance_type       = "CLOUD_SQL_INSTANCE"
+  deletion_protection = false
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled                        = true
+      point_in_time_recovery_enabled = true
+    }
+  }
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "%s"
+  name                 = "%s"
+  region               = "us-west2"
+  database_version     = "POSTGRES_12"
+  instance_type        = "READ_REPLICA_INSTANCE"
+  master_instance_name = google_sql_database_instance.original-primary.name
+  deletion_protection  = false
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+  }
+}
+`, project, primaryName, project, replicaName)
+}
+
+func googleSqlDatabaseInstance_postgresSetFailoverReplica(project, primaryName, replicaName string) string {
+	return fmt.Sprintf(`
+resource "google_sql_database_instance" "original-primary" {
+  project             = "%s"
+  name                = "%s"
+  region              = "us-east1"
+  database_version    = "POSTGRES_12"
+  instance_type       = "CLOUD_SQL_INSTANCE"
+  deletion_protection = false
+
+  replication_cluster {
+    failover_dr_replica_name = "%s:%s"
+  }
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled                        = true
+      point_in_time_recovery_enabled = true
+    }
+  }
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "%s"
+  name                 = "%s"
+  region               = "us-west2"
+  database_version     = "POSTGRES_12"
+  instance_type        = "READ_REPLICA_INSTANCE"
+  master_instance_name = "%s"
+  deletion_protection  = false
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+  }
+}
+`, project, primaryName, project, replicaName, project, replicaName, primaryName)
+}
+
+func googleSqlDatabaseInstance_postgresUnsetFailoverReplica(project, primaryName, replicaName string) string {
+	return fmt.Sprintf(`
+resource "google_sql_database_instance" "original-primary" {
+  project             = "%s"
+  name                = "%s"
+  region              = "us-east1"
+  database_version    = "POSTGRES_12"
+  instance_type       = "CLOUD_SQL_INSTANCE"
+  deletion_protection = false
+
+  replication_cluster {
+    failover_dr_replica_name = ""
+  }
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled                        = true
+      point_in_time_recovery_enabled = true
+    }
+  }
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "%s"
+  name                 = "%s"
+  region               = "us-west2"
+  database_version     = "POSTGRES_12"
+  instance_type        = "READ_REPLICA_INSTANCE"
+  master_instance_name = "%s"
+  deletion_protection  = false
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+  }
+}
+`, project, primaryName, project, replicaName, primaryName)
+}
+
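+// Invoke switchover on the original replica: it becomes a standalone primary that lists the original primary as its replica and DR replica.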
+func googleSqlDatabaseInstance_postgresSwitchoverOnReplica(project, primaryName, replicaName string) string {
+	return fmt.Sprintf(`
+resource "google_sql_database_instance" "original-primary" {
+  project             = "%s"
+  name                = "%s"
+  region              = "us-east1"
+  database_version    = "POSTGRES_12"
+  instance_type       = "CLOUD_SQL_INSTANCE"
+  deletion_protection = false
+
+  replication_cluster {
+    failover_dr_replica_name = "%s:%s"
+  }
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled                        = true
+      point_in_time_recovery_enabled = true
+    }
+  }
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "%s"
+  name                 = "%s"
+  region               = "us-west2"
+  database_version     = "POSTGRES_12"
+  instance_type        = "CLOUD_SQL_INSTANCE"
+  replica_names        = ["%s"]
+  deletion_protection  = false
+
+  replication_cluster {
+    failover_dr_replica_name = "%s:%s"
+  }
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled                        = true
+      point_in_time_recovery_enabled = true
+    }
+  }
+}
+`, project, primaryName, project, replicaName, project, replicaName, primaryName, project, primaryName)
+}
+
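+// After switchover, reconfigure the original primary as a replica of the new primary and clear its DR replica.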
+func googleSqlDatabaseInstance_postgresUpdatePrimaryAfterSwitchover(project, primaryName, replicaName string) string {
+	return fmt.Sprintf(`
+resource "google_sql_database_instance" "original-primary" {
+  project              = "%s"
+  name                 = "%s"
+  region               = "us-east1"
+  database_version     = "POSTGRES_12"
+  instance_type        = "READ_REPLICA_INSTANCE"
+  master_instance_name = "%s"
+  deletion_protection  = false
+
+  replication_cluster {
+    failover_dr_replica_name = ""
+  }
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled                        = false
+      point_in_time_recovery_enabled = false
+    }
+  }
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "%s"
+  name                 = "%s"
+  region               = "us-west2"
+  database_version     = "POSTGRES_12"
+  instance_type        = "CLOUD_SQL_INSTANCE"
+  replica_names        = ["%s"]
+  deletion_protection  = false
+
+  replication_cluster {
+    failover_dr_replica_name = "%s:%s"
+  }
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled                        = true
+      point_in_time_recovery_enabled = true
+    }
+  }
+}
+`, project, primaryName, replicaName, project, replicaName, primaryName, project, primaryName)
+}
+
+// After a switchover, the original-primary is now the replica and must be removed first.
+func googleSqlDatabaseInstance_postgresDeleteReplicasAfterSwitchover(project, primaryName, replicaName string) string {
+	return fmt.Sprintf(`
+resource "google_sql_database_instance" "original-replica" {
+  project              = "%s"
+  name                 = "%s"
+  region               = "us-west2"
+  database_version     = "POSTGRES_12"
+  instance_type        = "CLOUD_SQL_INSTANCE"
+  replica_names        = ["%s"]
+  deletion_protection  = false
+
+  replication_cluster {
+    failover_dr_replica_name = "%s:%s"
+  }
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled                        = true
+      point_in_time_recovery_enabled = true
+    }
+  }
+}
+`, project, replicaName, primaryName, project, primaryName)
+}
+
+// Update original-replica's replica_names after deleting original-primary.
+func googleSqlDatabaseInstance_postgresRemoveReplicaFromPrimaryAfterSwitchover(project, replicaName string) string {
+	return fmt.Sprintf(`
+resource "google_sql_database_instance" "original-replica" {
+  project              = "%s"
+  name                 = "%s"
+  region               = "us-west2"
+  database_version     = "POSTGRES_12"
+  instance_type        = "CLOUD_SQL_INSTANCE"
+  replica_names        = []
+  deletion_protection  = false
+
+  replication_cluster {
+    failover_dr_replica_name = ""
+  }
+
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled                        = true
+      point_in_time_recovery_enabled = true
+    }
+  }
+}
+`, project, replicaName)
+}
+
 func testAccSqlDatabaseInstance_basicInstanceForPsc(instanceName string, projectId string, orgId string, billingAccount string) string {
 	return fmt.Sprintf(`
 resource "google_project" "testproject" {
diff --git a/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_test.go.tmpl b/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_test.go.tmpl
index 9df865d0e4b5..f031aa885e59 100644
--- a/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_test.go.tmpl
+++ b/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_test.go.tmpl
@@ -690,3 +690,75 @@ resource "google_workbench_instance" "instance" {
 }
 `, context)
 }
+
+func TestAccWorkbenchInstance_updateCustomContainers(t *testing.T) {
+	t.Parallel()
+
+	context := map[string]interface{}{
+		"random_suffix": acctest.RandString(t, 10),
+	}
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccWorkbenchInstance_customcontainer(context),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(
+						"google_workbench_instance.instance", "state", "ACTIVE"),
+				),
+			},
+			{
+				ResourceName:            "google_workbench_instance.instance",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels", "desired_state"},
+			},
+			{
+				Config: testAccWorkbenchInstance_updatedcustomcontainer(context),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(
+						"google_workbench_instance.instance", "state", "ACTIVE"),
+				),
+			},
+			{
+				ResourceName:            "google_workbench_instance.instance",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels", "desired_state"},
+			},
+		},
+	})
+}
+
+func testAccWorkbenchInstance_customcontainer(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_workbench_instance" "instance" {
+  name = "tf-test-workbench-instance%{random_suffix}"
+  location = "us-central1-a"
+  gce_setup {
+    container_image {
+      repository = "us-docker.pkg.dev/deeplearning-platform-release/gcr.io/base-cu113.py310"
+      tag = "latest"
+    }
+  }
+}
+`, context)
+}
+
+func testAccWorkbenchInstance_updatedcustomcontainer(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_workbench_instance" "instance" {
+  name = "tf-test-workbench-instance%{random_suffix}"
+  location = "us-central1-a"
+  gce_setup {
+    container_image {
+      repository = "gcr.io/deeplearning-platform-release/workbench-container"
+      tag = "20241117-2200-rc0"
+    }
+  }
+}
+`, context)
+}
diff --git a/mmv1/third_party/terraform/website/docs/guides/sql_instance_switchover.html.markdown b/mmv1/third_party/terraform/website/docs/guides/sql_instance_switchover.html.markdown
index 07623519a9ed..eaa817f0de0c 100644
--- a/mmv1/third_party/terraform/website/docs/guides/sql_instance_switchover.html.markdown
+++ b/mmv1/third_party/terraform/website/docs/guides/sql_instance_switchover.html.markdown
@@ -7,7 +7,7 @@ description: |-
 # Performing a SQL Instance Switchover
 This page is a brief walkthrough of performing a switchover through terraform. 
 
-  ~> **NOTE:** Only supported for SQL Server.
+## SQL Server
 
 1. Create a **cross-region** primary and cascadable replica. It is recommended to use deletion_protection to prevent accidental deletions.
 ```
@@ -83,4 +83,405 @@ resource "google_sql_database_instance" "original-primary" {
 - `terraform plan` does not say **"must be replaced"** for any resource
 - Every resource **"will be updated in-place"**
 - Only the 2 instances involved in switchover have planned changes
-- (Recommended) Use `deletion_protection` on instances as a safety measure
\ No newline at end of file
+- (Recommended) Use `deletion_protection` on instances as a safety measure
+
+## MySQL
+
+1. Create a **cross-region, Enterprise Plus edition** primary and replica. The primary should have backups and binary logging enabled.
+
+```
+resource "google_sql_database_instance" "original-primary" {
+  project             = "your-project"
+  name                = "your-original-primary"
+  # Can be any region.
+  region              = "us-east1"
+  # Any database version that supports Enterprise Plus edition.
+  database_version    = "MYSQL_8_0"
+  instance_type       = "CLOUD_SQL_INSTANCE"
+  
+  settings {
+    # Any tier that supports Enterprise Plus edition.
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled            = true
+      binary_log_enabled = true
+    }
+  }
+  
+  # You can add more settings.
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "your-project"
+  name                 = "your-original-replica"
+  # Can be any region, but must be different from the primary's region.
+  region               = "us-west2"
+  # Must be the same as the primary's database_version.
+  database_version     = "MYSQL_8_0"
+  instance_type        = "READ_REPLICA_INSTANCE"
+  master_instance_name = google_sql_database_instance.original-primary.name
+  
+  settings {
+    # Any tier that supports Enterprise Plus edition.
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+  }
+  
+  # You can add more settings.
+}
+```
+
+2. Designate the replica as the DR replica of the primary by adding `replication_cluster.failover_dr_replica_name`.
+```diff
+resource "google_sql_database_instance" "original-primary" {
+  project             = "your-project"
+  name                = "your-original-primary"
+  region              = "us-east1"
+  database_version    = "MYSQL_8_0"
+  instance_type       = "CLOUD_SQL_INSTANCE"
+  
++  replication_cluster {
++    # Note that the format of the name is "project:instance".
++    # To unset the DR replica, set this field to an empty string.
++    failover_dr_replica_name = "your-project:your-original-replica"
++  }
+  
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled            = true
+      binary_log_enabled = true
+    }
+  }
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "your-project"
+  name                 = "your-original-replica"
+  region               = "us-west2"
+  database_version     = "MYSQL_8_0"
+  instance_type        = "READ_REPLICA_INSTANCE"
+  master_instance_name = "your-original-primary"
+  
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+  }
+}
+```
+
+3. Invoke switchover on the original replica.
+
+* Change `instance_type` from `READ_REPLICA_INSTANCE` to `CLOUD_SQL_INSTANCE`.
+* Remove `master_instance_name`.
+* Add the original primary's name to the original replica's `replica_names` list and (in "project:instance" format) to `replication_cluster.failover_dr_replica_name`.
+* Enable backups and binary logging for the original replica.
+
+```diff
+resource "google_sql_database_instance" "original-primary" {
+  project             = "your-project"
+  name                = "your-original-primary"
+  region              = "us-east1"
+  database_version    = "MYSQL_8_0"
+  instance_type       = "CLOUD_SQL_INSTANCE"
+  
+  replication_cluster {
+    failover_dr_replica_name = "your-project:your-original-replica"
+  }
+  
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled            = true
+      binary_log_enabled = true
+    }
+  }
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "your-project"
+  name                 = "your-original-replica"
+  region               = "us-west2"
+  database_version     = "MYSQL_8_0"
+-  instance_type        = "READ_REPLICA_INSTANCE"
++  instance_type        = "CLOUD_SQL_INSTANCE"
+-  master_instance_name = "your-original-primary"
++  replica_names        = ["your-original-primary"]
+
++  replication_cluster {
++    failover_dr_replica_name = "your-project:your-original-primary"
++  }
+  
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
++    backup_configuration {
++      enabled            = true
++      binary_log_enabled = true
++    }    
+  }
+}
+```
+
+4. Update the original primary and run `terraform plan`.
+* Change `instance_type` from `CLOUD_SQL_INSTANCE` to `READ_REPLICA_INSTANCE`.
+* Set `master_instance_name` to the new primary (original replica).
+* (If `replica_names` is present) Remove the original replica from `replica_names`.
+  * **NOTE**: Do **not** delete the `replica_names` field, even if it has no replicas remaining. Set `replica_names = [ ]` to indicate that it has no replicas.
+* Remove the original replica from `replication_cluster.failover_dr_replica_name` by setting the field to the empty string.
+* Disable backups for the original primary (it is now a replica).
+* Run `terraform plan` and verify that your configuration matches your infrastructure. You should see a message like the following:
+  * **`No changes. Your infrastructure matches the configuration.`**
+
+```diff
+resource "google_sql_database_instance" "original-primary" {
+  project             = "your-project"
+  name                = "your-original-primary"
+  region              = "us-east1"
+  database_version    = "MYSQL_8_0"
+-  instance_type        = "CLOUD_SQL_INSTANCE"
++  instance_type        = "READ_REPLICA_INSTANCE"
++  master_instance_name = "your-original-replica"
+  
+  replication_cluster {
+-    failover_dr_replica_name = "your-project:your-original-replica"
++    failover_dr_replica_name = ""
+  }
+  
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+-      enabled            = true
++      enabled            = false
+      binary_log_enabled = true
+    }
+  }
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "your-project"
+  name                 = "your-original-replica"
+  region               = "us-west2"
+  database_version     = "MYSQL_8_0"
+  instance_type        = "CLOUD_SQL_INSTANCE"
+  replica_names        = ["your-original-primary"]
+
+  replication_cluster {
+    failover_dr_replica_name = "your-project:your-original-primary"
+  }
+  
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled            = true
+      binary_log_enabled = true
+    }    
+  }
+}
+```
+
+## PostgreSQL
+
+1. Create a **cross-region, Enterprise Plus edition** primary and replica. The primary should have backups and point-in-time recovery (PITR) enabled.
+
+```
+resource "google_sql_database_instance" "original-primary" {
+  project             = "your-project"
+  name                = "your-original-primary"
+  # Can be any region.
+  region              = "us-east1"
+  # Any database version that supports Enterprise Plus edition.
+  database_version    = "POSTGRES_12"
+  instance_type       = "CLOUD_SQL_INSTANCE"
+  
+  settings {
+    # Any tier that supports Enterprise Plus edition.
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled                        = true
+      point_in_time_recovery_enabled = true
+    }
+  }
+  
+  # You can add more settings.
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "your-project"
+  name                 = "your-original-replica"
+  # Can be any region, but must be different from the primary's region.
+  region               = "us-west2"
+  # Must be the same as the primary's database_version.
+  database_version     = "POSTGRES_12"
+  instance_type        = "READ_REPLICA_INSTANCE"
+  master_instance_name = google_sql_database_instance.original-primary.name
+  
+  settings {
+    # Any tier that supports Enterprise Plus edition.
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+  }
+  
+  # You can add more settings.
+}
+```
+
+2. Designate the replica as the DR replica of the primary by adding `replication_cluster.failover_dr_replica_name`.
+```diff
+resource "google_sql_database_instance" "original-primary" {
+  project             = "your-project"
+  name                = "your-original-primary"
+  region              = "us-east1"
+  database_version    = "POSTGRES_12"
+  instance_type       = "CLOUD_SQL_INSTANCE"
+  
++  replication_cluster {
++    # Note that the format of the name is "project:instance".
++    # To unset the DR replica, set this field to an empty string.
++    failover_dr_replica_name = "your-project:your-original-replica"
++  }
+  
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled                        = true
+      point_in_time_recovery_enabled = true
+    }
+  }
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "your-project"
+  name                 = "your-original-replica"
+  region               = "us-west2"
+  database_version     = "POSTGRES_12"
+  instance_type        = "READ_REPLICA_INSTANCE"
+  master_instance_name = "your-original-primary"
+  
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+  }
+}
+```
+
+3. Invoke switchover on the original replica.
+
+* Change `instance_type` from `READ_REPLICA_INSTANCE` to `CLOUD_SQL_INSTANCE`.
+* Remove `master_instance_name`.
+* Add the original primary's name to the original replica's `replica_names` list and (in "project:instance" format) to `replication_cluster.failover_dr_replica_name`.
+* Enable backups and PITR for the original replica.
+
+```diff
+resource "google_sql_database_instance" "original-primary" {
+  project             = "your-project"
+  name                = "your-original-primary"
+  region              = "us-east1"
+  database_version    = "POSTGRES_12"
+  instance_type       = "CLOUD_SQL_INSTANCE"
+  
+  replication_cluster {
+    failover_dr_replica_name = "your-project:your-original-replica"
+  }
+  
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled                        = true
+      point_in_time_recovery_enabled = true
+    }
+  }
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "your-project"
+  name                 = "your-original-replica"
+  region               = "us-west2"
+  database_version     = "POSTGRES_12"
+-  instance_type        = "READ_REPLICA_INSTANCE"
++  instance_type        = "CLOUD_SQL_INSTANCE"
+-  master_instance_name = "your-original-primary"
++  replica_names        = ["your-original-primary"]
+
++  replication_cluster {
++    failover_dr_replica_name = "your-project:your-original-primary"
++  }
+  
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
++    backup_configuration {
++      enabled                        = true
++      point_in_time_recovery_enabled = true
++    }  
+  }
+}
+```
+
+4. Update the original primary and run `terraform plan`.
+* Change `instance_type` from `CLOUD_SQL_INSTANCE` to `READ_REPLICA_INSTANCE`.
+* Set `master_instance_name` to the new primary (original replica).
+* (If `replica_names` is present) Remove the original replica from `replica_names`.
+  * **NOTE**: Do **not** delete the `replica_names` field, even if it has no replicas remaining. Set `replica_names = [ ]` to indicate that it has no replicas.
+* Remove the original replica from `replication_cluster.failover_dr_replica_name` by setting the field to the empty string.
+* Disable backups and PITR for the original primary (it is now a replica).
+* Run `terraform plan` and verify that your configuration matches your infrastructure. You should see a message like the following:
+  * **`No changes. Your infrastructure matches the configuration.`**
+
+```diff
+resource "google_sql_database_instance" "original-primary" {
+  project             = "your-project"
+  name                = "your-original-primary"
+  region              = "us-east1"
+  database_version    = "POSTGRES_12"
+-  instance_type        = "CLOUD_SQL_INSTANCE"
++  instance_type        = "READ_REPLICA_INSTANCE"
++  master_instance_name = "your-original-replica"
+  
+  replication_cluster {
+-    failover_dr_replica_name = "your-project:your-original-replica"
++    failover_dr_replica_name = ""
+  }
+  
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+-      enabled            = true
++      enabled            = false
+-      point_in_time_recovery_enabled = true
++      point_in_time_recovery_enabled = false
+    }
+  }
+}
+
+resource "google_sql_database_instance" "original-replica" {
+  project              = "your-project"
+  name                 = "your-original-replica"
+  region               = "us-west2"
+  database_version     = "POSTGRES_12"
+  instance_type        = "CLOUD_SQL_INSTANCE"
+  replica_names        = ["your-original-primary"]
+
+  replication_cluster {
+    failover_dr_replica_name = "your-project:your-original-primary"
+  }
+  
+  settings {
+    tier              = "db-perf-optimized-N-2"
+    edition           = "ENTERPRISE_PLUS"
+    backup_configuration {
+      enabled                        = true
+      point_in_time_recovery_enabled = true
+    }    
+  }
+}
+```
diff --git a/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown
index 6f6e2e668bd3..8992cfeadf09 100644
--- a/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown
+++ b/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown
@@ -557,6 +557,12 @@ block during resource creation/update will trigger the restore action after the
 
 * `project` - (Optional) The full project ID of the source instance.`
 
+The optional, computed `replication_cluster` block represents a primary instance and disaster recovery replica pair. Applicable to MySQL and PostgreSQL. This field can be set only after both the primary and replica are created. This block supports:
+
+* `failover_dr_replica_name`: (Optional) If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. The standard format of this field is "your-project:your-instance". You can also set this field to "your-instance", but the Cloud SQL backend will convert it to the standard format.
+
+* `dr_replica`: Read-only field that indicates whether the replica is a DR replica.
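+
+A minimal sketch (project and instance names are placeholders), assuming a MySQL Enterprise Plus primary whose cross-region replica already exists:
+
+```
+resource "google_sql_database_instance" "primary" {
+  name             = "your-instance"
+  region           = "us-east1"
+  database_version = "MYSQL_8_0"
+
+  replication_cluster {
+    # Must be in "project:instance" format; set to "" to unset the DR replica.
+    failover_dr_replica_name = "your-project:your-dr-replica"
+  }
+
+  settings {
+    tier    = "db-perf-optimized-N-2"
+    edition = "ENTERPRISE_PLUS"
+  }
+}
+```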
+
 ## Attributes Reference
 
 In addition to the arguments listed above, the following computed attributes are
@@ -620,8 +626,8 @@ performing filtering in a Terraform config.
 
 * `server_ca_cert.0.sha1_fingerprint` - SHA Fingerprint of the CA Cert.
 
-## Switchover (SQL Server Only)
-Users can perform a switchover on any direct `cascadable` replica by following the steps below.
+## Switchover
+Users can perform a switchover on a replica by following the steps below.
 
   ~>**WARNING:** Failure to follow these steps can lead to data loss (You will be warned during plan stage). To prevent data loss during a switchover, please verify your plan with the checklist below.
 
@@ -629,22 +635,26 @@ For a more in-depth walkthrough with example code, see the [Switchover Guide](..
 
 ### Steps to Invoke Switchover
 
-Create a `cascadable` replica in a different region from the primary (`cascadable_replica` is set to true in `replica_configuration`)
+MySQL/PostgreSQL: Create a cross-region, Enterprise Plus edition primary and replica pair, then set the primary's `replication_cluster.failover_dr_replica_name` to the replica (in "project:instance" format).
+
+SQL Server: Create a `cascadable` replica in a different region from the primary (`cascadable_replica` is set to `true` in `replica_configuration`).
 
 #### Invoking switchover in the replica resource:
 1. Change instance_type from `READ_REPLICA_INSTANCE` to `CLOUD_SQL_INSTANCE`
 2. Remove `master_instance_name`
-3. Remove `replica_configuration`
+3. (SQL Server) Remove `replica_configuration`
 4. Add current primary's name to the replica's `replica_names` list
+5. (MySQL/PostgreSQL) Add the current primary's name (in "project:instance" format) to the replica's `replication_cluster.failover_dr_replica_name`.
+6. (MySQL/PostgreSQL) Adjust `backup_configuration`. See [Switchover Guide](../guides/sql_instance_switchover.html.markdown) for details.
 
 #### Updating the primary resource:
 1. Change `instance_type` from `CLOUD_SQL_INSTANCE` to `READ_REPLICA_INSTANCE`
 2. Set `master_instance_name` to the original replica (which will be primary after switchover)
-3. Set `replica_configuration` and set `cascadable_replica` to `true`
+3. (SQL Server) Set `replica_configuration` and set `cascadable_replica` to `true`
 4. Remove original replica from `replica_names`
-
-    ~> **NOTE**: Do **not** delete the replica_names field, even if it has no replicas remaining. Set replica_names = [ ] to indicate it having no replicas.
-
+   * **NOTE**: Do **not** delete the `replica_names` field, even if it has no replicas remaining. Set `replica_names = [ ]` to indicate that it has no replicas.
+5. (MySQL/PostgreSQL) Set `replication_cluster.failover_dr_replica_name` to the empty string.
+6. (MySQL/PostgreSQL) Adjust `backup_configuration`. See the [Switchover Guide](../guides/sql_instance_switchover.html.markdown) for details.
+
 #### Plan and verify that:
 - `terraform plan` outputs **"0 to add, 0 to destroy"**
 - `terraform plan` does not say **"must be replaced"** for any resource
diff --git a/tools/diff-processor/cmd/detect_missing_docs.go b/tools/diff-processor/cmd/detect_missing_docs.go
new file mode 100644
index 000000000000..60a5a427dac6
--- /dev/null
+++ b/tools/diff-processor/cmd/detect_missing_docs.go
@@ -0,0 +1,79 @@
+package cmd
+
+import (
+	newProvider "google/provider/new/google/provider"
+	oldProvider "google/provider/old/google/provider"
+	"slices"
+	"sort"
+
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/detector"
+	"github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/diff"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/spf13/cobra"
+	"golang.org/x/exp/maps"
+)
+
+const detectMissingDocDesc = `Compute the list of new fields that are missing documentation`
+
+type MissingDocsInfo struct {
+	Name     string
+	FilePath string
+	Fields   []string
+}
+
+type detectMissingDocsOptions struct {
+	rootOptions       *rootOptions
+	computeSchemaDiff func() diff.SchemaDiff
+	newResourceSchema map[string]*schema.Resource
+	stdout            io.Writer
+}
+
+func newDetectMissingDocsCmd(rootOptions *rootOptions) *cobra.Command {
+	o := &detectMissingDocsOptions{
+		rootOptions: rootOptions,
+		computeSchemaDiff: func() diff.SchemaDiff {
+			return diff.ComputeSchemaDiff(oldProvider.ResourceMap(), newProvider.ResourceMap())
+		},
+		stdout: os.Stdout,
+	}
+	cmd := &cobra.Command{
+		Use:   "detect-missing-docs",
+		Short: detectMissingDocDesc,
+		Long:  detectMissingDocDesc,
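+		// The single positional argument is the path to the provider repo root; docs are expected under website/docs/r.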
+		Args:  cobra.ExactArgs(1),
+		RunE: func(c *cobra.Command, args []string) error {
+			return o.run(args)
+		},
+	}
+	return cmd
+}
+
+func (o *detectMissingDocsOptions) run(args []string) error {
+	schemaDiff := o.computeSchemaDiff()
+	detectedResources, err := detector.DetectMissingDocs(schemaDiff, args[0], o.newResourceSchema)
+	if err != nil {
+		return err
+	}
+	resources := maps.Keys(detectedResources)
+	slices.Sort(resources)
+	info := []MissingDocsInfo{}
+	for _, r := range resources {
+		details := detectedResources[r]
+		sort.Strings(details.Fields)
+		info = append(info, MissingDocsInfo{
+			Name:     r,
+			FilePath: details.FilePath,
+			Fields:   details.Fields,
+		})
+	}
+
+	if err := json.NewEncoder(o.stdout).Encode(info); err != nil {
+		return fmt.Errorf("error encoding json: %w", err)
+	}
+
+	return nil
+}
diff --git a/tools/diff-processor/cmd/detect_missing_docs_test.go b/tools/diff-processor/cmd/detect_missing_docs_test.go
new file mode 100644
index 000000000000..e385488c04ba
--- /dev/null
+++ b/tools/diff-processor/cmd/detect_missing_docs_test.go
@@ -0,0 +1,91 @@
+package cmd
+
+import (
+	"bytes"
+	"encoding/json"
+	"testing"
+
+	"github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/diff"
+	"github.com/google/go-cmp/cmp"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+)
+
+func TestDetectMissingDocs(t *testing.T) {
+	cases := []struct {
+		name           string
+		oldResourceMap map[string]*schema.Resource
+		newResourceMap map[string]*schema.Resource
+		want           []MissingDocsInfo
+	}{
+		{
+			name: "no new fields",
+			oldResourceMap: map[string]*schema.Resource{
+				"google_x": {
+					Schema: map[string]*schema.Schema{
+						"field-a": {Description: "beep", Computed: true, Optional: true},
+						"field-b": {Description: "beep", Computed: true},
+					},
+				},
+			},
+			newResourceMap: map[string]*schema.Resource{
+				"google_x": {
+					Schema: map[string]*schema.Schema{
+						"field-a": {Description: "beep", Computed: true, Optional: true},
+						"field-b": {Description: "beep", Computed: true},
+					},
+				},
+			},
+			want: []MissingDocsInfo{},
+		},
+		{
+			name:           "multiple new fields missing doc",
+			oldResourceMap: map[string]*schema.Resource{},
+			newResourceMap: map[string]*schema.Resource{
+				"google_x": {
+					Schema: map[string]*schema.Schema{
+						"field-a": {Description: "beep", Computed: true, Optional: true},
+						"field-b": {Description: "beep", Computed: true},
+					},
+				},
+			},
+			want: []MissingDocsInfo{
+				{
+					Name:     "google_x",
+					FilePath: "/website/docs/r/x.html.markdown",
+					Fields:   []string{"field-a", "field-b"},
+				},
+			},
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			var buf bytes.Buffer
+			o := detectMissingDocsOptions{
+				computeSchemaDiff: func() diff.SchemaDiff {
+					return diff.ComputeSchemaDiff(tc.oldResourceMap, tc.newResourceMap)
+				},
+				newResourceSchema: tc.newResourceMap,
+				stdout:            &buf,
+			}
+
+			err := o.run([]string{t.TempDir()})
+			if err != nil {
+				t.Fatalf("Error running command: %s", err)
+			}
+
+			var got []MissingDocsInfo
+			if err = json.Unmarshal(buf.Bytes(), &got); err != nil {
+				t.Fatalf("Failed to unmarshal output: %s", err)
+			}
+
+			if diff := cmp.Diff(tc.want, got); diff != "" {
+				t.Errorf("Unexpected result (-want +got):\n%s", diff)
+			}
+		})
+	}
+}
diff --git a/tools/diff-processor/detector/detector.go b/tools/diff-processor/detector/detector.go
index 8305aaac1407..05e0720f085f 100644
--- a/tools/diff-processor/detector/detector.go
+++ b/tools/diff-processor/detector/detector.go
@@ -1,10 +1,14 @@
 package detector
 
 import (
+	"fmt"
+	"os"
+	"path/filepath"
 	"sort"
 	"strings"
 
 	"github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/diff"
+	"github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/documentparser"
 	"github.com/GoogleCloudPlatform/magic-modules/tools/test-reader/reader"
 	"github.com/hashicorp/hcl/v2/hclwrite"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -33,6 +37,12 @@ type Field struct {
 	Tested bool
 }
 
+// MissingDocDetails denotes the doc file path and the fields that do not show up in the corresponding doc.
+type MissingDocDetails struct {
+	FilePath string
+	Fields   []string
+}
+
 // Detect missing tests for the given resource changes map in the given slice of tests.
 // Return a map of resource names to missing test info about that resource.
 func DetectMissingTests(schemaDiff diff.SchemaDiff, allTests []*reader.Test) (map[string]*MissingTestInfo, error) {
@@ -152,3 +162,91 @@ func suggestedTest(resourceName string, untested []string) string {
 	}
 	return strings.ReplaceAll(string(f.Bytes()), `"VALUE"`, "# value needed")
 }
+
+// DetectMissingDocs detects new fields that are missing docs, given the schema diff.
+// It returns a map of resource names to missing doc info.
+func DetectMissingDocs(schemaDiff diff.SchemaDiff, repoPath string, resourceMap map[string]*schema.Resource) (map[string]MissingDocDetails, error) {
+	ret := make(map[string]MissingDocDetails)
+	for resource, resourceDiff := range schemaDiff {
+		fieldsInDoc := make(map[string]bool)
+
+		docFilePath, err := resourceToDocFile(resource, repoPath)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Warning: %s.\n", err)
+		} else {
+			content, err := os.ReadFile(docFilePath)
+			if err != nil {
+				return nil, fmt.Errorf("failed to read resource doc %s: %w", docFilePath, err)
+			}
+			parser := documentparser.NewParser()
+			err = parser.Parse(content)
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse document %s: %w", docFilePath, err)
+			}
+
+			argumentsInDoc := listToMap(parser.Arguments())
+			attributesInDoc := listToMap(parser.Attributes())
+			for _, m := range []map[string]bool{argumentsInDoc, attributesInDoc} {
+				for k, v := range m {
+					fieldsInDoc[k] = v
+				}
+			}
+			// IAM resources document "member/members" as a single entry; treat both field names as documented.
+			if v, ok := fieldsInDoc["member/members"]; ok {
+				fieldsInDoc["member"] = v
+				fieldsInDoc["members"] = v
+			}
+		}
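+		// Report the doc path relative to the repo root.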
+		details := MissingDocDetails{
+			FilePath: strings.ReplaceAll(docFilePath, repoPath, ""),
+		}
+
+		for field, fieldDiff := range resourceDiff.Fields {
+			if !isNewField(fieldDiff) {
+				continue
+			}
+			if !fieldsInDoc[field] {
+				details.Fields = append(details.Fields, field)
+			}
+		}
+		if len(details.Fields) > 0 {
+			ret[resource] = details
+		}
+	}
+	return ret, nil
+}
+
+func isNewField(fieldDiff diff.FieldDiff) bool {
+	return fieldDiff.Old == nil && fieldDiff.New != nil
+}
+
+func resourceToDocFile(resource string, repoPath string) (string, error) {
+	baseNameOptions := []string{
+		strings.TrimPrefix(resource, "google_") + ".html.markdown",
+		resource + ".html.markdown",
+	}
+	suffix := []string{"_policy", "_binding", "_member"}
+	for _, s := range suffix {
+		if strings.HasSuffix(resource, "_iam"+s) {
+			iamName := strings.TrimSuffix(resource, s)
+			baseNameOptions = append(baseNameOptions, iamName+".html.markdown")
+			baseNameOptions = append(baseNameOptions, strings.TrimPrefix(iamName, "google_")+".html.markdown")
+		}
+	}
+	for _, baseName := range baseNameOptions {
+		fullPath := filepath.Join(repoPath, "website", "docs", "r", baseName)
+		_, err := os.ReadFile(fullPath)
+		if !os.IsNotExist(err) {
+			return fullPath, nil
+		}
+	}
+	return filepath.Join(repoPath, "website", "docs", "r", baseNameOptions[0]), fmt.Errorf("no document files found in %s for resource %q", baseNameOptions, resource)
+}
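+
+// For example (illustrative): resource "google_project_iam_member" produces
+// these candidate base names, in order:
+//
+//	project_iam_member.html.markdown
+//	google_project_iam_member.html.markdown
+//	google_project_iam.html.markdown
+//	project_iam.html.markdown
+//
+// The first candidate that exists under website/docs/r is returned.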
+
+func listToMap(items []string) map[string]bool {
+	m := make(map[string]bool)
+	for _, item := range items {
+		m[item] = true
+	}
+	return m
+}
diff --git a/tools/diff-processor/detector/detector_test.go b/tools/diff-processor/detector/detector_test.go
index 60ad7739bc7f..30f0dcc5813a 100644
--- a/tools/diff-processor/detector/detector_test.go
+++ b/tools/diff-processor/detector/detector_test.go
@@ -2,10 +2,12 @@ package detector
 
 import (
 	"reflect"
+	"sort"
 	"testing"
 
 	"github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/diff"
 	"github.com/GoogleCloudPlatform/magic-modules/tools/test-reader/reader"
+	"github.com/google/go-cmp/cmp"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
@@ -201,3 +203,206 @@ func TestGetMissingTestsForChanges(t *testing.T) {
 		}
 	}
 }
+
+func TestDetectMissingDocs(t *testing.T) {
+	// Top-level field_one is an argument; field_two is an attribute.
+	resourceSchema := map[string]*schema.Resource{
+		"a_resource": {
+			Schema: map[string]*schema.Schema{
+				"field_one": {
+					Computed: true,
+					Optional: true,
+					Elem: &schema.Resource{
+						Schema: map[string]*schema.Schema{
+							"a": {
+								Computed: true,
+								Optional: true,
+							},
+							"b": {
+								Computed: true,
+								Optional: false,
+							},
+							"c": {
+								Computed: true,
+								Optional: false,
+							},
+						},
+					},
+				},
+				"field_two": {
+					Computed: true,
+					Elem: &schema.Resource{
+						Schema: map[string]*schema.Schema{
+							"a": {
+								Computed: true,
+								Optional: false,
+							},
+							"b": {
+								Computed: true,
+								Optional: false,
+							},
+							"c": {
+								Computed: true,
+								Optional: false,
+							},
+						},
+					},
+				},
+				"field_three": {
+					Computed: true,
+					Optional: true,
+				},
+				"field_four": {
+					Computed: true,
+				},
+			},
+		},
+	}
+
+	// If repo is not a temp dir, the doc file resolves to tools/diff-processor/testdata/website/docs/r/a_resource.html.markdown.
+	for _, test := range []struct {
+		name       string
+		schemaDiff diff.SchemaDiff
+		repo       string
+		want       map[string]MissingDocDetails
+	}{
+		{
+			name: "doc file not exist",
+			schemaDiff: diff.SchemaDiff{
+				"a_resource": diff.ResourceDiff{
+					Fields: map[string]diff.FieldDiff{
+						"field_one": {
+							New: &schema.Schema{},
+						},
+						"field_one.a": {
+							New: &schema.Schema{},
+						},
+						"field_one.b": {
+							New: &schema.Schema{},
+						},
+						"field_two.a": {
+							New: &schema.Schema{},
+							Old: &schema.Schema{},
+						},
+						"field_two.b": {
+							New: &schema.Schema{},
+						},
+						"field_three": {
+							New: &schema.Schema{
+								Computed: true,
+								Optional: true,
+							},
+						},
+						"field_four": {
+							New: &schema.Schema{
+								Computed: true,
+							},
+						},
+					},
+				},
+			},
+			repo: t.TempDir(),
+			want: map[string]MissingDocDetails{
+				"a_resource": {
+					FilePath: "/website/docs/r/a_resource.html.markdown",
+					Fields:   []string{"field_one", "field_one.a", "field_one.b", "field_two.b", "field_three", "field_four"},
+				},
+			},
+		},
+		{
+			name: "doc file exist",
+			schemaDiff: diff.SchemaDiff{
+				"a_resource": diff.ResourceDiff{
+					Fields: map[string]diff.FieldDiff{
+						"field_one": {
+							New: &schema.Schema{},
+						},
+						"field_one.a": {
+							New: &schema.Schema{},
+						},
+						"field_one.b": {
+							New: &schema.Schema{},
+						},
+						"field_two.a": {
+							New: &schema.Schema{},
+							Old: &schema.Schema{},
+						},
+						"field_two.b": {
+							New: &schema.Schema{},
+						},
+						"field_three": {
+							New: &schema.Schema{
+								Computed: true,
+								Optional: true,
+							},
+						},
+						"field_four": {
+							New: &schema.Schema{
+								Computed: true,
+							},
+						},
+					},
+				},
+			},
+			repo: "../testdata",
+			want: map[string]MissingDocDetails{
+				"a_resource": {
+					FilePath: "/website/docs/r/a_resource.html.markdown",
+					Fields:   []string{"field_one.b", "field_two.b", "field_three", "field_four"},
+				},
+			},
+		},
+		{
+			name: "nested new field missing doc",
+			schemaDiff: diff.SchemaDiff{
+				"a_resource": diff.ResourceDiff{
+					Fields: map[string]diff.FieldDiff{
+						"field_one.c": {
+							New: &schema.Schema{},
+						},
+					},
+				},
+			},
+			repo: "../testdata",
+			want: map[string]MissingDocDetails{
+				"a_resource": {
+					FilePath: "/website/docs/r/a_resource.html.markdown",
+					Fields:   []string{"field_one.c"},
+				},
+			},
+		},
+		{
+			name: "member and members is member/members in doc",
+			schemaDiff: diff.SchemaDiff{
+				"a_resource": diff.ResourceDiff{
+					Fields: map[string]diff.FieldDiff{
+						"member": {
+							New: &schema.Schema{},
+						},
+						"members": {
+							New: &schema.Schema{},
+						},
+					},
+				},
+			},
+			repo: "../testdata",
+			want: map[string]MissingDocDetails{},
+		},
+	} {
+		t.Run(test.name, func(t *testing.T) {
+			got, err := DetectMissingDocs(test.schemaDiff, test.repo, resourceSchema)
+			if err != nil {
+				t.Fatalf("DetectMissingDocs = %v, want = nil", err)
+			}
+			for r := range test.want {
+				sort.Strings(test.want[r].Fields)
+			}
+			for r := range got {
+				sort.Strings(got[r].Fields)
+			}
+			if diff := cmp.Diff(test.want, got); diff != "" {
+				t.Errorf("DetectMissingDocs =  %v, want = %v", got, test.want)
+			}
+		})
+	}
+}
diff --git a/tools/diff-processor/documentparser/document_parser.go b/tools/diff-processor/documentparser/document_parser.go
new file mode 100644
index 000000000000..28969906c665
--- /dev/null
+++ b/tools/diff-processor/documentparser/document_parser.go
@@ -0,0 +1,202 @@
+package documentparser
+
+import (
+	"fmt"
+	"regexp"
+	"sort"
+	"strings"
+)
+
+const (
+	nestedNamePattern = `\(#(nested_[a-z0-9_]+)\)`
+
+	itemNamePattern   = "\\* `([a-z0-9_\\./]+)`"
+	nestedLinkPattern = `<a\s+name="([a-z0-9_]+)">`
+
+	sectionSeparator      = "## "
+	nestedObjectSeparator = `<a name="nested_`
+	listItemSeparator     = "* `"
+)
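+
+// As an illustration, the patterns above are intended to match doc lines such
+// as the following (capture groups shown on the right):
+//
+//	* `boot_disk` - ... (#nested_boot_disk)   -> "boot_disk" (itemNamePattern), "nested_boot_disk" (nestedNamePattern)
+//	<a name="nested_boot_disk"></a>The ...    -> "nested_boot_disk" (nestedLinkPattern)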
+
+// DocumentParser parses *.html.markdown resource doc files.
+type DocumentParser struct {
+	argumentRoot  *node
+	attributeRoot *node
+}
+
+type node struct {
+	name     string
+	children []*node
+	text     string
+}
+
+func NewParser() *DocumentParser {
+	return &DocumentParser{}
+}
+
+func (d *DocumentParser) Arguments() []string {
+	var paths []string
+	traverse(
+		&paths,
+		"",
+		d.argumentRoot,
+	)
+	sort.Strings(paths)
+	return paths
+}
+
+func traverse(paths *[]string, path string, n *node) {
+	if n == nil {
+		return
+	}
+	var curPath string
+	if path != "" {
+		curPath = path + "." + n.name
+	} else {
+		curPath = n.name
+	}
+	if curPath != "" {
+		*paths = append(*paths, curPath)
+	}
+	for _, c := range n.children {
+		traverse(paths, curPath, c)
+	}
+}
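+
+// For example (illustrative), a tree root{boot_disk{auto_delete}} yields the
+// paths "boot_disk" and "boot_disk.auto_delete"; the unnamed root itself
+// contributes no path segment.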
+
+func (d *DocumentParser) Attributes() []string {
+	var paths []string
+	traverse(
+		&paths,
+		"",
+		d.attributeRoot,
+	)
+	sort.Strings(paths)
+	return paths
+}
+
+// Parse parses the Argument Reference and Attributes Reference sections of a resource doc markdown.
+// The expected file format is defined in mmv1/templates/terraform/resource.html.markdown.tmpl.
+func (d *DocumentParser) Parse(src []byte) error {
+	var argument, attribute string
+	for _, p := range strings.Split(string(src), "\n"+sectionSeparator) {
+		if strings.HasPrefix(p, "Attributes Reference") {
+			attribute = p
+		}
+		if strings.HasPrefix(p, "Argument Reference") {
+			argument = p
+		}
+	}
+	if len(argument) != 0 {
+		argumentParts := strings.Split(argument, "- - -")
+		for _, part := range argumentParts {
+			n, err := d.parseSection(part)
+			if err != nil {
+				return err
+			}
+			if d.argumentRoot == nil {
+				d.argumentRoot = n
+			} else {
+				d.argumentRoot.children = append(d.argumentRoot.children, n.children...)
+			}
+		}
+	}
+	if len(attribute) != 0 {
+		n, err := d.parseSection(attribute)
+		if err != nil {
+			return err
+		}
+		d.attributeRoot = n
+	}
+	return nil
+}
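+
+// A minimal usage sketch (content is assumed to hold the markdown bytes):
+//
+//	parser := NewParser()
+//	if err := parser.Parse(content); err != nil {
+//		return err
+//	}
+//	arguments := parser.Arguments()   // sorted dotted field paths
+//	attributes := parser.Attributes() // sorted dotted field paths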
+
+func (d *DocumentParser) parseSection(input string) (*node, error) {
+	parts := strings.Split(input, "\n"+nestedObjectSeparator)
+	nestedBlock := make(map[string]string)
+	for _, p := range parts[1:] {
+		nestedName, err := findPattern(nestedObjectSeparator+p, nestedLinkPattern)
+		if err != nil {
+			return nil, err
+		}
+		if nestedName == "" {
+			return nil, fmt.Errorf("could not find nested object name in %s", nestedObjectSeparator+p)
+		}
+		nestedBlock[nestedName] = p
+	}
+	// parts[0] is the section body before any nested block; BFS from it to attach nested blocks to their parent items.
+	root := &node{
+		text: parts[0],
+	}
+	if err := d.bfs(root, nestedBlock); err != nil {
+		return nil, err
+	}
+	return root, nil
+}
+
+func (d *DocumentParser) bfs(root *node, nestedBlock map[string]string) error {
+	if root == nil {
+		return fmt.Errorf("no node to visit")
+	}
+	queue := []*node{root}
+
+	for len(queue) > 0 {
+		l := len(queue)
+		for _, cur := range queue {
+			// The separator should always be at the beginning of a line.
+			items := strings.Split(cur.text, "\n"+listItemSeparator)
+			for _, item := range items[1:] {
+				text := listItemSeparator + item
+				itemName, err := findItemName(text)
+				if err != nil {
+					return err
+				}
+				// Special case: some hand-written resources, e.g. compute_instance, document attributes in a.0.b.0.c format.
+				itemName = strings.ReplaceAll(itemName, ".0.", ".")
+				nestedName, err := findNestedName(text)
+				if err != nil {
+					return err
+				}
+				newNode := &node{
+					name: itemName,
+				}
+				cur.children = append(cur.children, newNode)
+				if text, ok := nestedBlock[nestedName]; ok {
+					newNode.text = text
+					queue = append(queue, newNode)
+				}
+			}
+
+		}
+		queue = queue[l:]
+	}
+	return nil
+}
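+
+// For example (illustrative): the item line "* `boot_disk` ... (#nested_boot_disk)"
+// yields a child node named "boot_disk"; if nestedBlock has an entry for
+// "nested_boot_disk", that block's body becomes the node's text and the node
+// is enqueued so its own items are parsed on a later pass.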
+
+func findItemName(text string) (name string, err error) {
+	name, err = findPattern(text, itemNamePattern)
+	if err != nil {
+		return "", err
+	}
+	if name == "" {
+		return "", fmt.Errorf("cannot find item name from %s", text)
+	}
+	return
+}
+
+func findPattern(text string, pattern string) (string, error) {
+	re, err := regexp.Compile(pattern)
+	if err != nil {
+		return "", err
+	}
+	match := re.FindStringSubmatch(text)
+
+	if match != nil {
+		return match[1], nil
+	}
+	return "", nil
+}
+
+func findNestedName(text string) (string, error) {
+	s := strings.ReplaceAll(text, "\n", "")
+	return findPattern(s, nestedNamePattern)
+}
diff --git a/tools/diff-processor/documentparser/document_parser_test.go b/tools/diff-processor/documentparser/document_parser_test.go
new file mode 100644
index 000000000000..d48df5f184e8
--- /dev/null
+++ b/tools/diff-processor/documentparser/document_parser_test.go
@@ -0,0 +1,116 @@
+package documentparser
+
+import (
+	"os"
+	"sort"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+)
+
+func TestParse(t *testing.T) {
+	b, err := os.ReadFile("../testdata/resource.html.markdown")
+	if err != nil {
+		t.Fatal(err)
+	}
+	parser := NewParser()
+	if err := parser.Parse(b); err != nil {
+		t.Fatal(err)
+	}
+	wantArguments := []string{
+		"boot_disk",
+		"boot_disk.auto_delete",
+		"boot_disk.device_name",
+		"boot_disk.disk_encryption_key_raw",
+		"boot_disk.initialize_params",
+		"boot_disk.initialize_params.enable_confidential_compute",
+		"boot_disk.initialize_params.image",
+		"boot_disk.initialize_params.labels",
+		"boot_disk.initialize_params.provisioned_iops",
+		"boot_disk.initialize_params.provisioned_throughput",
+		"boot_disk.initialize_params.resource_manager_tags",
+		"boot_disk.initialize_params.size",
+		"boot_disk.initialize_params.storage_pool",
+		"boot_disk.initialize_params.type",
+		"boot_disk.kms_key_self_link",
+		"boot_disk.mode",
+		"boot_disk.source",
+		"name",
+		"network_interface",
+		"network_interface.access_config",
+		"network_interface.access_config.nat_ip",
+		"network_interface.access_config.network_tier",
+		"network_interface.access_config.public_ptr_domain_name",
+		"network_interface.alias_ip_range",
+		"network_interface.alias_ip_range.ip_cidr_range",
+		"network_interface.alias_ip_range.subnetwork_range_name",
+		"network_interface.ipv6_access_config",
+		"network_interface.ipv6_access_config.external_ipv6",
+		"network_interface.ipv6_access_config.external_ipv6_prefix_length",
+		"network_interface.ipv6_access_config.name",
+		"network_interface.ipv6_access_config.network_tier",
+		"network_interface.ipv6_access_config.public_ptr_domain_name",
+		"network_interface.network",
+		"network_interface.network_attachment",
+		"network_interface.network_ip",
+		"network_interface.nic_type",
+		"network_interface.queue_count",
+		"network_interface.security_policy",
+		"network_interface.stack_type",
+		"params",
+		// "params.resource_manager_tags", // params text does not include a nested tag
+		"zone",
+		"labels",
+		"description",
+		"traffic_port_selector",
+		"traffic_port_selector.ports",
+		"project",
+	}
+	wantAttributes := []string{
+		"id",
+		"network_interface.access_config.nat_ip",
+		"workload_identity_config",
+		"errors",
+		"workload_identity_config.identity_provider",
+		"workload_identity_config.issuer_uri",
+		"workload_identity_config.workload_pool",
+		"errors.message",
+	}
+	gotArguments := parser.Arguments()
+	gotAttributes := parser.Attributes()
+	for _, arr := range [][]string{gotArguments, wantArguments, gotAttributes, wantAttributes} {
+		sort.Strings(arr)
+	}
+	if diff := cmp.Diff(wantArguments, gotArguments); diff != "" {
+		t.Errorf("Parse returned diff in arguments(-want, +got): %s", diff)
+	}
+	if diff := cmp.Diff(wantAttributes, gotAttributes); diff != "" {
+		t.Errorf("Parse returned diff in attributes(-want, +got): %s", diff)
+	}
+}
+
+func TestTraverse(t *testing.T) {
+	n1 := &node{name: "n1"}
+	n2 := &node{name: "n2"}
+	n3 := &node{name: "n3"}
+	n4 := &node{name: "n4"}
+	root := &node{
+		children: []*node{n1, n2, n3},
+	}
+	n1.children = []*node{n4}
+	n2.children = []*node{n4}
+
+	var paths []string
+	traverse(&paths, "", root)
+
+	wantPaths := []string{
+		"n1",
+		"n1.n4",
+		"n2",
+		"n2.n4",
+		"n3",
+	}
+	if diff := cmp.Diff(wantPaths, paths); diff != "" {
+		t.Errorf("traverse returned diff(-want, +got): %s", diff)
+	}
+}
diff --git a/tools/diff-processor/testdata/resource.html.markdown b/tools/diff-processor/testdata/resource.html.markdown
new file mode 100644
index 000000000000..b06fc5b13984
--- /dev/null
+++ b/tools/diff-processor/testdata/resource.html.markdown
@@ -0,0 +1,285 @@
+---
+subcategory: "Compute Engine"
+description: |-
+  Manages abcdefg.
+---
+
+# google_test_resource
+
+This resource combines sections from google_compute_instance, google_container_attached_cluster, and network_services_endpoint_policy; irrelevant parts are trimmed.
+
+## Example Usage
+
+Lorem ipsum
+
+## Example usage - Confidential Computing
+
+Lorem ipsum
+
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `boot_disk` - (Required) The boot disk for the instance.
+    Structure is [documented below](#nested_boot_disk).
+
+* `name` - (Required) A unique name for the resource, required by GCE.
+    Changing this forces a new resource to be created.
+
+* `zone` - (Optional) The zone that the machine should be created in. If it is not provided, the provider zone is used.
+
+* `network_interface` - (Required) Networks to attach to the instance. This can
+    be specified multiple times. Structure is [documented below](#nested_network_interface).
+
+* `params` - (Optional) Additional instance parameters.
+
+---
+
+<a name="nested_boot_disk"></a>The `boot_disk` block supports:
+
+* `auto_delete` - (Optional) Whether the disk will be auto-deleted when the instance
+    is deleted. Defaults to true.
+
+* `device_name` - (Optional) Name with which attached disk will be accessible.
+    On the instance, this device will be `/dev/disk/by-id/google-{{device_name}}`.
+
+* `mode` - (Optional) The mode in which to attach this disk, either `READ_WRITE`
+  or `READ_ONLY`. If not specified, the default is to attach the disk in `READ_WRITE` mode.
+
+* `disk_encryption_key_raw` - (Optional) A 256-bit [customer-supplied encryption key]
+    (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption),
+    encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4)
+    to encrypt this disk. Only one of `kms_key_self_link` and `disk_encryption_key_raw`
+    may be set.
+
+* `kms_key_self_link` - (Optional) The self_link of the encryption key that is
+    stored in Google Cloud KMS to encrypt this disk. Only one of `kms_key_self_link`
+    and `disk_encryption_key_raw` may be set.
+
+* `initialize_params` - (Optional) Parameters for a new disk that will be created
+    alongside the new instance. Either `initialize_params` or `source` must be set.
+    Structure is [documented below](#nested_initialize_params).
+
+* `source` - (Optional) The name or self_link of the existing disk (such as those managed by
+    `google_compute_disk`) or disk image. To create an instance from a snapshot, first create a
+    `google_compute_disk` from a snapshot and reference it here.
+
+<a name="nested_initialize_params"></a>The `initialize_params` block supports:
+
+* `size` - (Optional) The size of the image in gigabytes. If not specified, it
+    will inherit the size of its base image.
+
+* `type` - (Optional) The GCE disk type. Such as pd-standard, pd-balanced or pd-ssd.
+
+* `image` - (Optional) The image from which to initialize this disk. This can be
+    one of: the image's `self_link`, `projects/{project}/global/images/{image}`,
+    `projects/{project}/global/images/family/{family}`, `global/images/{image}`,
+    `global/images/family/{family}`, `family/{family}`, `{project}/{family}`,
+    `{project}/{image}`, `{family}`, or `{image}`. If referred by family, the
+    images names must include the family name. If they don't, use the
+    [google_compute_image data source](/docs/providers/google/d/compute_image.html).
+    For instance, the image `centos-6-v20180104` includes its family name `centos-6`.
+    These images can be referred by family name here.
+
+* `labels` - (Optional) A set of key/value label pairs assigned to the disk. This
+    field is only applicable for persistent disks.
+
+* `resource_manager_tags` - (Optional) A tag is a key-value pair that can be attached to a Google Cloud resource. You can use tags to conditionally allow or deny policies based on whether a resource has a specific tag. This value is not returned by the API. In Terraform, this value cannot be updated and changing it will recreate the resource.
+
+* `provisioned_iops` - (Optional) Indicates how many IOPS to provision for the disk.
+    This sets the number of I/O operations per second that the disk can handle.
+    For more details, see the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks).
+    Note: Updating currently is only supported for hyperdisk skus via disk update
+    api/gcloud without the need to delete and recreate the disk, hyperdisk allows
+    for an update of IOPS every 4 hours. To update your hyperdisk more frequently,
+    you'll need to manually delete and recreate it.
+
+* `provisioned_throughput` - (Optional) Indicates how much throughput to provision for the disk.
+    This sets the number of throughput mb per second that the disk can handle.
+    For more details, see the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks).
+    Note: Updating currently is only supported for hyperdisk skus via disk update
+    api/gcloud without the need to delete and recreate the disk, hyperdisk allows
+    for an update of throughput every 4 hours. To update your hyperdisk more
+    frequently, you'll need to manually delete and recreate it.
+
+* `enable_confidential_compute` - (Optional) Whether this disk is using confidential compute mode.
+    Note: Only supported on hyperdisk skus, disk_encryption_key is required when setting to true.
+
+* `storage_pool` - (Optional) The URL of the storage pool in which the new disk is created.
+    For example:
+    * https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/storagePools/{storagePool}
+    * /projects/{project}/zones/{zone}/storagePools/{storagePool}
+
+
+<a name="nested_network_interface"></a>The `network_interface` block supports:
+
+* `network` - (Optional) The name or self_link of the network to attach this interface to.
+    Either `network` or `subnetwork` must be provided. If network isn't provided it will
+    be inferred from the subnetwork.
+
+*  `subnetwork` - (Optional) The name or self_link of the subnetwork to attach this
+    interface to. Either `network` or `subnetwork` must be provided. If network isn't provided
+    it will be inferred from the subnetwork. The subnetwork must exist in the same region this
+    instance will be created in. If the network resource is in
+    [legacy](https://cloud.google.com/vpc/docs/legacy) mode, do not specify this field. If the
+    network is in auto subnet mode, specifying the subnetwork is optional. If the network is
+    in custom subnet mode, specifying the subnetwork is required.
+
+
+*  `subnetwork_project` - (Optional) The project in which the subnetwork belongs.
+   If the `subnetwork` is a self_link, this field is ignored in favor of the project
+   defined in the subnetwork self_link. If the `subnetwork` is a name and this
+   field is not provided, the provider project is used.
+
+* `network_ip` - (Optional) The private IP address to assign to the instance. If
+    empty, the address will be automatically assigned.
+
+* `access_config` - (Optional) Access configurations, i.e. IPs via which this
+    instance can be accessed via the Internet. Omit to ensure that the instance
+    is not accessible from the Internet. If omitted, ssh provisioners will not
+    work unless Terraform can send traffic to the instance's network (e.g. via
+    tunnel or because it is running on another cloud instance on that network).
+    This block can be repeated multiple times. Structure [documented below](#nested_access_config).
+
+* `alias_ip_range` - (Optional) An
+    array of alias IP ranges for this network interface. Can only be specified for network
+    interfaces on subnet-mode networks. Structure [documented below](#nested_alias_ip_range).
+
+* `nic_type` - (Optional) The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET.
+
+* `network_attachment` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) The URL of the network attachment that this interface should connect to in the following format: `projects/{projectNumber}/regions/{region_name}/networkAttachments/{network_attachment_name}`.
+
+* `stack_type` - (Optional) The stack type for this network interface to identify whether the IPv6 feature is enabled or not. Values are IPV4_IPV6 or IPV4_ONLY. If not specified, IPV4_ONLY will be used.
+
+* `ipv6_access_config` - (Optional) An array of IPv6 access configurations for this interface.
+Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig
+specified, then this instance will have no external IPv6 Internet access. Structure [documented below](#nested_ipv6_access_config).
+
+* `queue_count` - (Optional) The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified.
+
+* `security_policy` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy.
+
+<a name="nested_access_config"></a>The `access_config` block supports:
+
+* `nat_ip` - (Optional) The IP address that will be 1:1 mapped to the instance's
+    network ip. If not given, one will be generated.
+
+* `public_ptr_domain_name` - (Optional) The DNS domain name for the public PTR record.
+    To set this field on an instance, you must be verified as the owner of the domain.
+    See [the docs](https://cloud.google.com/compute/docs/instances/create-ptr-record) for how
+    to become verified as a domain owner.
+
+* `network_tier` - (Optional) The [networking tier](https://cloud.google.com/network-tiers/docs/overview) used for configuring this instance.
+    This field can take the following values: PREMIUM, FIXED_STANDARD or STANDARD. If this field is
+    not specified, it is assumed to be PREMIUM.
+
+<a name="nested_ipv6_access_config"></a>The `ipv6_access_config` block supports:
+
+* `external_ipv6` - (Optional) The first IPv6 address of the external IPv6 range associated
+    with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig.
+    To use a static external IP address, it must be unused and in the same region as the instance's zone.
+    If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.
+
+* `external_ipv6_prefix_length` - (Optional) The prefix length of the external IPv6 range.
+
+* `name` - (Optional) The name of this access configuration. In ipv6AccessConfigs, the recommended name
+    is "External IPv6".
+
+* `network_tier` - (Optional) The service-level to be provided for IPv6 traffic when the
+    subnet has an external subnet. Only PREMIUM or STANDARD tier is valid for IPv6.
+
+* `public_ptr_domain_name` - (Optional) The domain name to be used when creating DNSv6
+    records for the external IPv6 ranges.
+
+<a name="nested_alias_ip_range"></a>The `alias_ip_range` block supports:
+
+* `ip_cidr_range` - The IP CIDR range represented by this alias IP range. This IP CIDR range
+    must belong to the specified subnetwork and cannot contain IP addresses reserved by
+    system or used by other network interfaces. This range may be a single IP address
+    (e.g. 10.2.3.4), a netmask (e.g. /24) or a CIDR format string (e.g. 10.1.2.0/24).
+
+* `subnetwork_range_name` - (Optional) The subnetwork secondary range name specifying
+    the secondary range from which to allocate the IP CIDR range for this alias IP
+    range. If left unspecified, the primary range of the subnetwork will be used.
+
+<a name="nested_params"></a>The `params` block supports:
+
+* `resource_manager_tags` (Optional) - A tag is a key-value pair that can be attached to a Google Cloud resource. You can use tags to conditionally allow or deny policies based on whether a resource has a specific tag. This value is not returned by the API. In Terraform, this value cannot be updated and changing it will recreate the resource.
+
+- - -
+
+
+* `labels` -
+  (Optional)
+  Set of label tags associated with the TcpRoute resource.
+  **Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
+  Please refer to the field `effective_labels` for all of the labels present on the resource.
+
+* `description` -
+  (Optional)
+  A free-text description of the resource. Max length 1024 characters.
+
+* `traffic_port_selector` -
+  (Optional)
+  Port selector for the (matched) endpoints. If no port selector is provided, the matched config is applied to all ports.
+  Structure is [documented below](#nested_traffic_port_selector).
+
+* `project` - (Optional) The ID of the project in which the resource belongs.
+    If it is not provided, the provider project is used.
+
+
+<a name="nested_traffic_port_selector"></a>The `traffic_port_selector` block supports:
+
+* `ports` -
+  (Required)
+  List of ports. Can be port numbers or port range (example, [80-90] specifies all ports from 80 to 90, including 80 and 90) or named ports or * to specify all ports. If the list is empty, all ports are selected.
+
+
+## Attributes Reference
+
+In addition to the arguments listed above, the following computed attributes are
+exported:
+
+* `id` - an identifier for the resource with format `projects/{{project}}/zones/{{zone}}/instances/{{name}}`
+
+* `network_interface.0.access_config.0.nat_ip` - If the instance has an access config, either the given external ip (in the `nat_ip` field) or the ephemeral (generated) ip (if you didn't provide one).
+
+* `workload_identity_config` -
+  Workload Identity settings.
+  Structure is [documented below](#nested_workload_identity_config).
+
+* `errors` -
+  A set of errors found in the cluster.
+  Structure is [documented below](#nested_errors).
+ 
+
+<a name="nested_workload_identity_config"></a>The `workload_identity_config` block contains:
+
+* `identity_provider` -
+  (Optional)
+  The ID of the OIDC Identity Provider (IdP) associated to
+  the Workload Identity Pool.
+
+* `issuer_uri` -
+  (Optional)
+  The OIDC issuer URL for this cluster.
+
+* `workload_pool` -
+  (Optional)
+  The Workload Identity Pool associated to the cluster.
+
+<a name="nested_errors"></a>The `errors` block contains:
+
+* `message` -
+  (Optional)
+  Human-friendly description of the error.
+
+## Timeouts
+
+Lorem ipsum
+
+## Import
+
+Lorem ipsum
+
diff --git a/tools/diff-processor/testdata/website/docs/r/a_resource.html.markdown b/tools/diff-processor/testdata/website/docs/r/a_resource.html.markdown
new file mode 100644
index 000000000000..9d3f5a0cde76
--- /dev/null
+++ b/tools/diff-processor/testdata/website/docs/r/a_resource.html.markdown
@@ -0,0 +1,18 @@
+## Some resource description
+
+## Argument Reference
+
+* `field_one` lorem ipsum. Structure is [documented below](#nested_field_one).
+* `member/members` - (Required) lorem ipsum.
+
+<a name="nested_field_one"></a>The `field_one` block supports:
+
+* `a` - (Optional) lorem ipsum.
+
+## Attributes Reference
+
+* `field_two` lorem ipsum. Structure is [documented below](#nested_field_two).
+
+<a name="nested_field_two"></a>The `field_two` block supports:
+
+* `a` - (Optional) lorem ipsum.
\ No newline at end of file