diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index c07e82039..07b8856eb 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -741,7 +741,8 @@ async def clear_scroll( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears the search context and results for a scrolling search. + Clear a scrolling search. Clear the search context and results for a scrolling + search. ``_ @@ -791,7 +792,11 @@ async def close_point_in_time( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Closes a point-in-time. + Close a point in time. A point in time must be opened explicitly before being + used in search requests. The `keep_alive` parameter tells Elasticsearch how long + it should persist. A point in time is automatically closed when the `keep_alive` + period has elapsed. However, keeping points in time has a cost; close them as + soon as they are no longer required for search requests. ``_ @@ -1404,7 +1409,10 @@ async def delete_by_query_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Changes the number of requests per second for a particular Delete By Query operation. + Throttle a delete by query operation. Change the number of requests per second + for a particular delete by query operation. Rethrottling that speeds up the query + takes effect immediately but rethrottling that slows down the query takes effect + after completing the current batch to prevent scroll timeouts. ``_ @@ -1839,10 +1847,11 @@ async def field_caps( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The field capabilities API returns the information about the capabilities of - fields among multiple indices. The field capabilities API returns runtime fields - like any other field. For example, a runtime field with a type of keyword is - returned as any other field that belongs to the `keyword` family. + Get the field capabilities. Get information about the capabilities of fields + among multiple indices. For data streams, the API returns field capabilities + among the stream’s backing indices. It returns runtime fields like any other + field. For example, a runtime field with a type of keyword is returned the same + as any other field that belongs to the `keyword` family. ``_ @@ -2090,7 +2099,7 @@ async def get_script_context( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns all script contexts. + Get script contexts. Get a list of supported script contexts and their methods. ``_ """ @@ -2125,7 +2134,7 @@ async def get_script_languages( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns available script types, languages and contexts + Get script languages. Get a list of available script types, languages, and contexts. ``_ """ @@ -2505,7 +2514,15 @@ async def knn_search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs a kNN search. + Run a kNN search. NOTE: The kNN search API has been replaced by the `knn` option + in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector + field and return the matching documents. Given a query vector, the API finds + the k closest vectors and returns those documents as search hits. Elasticsearch + uses the HNSW algorithm to support efficient kNN search.
Like most kNN algorithms, + HNSW is an approximate method that sacrifices result accuracy for improved search + speed. This means the results returned are not always the true k closest neighbors. + The kNN search API supports restricting the search using a filter. The search + will return the top k documents that also match the filter query. ``_ @@ -2606,7 +2623,10 @@ async def mget( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to get multiple documents in one request. + Get multiple documents. Get multiple JSON documents by ID from one or more indices. + If you specify an index in the request URI, you only need to specify the document + IDs in the request body. To ensure fast responses, this multi get (mget) API + responds with partial results if one or more shards fail. ``_ @@ -2727,7 +2747,13 @@ async def msearch( typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to execute several search operations in one request. + Run multiple searches. The format of the request is similar to the bulk API format + and makes use of the newline delimited JSON (NDJSON) format. The structure is + as follows: ``` header\\n body\\n header\\n body\\n ``` This structure is specifically + optimized to reduce parsing if a specific search ends up redirected to another + node. IMPORTANT: The final line of data must end with a newline character `\\n`. + Each newline character may be preceded by a carriage return `\\r`. When sending + requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. ``_ @@ -2859,7 +2885,7 @@ async def msearch_template( typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Runs multiple templated searches with a single request. + Run multiple templated searches. ``_ @@ -2954,7 +2980,11 @@ async def mtermvectors( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns multiple termvectors in one request. + Get multiple term vectors. You can specify existing documents by index and ID + or provide artificial documents in the body of the request. You can specify the + index in the request body or request URI. The response contains a `docs` array + with all the fetched termvectors. Each element has the structure provided by + the termvectors API. ``_ @@ -3065,13 +3095,15 @@ async def open_point_in_time( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - A search request by default executes against the most recent visible data of - the target indices, which is called point in time. Elasticsearch pit (point in - time) is a lightweight view into the state of the data as it existed when initiated. - In some cases, it’s preferred to perform multiple search requests using the same - point in time. For example, if refreshes happen between `search_after` requests, - then the results of those requests might not be consistent as changes happening - between searches are only visible to the more recent point in time. + Open a point in time. A search request by default runs against the most recent + visible data of the target indices, which is called point in time. Elasticsearch + pit (point in time) is a lightweight view into the state of the data as it existed + when initiated. In some cases, it’s preferred to perform multiple search requests + using the same point in time. 
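As an editorial aside: the NDJSON structure described in the msearch docstring above maps onto the Python client as a flat list of alternating header and body objects. A minimal sketch, assuming an `AsyncElasticsearch` instance named `es` and hypothetical index names:

```python
# The client serializes this list to NDJSON (header\n body\n ...) and sends
# it with the Content-Type: application/x-ndjson header, as documented above.
resp = await es.msearch(
    index="my-index",  # default index for searches whose header is empty
    searches=[
        {},  # header: fall back to the default index above
        {"query": {"match": {"title": "elasticsearch"}}},
        {"index": "my-other-index"},  # header: route this search elsewhere
        {"query": {"match_all": {}}},
    ],
)
for item in resp["responses"]:
    print(item["hits"]["total"])
```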
For example, if refreshes happen between `search_after` + requests, then the results of those requests might not be consistent as changes + happening between searches are only visible to the more recent point in time. + A point in time must be opened explicitly before being used in search requests. + The `keep_alive` parameter tells Elasticsearch how long it should persist. ``_ @@ -3238,8 +3270,8 @@ async def rank_eval( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables you to evaluate the quality of ranked search results over a set of typical - search queries. + Evaluate ranked search results. Evaluate the quality of ranked search results + over a set of typical search queries. ``_ @@ -3431,7 +3463,8 @@ async def reindex_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Copies documents from a source to a destination. + Throttle a reindex operation. Change the number of requests per second for a + particular reindex operation. ``_ @@ -3482,7 +3515,7 @@ async def render_search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Renders a search template as a search request body. + Render a search template. Render a search template as a search request body. ``_ @@ -3608,7 +3641,22 @@ async def scroll( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to retrieve a large numbers of results from a single search request. + Run a scrolling search. IMPORTANT: The scroll API is no longer recommended for + deep pagination. If you need to preserve the index state while paging through + more than 10,000 hits, use the `search_after` parameter with a point in time + (PIT). The scroll API gets large sets of results from a single scrolling search + request. To get the necessary scroll ID, submit a search API request that includes + an argument for the `scroll` query parameter. The `scroll` parameter indicates + how long Elasticsearch should retain the search context for the request. The + search response returns a scroll ID in the `_scroll_id` response body parameter. + You can then use the scroll ID with the scroll API to retrieve the next batch + of results for the request. If the Elasticsearch security features are enabled, + the access to the results of a specific scroll ID is restricted to the user or + API key that submitted the search. You can also use the scroll API to specify + a new scroll parameter that extends or shortens the retention period for the + search context. IMPORTANT: Results from a scrolling search reflect the state + of the index at the time of the initial search request. Subsequent indexing or + document changes only affect later search and scroll requests. ``_ @@ -3798,9 +3846,9 @@ async def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns search hits that match the query defined in the request. You can provide - search queries using the `q` query string parameter or the request body. If both - are specified, only the query parameter is used. + Run a search. Get search hits that match the query defined in the request. You + can provide search queries using the `q` query string parameter or the request + body. If both are specified, only the query parameter is used. ``_ @@ -4230,7 +4278,7 @@ async def search_mvt( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> BinaryApiResponse: """ - Search a vector tile. Searches a vector tile for geospatial values.
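A minimal sketch of the scroll flow described in the docstring above: open the context with the `scroll` parameter on a search, page with the returned `_scroll_id`, then clean up. `es` is an assumed `AsyncElasticsearch` client and the index name is hypothetical:

```python
resp = await es.search(
    index="my-index",
    scroll="2m",   # how long to retain the search context between calls
    size=1000,
    query={"match_all": {}},
)
while resp["hits"]["hits"]:
    for hit in resp["hits"]["hits"]:
        print(hit["_id"])
    # Each scroll call returns the next batch and refreshes the retention period.
    resp = await es.scroll(scroll_id=resp["_scroll_id"], scroll="2m")
# Free the search context as soon as it is no longer needed.
await es.clear_scroll(scroll_id=resp["_scroll_id"])
```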
+ Search a vector tile. Search a vector tile for geospatial values. ``_ @@ -4384,8 +4432,10 @@ async def search_shards( routing: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about the indices and shards that a search request would - be executed against. + Get the search shards. Get the indices and shards that a search request would + be run against. This information can be useful for working out issues or planning + optimizations with routing and shard preferences. When filtered aliases are used, + the filter is returned as part of the indices section. ``_ @@ -4486,7 +4536,7 @@ async def search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Runs a search with a search template. + Run a search with a search template. ``_ @@ -4618,9 +4668,15 @@ async def terms_enum( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The terms enum API can be used to discover terms in the index that begin with - the provided string. It is designed for low-latency look-ups used in auto-complete - scenarios. + Get terms in an index. Discover terms that match a partial string in an index. + This "terms enum" API is designed for low-latency look-ups used in auto-complete + scenarios. If the `complete` property in the response is false, the returned + terms set may be incomplete and should be treated as approximate. This can occur + due to a few reasons, such as a request timeout or a node error. NOTE: The terms + enum API may return terms from deleted documents. Deleted documents are initially + only marked as deleted. It is not until their segments are merged that documents + are actually deleted. Until that happens, the terms enum API will return terms + from these documents. ``_ @@ -4718,8 +4774,8 @@ async def termvectors( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get term vector information. Returns information and statistics about terms in - the fields of a particular document. + Get term vector information. Get information and statistics about terms in the + fields of a particular document. ``_ @@ -5224,7 +5280,10 @@ async def update_by_query_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Changes the number of requests per second for a particular Update By Query operation. + Throttle an update by query operation. Change the number of requests per second + for a particular update by query operation. Rethrottling that speeds up the query + takes effect immediately but rethrottling that slows down the query takes effect + after completing the current batch to prevent scroll timeouts. ``_ diff --git a/elasticsearch/_async/client/async_search.py b/elasticsearch/_async/client/async_search.py index 74bfdc0fc..1f556c404 100644 --- a/elasticsearch/_async/client/async_search.py +++ b/elasticsearch/_async/client/async_search.py @@ -36,11 +36,11 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async search by identifier. If the search is still running, the search - request will be cancelled. Otherwise, the saved search results are deleted. If - the Elasticsearch security features are enabled, the deletion of a specific async - search is restricted to: the authenticated user that submitted the original search - request; users that have the `cancel_task` cluster privilege. + Delete an async search. If the asynchronous search is still running, it is cancelled.
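An illustrative call to the terms enum API documented above, assuming an `AsyncElasticsearch` client `es`; the index, field, and prefix are placeholders:

```python
resp = await es.terms_enum(
    index="stack-traces",
    field="host.name",
    string="web-",  # partial string to complete
)
if not resp["complete"]:
    # A timeout or node error truncated the set; treat it as approximate.
    print("partial result")
print(resp["terms"])
```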
+ Otherwise, the saved search results are deleted. If the Elasticsearch security + features are enabled, the deletion of a specific async search is restricted to: + the authenticated user that submitted the original search request; users that + have the `cancel_task` cluster privilege. ``_ @@ -85,9 +85,9 @@ async def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the results of a previously submitted async search request given its - identifier. If the Elasticsearch security features are enabled, access to the - results of a specific async search is restricted to the user or API key that + Get async search results. Retrieve the results of a previously submitted asynchronous + search request. If the Elasticsearch security features are enabled, access to + the results of a specific async search is restricted to the user or API key that submitted it. ``_ @@ -148,10 +148,10 @@ async def status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get async search status Retrieves the status of a previously submitted async - search request given its identifier, without retrieving search results. If the - Elasticsearch security features are enabled, use of this API is restricted to - the `monitoring_user` role. + Get the async search status. Get the status of a previously submitted async search + request given its identifier, without retrieving search results. If the Elasticsearch + security features are enabled, use of this API is restricted to the `monitoring_user` + role. ``_ @@ -323,15 +323,15 @@ async def submit( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Runs a search request asynchronously. When the primary sort of the results is - an indexed field, shards get sorted based on minimum and maximum value that they - hold for that field, hence partial results become available following the sort - criteria that was requested. Warning: Async search does not support scroll nor - search requests that only include the suggest section. By default, Elasticsearch - doesn’t allow you to store an async search response larger than 10Mb and an attempt - to do this results in an error. The maximum allowed size for a stored async search - response can be set by changing the `search.max_async_search_response_size` cluster - level setting. + Run an async search. When the primary sort of the results is an indexed field, + shards get sorted based on minimum and maximum value that they hold for that + field. Partial results become available following the sort criteria that was + requested. Warning: Asynchronous search does not support scroll or search requests + that include only the suggest section. By default, Elasticsearch does not allow + you to store an async search response larger than 10Mb and an attempt to do this + results in an error. The maximum allowed size for a stored async search response + can be set by changing the `search.max_async_search_response_size` cluster level + setting. ``_ diff --git a/elasticsearch/_async/client/autoscaling.py b/elasticsearch/_async/client/autoscaling.py index b558e94a5..a2f869867 100644 --- a/elasticsearch/_async/client/autoscaling.py +++ b/elasticsearch/_async/client/autoscaling.py @@ -36,7 +36,8 @@ async def delete_autoscaling_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. + Delete an autoscaling policy. 
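The async search lifecycle covered above (submit, poll the status, fetch results, delete) in one hedged sketch; `es` is an assumed `AsyncElasticsearch` instance and the index name is hypothetical:

```python
submitted = await es.async_search.submit(
    index="my-index",
    query={"match_all": {}},
    wait_for_completion_timeout="1s",  # return a partial response early
    keep_on_completion=True,           # keep results so they can be fetched later
)
search_id = submitted["id"]
status = await es.async_search.status(id=search_id)
if not status["is_running"]:
    results = await es.async_search.get(id=search_id)
    print(results["response"]["hits"]["total"])
# Stored responses count against search.max_async_search_response_size; clean up.
await es.async_search.delete(id=search_id)
```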
NOTE: This feature is designed for indirect use + by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. ``_ @@ -76,8 +77,18 @@ async def get_autoscaling_capacity( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets the current autoscaling capacity based on the configured autoscaling policy. - Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + Get the autoscaling capacity. NOTE: This feature is designed for indirect use + by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. + Direct use is not supported. This API gets the current autoscaling capacity based + on the configured autoscaling policy. It will return information to size the + cluster appropriately to the current workload. The `required_capacity` is calculated + as the maximum of the `required_capacity` result of all individual deciders that + are enabled for the policy. The operator should verify that the `current_nodes` + match the operator’s knowledge of the cluster to avoid making autoscaling decisions + based on stale or incomplete information. The response contains decider-specific + information you can use to diagnose how and why autoscaling determined a certain + capacity was required. This information is provided for diagnosis only. Do not + use this information to make autoscaling decisions. ``_ """ @@ -113,7 +124,8 @@ async def get_autoscaling_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. + Get an autoscaling policy. NOTE: This feature is designed for indirect use by + Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. ``_ @@ -158,8 +170,9 @@ async def put_autoscaling_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new autoscaling policy. Designed for indirect use by ECE/ESS and ECK. - Direct use is not supported. + Create or update an autoscaling policy. NOTE: This feature is designed for indirect + use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on + Kubernetes. Direct use is not supported. 
``_ diff --git a/elasticsearch/_async/client/ccr.py b/elasticsearch/_async/client/ccr.py index c4f7b0c05..66b794ce1 100644 --- a/elasticsearch/_async/client/ccr.py +++ b/elasticsearch/_async/client/ccr.py @@ -68,6 +68,8 @@ async def delete_auto_follow_pattern( @_rewrite_parameters( body_fields=( "leader_index", + "remote_cluster", + "data_stream_name", "max_outstanding_read_requests", "max_outstanding_write_requests", "max_read_request_operation_count", @@ -78,29 +80,31 @@ async def delete_auto_follow_pattern( "max_write_request_operation_count", "max_write_request_size", "read_poll_timeout", - "remote_cluster", + "settings", ), ) async def follow( self, *, index: str, + leader_index: t.Optional[str] = None, + remote_cluster: t.Optional[str] = None, + data_stream_name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, - leader_index: t.Optional[str] = None, max_outstanding_read_requests: t.Optional[int] = None, max_outstanding_write_requests: t.Optional[int] = None, max_read_request_operation_count: t.Optional[int] = None, - max_read_request_size: t.Optional[str] = None, + max_read_request_size: t.Optional[t.Union[int, str]] = None, max_retry_delay: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_write_buffer_count: t.Optional[int] = None, - max_write_buffer_size: t.Optional[str] = None, + max_write_buffer_size: t.Optional[t.Union[int, str]] = None, max_write_request_operation_count: t.Optional[int] = None, - max_write_request_size: t.Optional[str] = None, + max_write_request_size: t.Optional[t.Union[int, str]] = None, pretty: t.Optional[bool] = None, read_poll_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - remote_cluster: t.Optional[str] = None, + settings: t.Optional[t.Mapping[str, t.Any]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, @@ -111,26 +115,51 @@ async def follow( ``_ - :param index: The name of the follower index - :param leader_index: - :param max_outstanding_read_requests: - :param max_outstanding_write_requests: - :param max_read_request_operation_count: - :param max_read_request_size: - :param max_retry_delay: - :param max_write_buffer_count: - :param max_write_buffer_size: - :param max_write_request_operation_count: - :param max_write_request_size: - :param read_poll_timeout: - :param remote_cluster: - :param wait_for_active_shards: Sets the number of shard copies that must be active - before returning. Defaults to 0. Set to `all` for all shard copies, otherwise - set to any non-negative value less than or equal to the total number of copies - for the shard (number of replicas + 1) + :param index: The name of the follower index. + :param leader_index: The name of the index in the leader cluster to follow. + :param remote_cluster: The remote cluster containing the leader index. + :param data_stream_name: If the leader index is part of a data stream, the name + to which the local data stream for the followed index should be renamed. + :param max_outstanding_read_requests: The maximum number of outstanding read + requests from the remote cluster. + :param max_outstanding_write_requests: The maximum number of outstanding write + requests on the follower. + :param max_read_request_operation_count: The maximum number of operations to + pull per read from the remote cluster.
+ :param max_read_request_size: The maximum size in bytes per read of a batch + of operations pulled from the remote cluster. + :param max_retry_delay: The maximum time to wait before retrying an operation + that failed exceptionally. An exponential backoff strategy is employed when + retrying. + :param max_write_buffer_count: The maximum number of operations that can be queued + for writing. When this limit is reached, reads from the remote cluster will + be deferred until the number of queued operations goes below the limit. + :param max_write_buffer_size: The maximum total bytes of operations that can + be queued for writing. When this limit is reached, reads from the remote + cluster will be deferred until the total bytes of queued operations goes + below the limit. + :param max_write_request_operation_count: The maximum number of operations per + bulk write request executed on the follower. + :param max_write_request_size: The maximum total bytes of operations per bulk + write request executed on the follower. + :param read_poll_timeout: The maximum time to wait for new operations on the + remote cluster when the follower index is synchronized with the leader index. + When the timeout has elapsed, the poll for operations will return to the + follower so that it can update some statistics. Then the follower will immediately + attempt to read from the leader again. + :param settings: Settings to override from the leader index. + :param wait_for_active_shards: Specifies the number of shards to wait on being + active before responding. This defaults to waiting on none of the shards + to be active. A shard must be restored from the leader index before being + active. Restoring a follower shard requires transferring all the remote Lucene + segment files to the follower index.
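With the reworked signature above, `leader_index` and `remote_cluster` become required body fields. A usage sketch, with all names hypothetical and the remote cluster assumed to be registered in cluster settings already:

```python
resp = await es.ccr.follow(
    index="follower-index",        # follower index to create locally
    leader_index="leader-index",   # index to follow on the remote cluster
    remote_cluster="remote-cluster",
    max_read_request_size="32mb",  # byte-size strings or ints are now both accepted
    settings={"index.number_of_replicas": 0},  # override leader index settings
    wait_for_active_shards=1,
)
print(resp["follow_index_created"], resp["index_following_started"])
```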
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") + if leader_index is None and body is None: + raise ValueError("Empty value passed for parameter 'leader_index'") + if remote_cluster is None and body is None: + raise ValueError("Empty value passed for parameter 'remote_cluster'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ccr/follow' __query: t.Dict[str, t.Any] = {} @@ -148,6 +177,10 @@ async def follow( if not __body: if leader_index is not None: __body["leader_index"] = leader_index + if remote_cluster is not None: + __body["remote_cluster"] = remote_cluster + if data_stream_name is not None: + __body["data_stream_name"] = data_stream_name if max_outstanding_read_requests is not None: __body["max_outstanding_read_requests"] = max_outstanding_read_requests if max_outstanding_write_requests is not None: @@ -174,8 +207,8 @@ async def follow( __body["max_write_request_size"] = max_write_request_size if read_poll_timeout is not None: __body["read_poll_timeout"] = read_poll_timeout - if remote_cluster is not None: - __body["remote_cluster"] = remote_cluster + if settings is not None: + __body["settings"] = settings __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", diff --git a/elasticsearch/_async/client/connector.py b/elasticsearch/_async/client/connector.py index 8f3fe042d..fceb2896d 100644 --- a/elasticsearch/_async/client/connector.py +++ b/elasticsearch/_async/client/connector.py @@ -36,7 +36,8 @@ async def check_in( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the last_seen field in the connector, and sets it to current timestamp + Check in a connector. Update the `last_seen` field in the connector and set it + to the current timestamp. ``_ @@ -77,7 +78,10 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a connector. + Delete a connector. Removes a connector and associated sync jobs. This is a destructive + action that is not recoverable. NOTE: This action doesn’t delete any API keys, + ingest pipelines, or data indices associated with the connector. These need to + be removed manually. ``_ @@ -121,7 +125,7 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a connector. + Get a connector. Get the details about a connector. ``_ @@ -215,7 +219,8 @@ async def last_sync( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates last sync stats in the connector document + Update the connector last sync stats. Update the fields related to the last sync + of a connector. This action is used for analytics and monitoring. ``_ @@ -309,7 +314,7 @@ async def list( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns existing connectors. + Get all connectors. Get information about all connectors. ``_ @@ -383,7 +388,11 @@ async def post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a connector. + Create a connector. Connectors are Elasticsearch integrations that bring content + from third-party data sources, which can be deployed on Elastic Cloud or hosted + on your own infrastructure. Elastic managed connectors (Native connectors) are + a managed service on Elastic Cloud. Self-managed connectors (Connector clients) + are self-managed on your infrastructure. 
``_ @@ -461,7 +470,7 @@ async def put( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a connector. + Create or update a connector. ``_ @@ -530,7 +539,10 @@ async def sync_job_cancel( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancels a connector sync job. + Cancel a connector sync job. Cancel a connector sync job, which sets the status + to cancelling and updates `cancellation_requested_at` to the current time. The + connector service is then responsible for setting the status of connector sync + jobs to cancelled. ``_ @@ -574,7 +586,8 @@ async def sync_job_delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a connector sync job. + Delete a connector sync job. Remove a connector sync job and its associated data. + This is a destructive action that is not recoverable. ``_ @@ -617,7 +630,7 @@ async def sync_job_get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a connector sync job. + Get a connector sync job. ``_ @@ -685,7 +698,8 @@ async def sync_job_list( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Lists connector sync jobs. + Get all connector sync jobs. Get information about all stored connector sync + jobs listed by their creation date in ascending order. ``_ @@ -746,7 +760,8 @@ async def sync_job_post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a connector sync job. + Create a connector sync job. Create a connector sync job document in the internal + index and initialize its counters and timestamps with default values. ``_ @@ -797,7 +812,8 @@ async def update_active_filtering( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Activates the valid draft filtering for a connector. + Activate the connector draft filter. Activates the valid draft filtering for + a connector. ``_ @@ -842,7 +858,11 @@ async def update_api_key_id( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the API key id in the connector document + Update the connector API key ID. Update the `api_key_id` and `api_key_secret_id` + fields of a connector. You can specify the ID of the API key used for authorization + and the ID of the connector secret where the API key is stored. The connector + secret ID is required only for Elastic managed (native) connectors. Self-managed + connectors (connector clients) do not use this field. ``_ @@ -896,7 +916,8 @@ async def update_configuration( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the configuration field in the connector document + Update the connector configuration. Update the configuration field in the connector + document. ``_ @@ -949,7 +970,10 @@ async def update_error( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the filtering field in the connector document + Update the connector error field. Set the error field for the connector. If the + error provided in the request body is non-null, the connector’s status is updated + to error. Otherwise, if the error is reset to null, the connector status is updated + to connected. ``_ @@ -1003,7 +1027,10 @@ async def update_filtering( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the filtering field in the connector document + Update the connector filtering. 
Update the draft filtering configuration of a + connector and mark the draft validation state as edited. The filtering draft + is activated once validated by the running Elastic connector service. The filtering + property is used to configure sync rules (both basic and advanced) for a connector. ``_ @@ -1059,7 +1086,8 @@ async def update_filtering_validation( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the draft filtering validation info for a connector. + Update the connector draft filtering validation. Update the draft filtering validation + info for a connector. ``_ @@ -1111,7 +1139,8 @@ async def update_index_name( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the index_name in the connector document + Update the connector index name. Update the `index_name` field of a connector, + specifying the index where the data ingested by the connector is stored. ``_ @@ -1164,7 +1193,7 @@ async def update_name( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the name and description fields in the connector document + Update the connector name and description. ``_ @@ -1217,7 +1246,7 @@ async def update_native( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the is_native flag in the connector document + Update the connector is_native flag. ``_ @@ -1269,7 +1298,8 @@ async def update_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the pipeline field in the connector document + Update the connector pipeline. When you create a new connector, the configuration + of an ingest pipeline is populated with default settings. ``_ @@ -1321,7 +1351,7 @@ async def update_scheduling( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the scheduling field in the connector document + Update the connector scheduling. ``_ @@ -1373,7 +1403,7 @@ async def update_service_type( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the service type of the connector + Update the connector service type. ``_ @@ -1432,7 +1462,7 @@ async def update_status( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the status of the connector + Update the connector status. ``_ diff --git a/elasticsearch/_async/client/dangling_indices.py b/elasticsearch/_async/client/dangling_indices.py index b65002f44..59cc838fa 100644 --- a/elasticsearch/_async/client/dangling_indices.py +++ b/elasticsearch/_async/client/dangling_indices.py @@ -39,13 +39,17 @@ async def delete_dangling_index( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the specified dangling index + Delete a dangling index. If Elasticsearch encounters index data that is absent + from the current cluster state, those indices are considered to be dangling. + For example, this can happen if you delete more than `cluster.indices.tombstones.size` + indices while an Elasticsearch node is offline. ``_ - :param index_uuid: The UUID of the dangling index - :param accept_data_loss: Must be set to true in order to delete the dangling - index + :param index_uuid: The UUID of the index to delete. Use the get dangling indices + API to find the UUID.
+ :param accept_data_loss: This parameter must be set to true to acknowledge that + it will no longer be possible to recover data from the dangling index. :param master_timeout: Specify timeout for connection to master :param timeout: Explicit operation timeout """ @@ -94,13 +98,20 @@ async def import_dangling_index( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Imports the specified dangling index + Import a dangling index. If Elasticsearch encounters index data that is absent + from the current cluster state, those indices are considered to be dangling. + For example, this can happen if you delete more than `cluster.indices.tombstones.size` + indices while an Elasticsearch node is offline. ``_ - :param index_uuid: The UUID of the dangling index - :param accept_data_loss: Must be set to true in order to import the dangling - index + :param index_uuid: The UUID of the index to import. Use the get dangling indices + API to locate the UUID. + :param accept_data_loss: This parameter must be set to true to import a dangling + index. Because Elasticsearch cannot know where the dangling index data came + from or determine which shard copies are fresh and which are stale, it cannot + guarantee that the imported data represents the latest state of the index + when it was last in the cluster. :param master_timeout: Specify timeout for connection to master :param timeout: Explicit operation timeout """ @@ -145,7 +156,11 @@ async def list_dangling_indices( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns all dangling indices. + Get the dangling indices. If Elasticsearch encounters index data that is absent + from the current cluster state, those indices are considered to be dangling. + For example, this can happen if you delete more than `cluster.indices.tombstones.size` + indices while an Elasticsearch node is offline. Use this API to list dangling + indices, which you can then import or delete. ``_ """ diff --git a/elasticsearch/_async/client/eql.py b/elasticsearch/_async/client/eql.py index 3944130a1..ed21ddb3d 100644 --- a/elasticsearch/_async/client/eql.py +++ b/elasticsearch/_async/client/eql.py @@ -174,6 +174,7 @@ async def get_status( "filter", "keep_alive", "keep_on_completion", + "max_samples_per_key", "result_position", "runtime_mappings", "size", @@ -211,6 +212,7 @@ async def search( ignore_unavailable: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, + max_samples_per_key: t.Optional[int] = None, pretty: t.Optional[bool] = None, result_position: t.Optional[t.Union[str, t.Literal["head", "tail"]]] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, @@ -244,6 +246,11 @@ async def search( in the response. :param keep_alive: :param keep_on_completion: + :param max_samples_per_key: By default, the response of a sample query contains + up to `10` samples, with one sample per unique set of join keys. Use the + `size` parameter to get a smaller or larger set of samples. To retrieve more + than one sample per set of join keys, use the `max_samples_per_key` parameter. + Pipes are not supported for sample queries. :param result_position: :param runtime_mappings: :param size: For basic queries, the maximum number of matching events to return.
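The new `max_samples_per_key` parameter applies to EQL sample queries, as in this hedged sketch (index and field names are hypothetical):

```python
resp = await es.eql.search(
    index="my-events",
    query="""
        sample by host.name
          [any where event.category == "network"]
          [any where event.category == "process"]
    """,
    max_samples_per_key=2,  # up to 2 samples per unique host.name
    size=10,                # number of join-key sets to return
)
```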
@@ -292,6 +299,8 @@ async def search( __body["keep_alive"] = keep_alive if keep_on_completion is not None: __body["keep_on_completion"] = keep_on_completion + if max_samples_per_key is not None: + __body["max_samples_per_key"] = max_samples_per_key if result_position is not None: __body["result_position"] = result_position if runtime_mappings is not None: diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index f60e358c9..e9ebc2900 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -137,7 +137,8 @@ async def analyze( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs analysis on a text string and returns the resulting tokens. + Get tokens from text analysis. The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) + on a text string and returns the resulting tokens. ``_ @@ -1271,7 +1272,6 @@ async def exists_alias( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, - local: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ @@ -1292,8 +1292,6 @@ async def exists_alias( as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. :param ignore_unavailable: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. - :param local: If `true`, the request retrieves information from the local node - only. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -1319,8 +1317,6 @@ async def exists_alias( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable - if local is not None: - __query["local"] = local if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -1863,7 +1859,6 @@ async def get_alias( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, - local: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1885,8 +1880,6 @@ async def get_alias( as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. - :param local: If `true`, the request retrieves information from the local node - only. 
""" __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and name not in SKIP_IN_PATH: @@ -1914,8 +1907,6 @@ async def get_alias( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable - if local is not None: - __query["local"] = local if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -2809,14 +2800,14 @@ async def put_alias( ) @_rewrite_parameters( - body_fields=("data_retention", "downsampling"), + body_name="lifecycle", ) async def put_data_lifecycle( self, *, name: t.Union[str, t.Sequence[str]], - data_retention: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - downsampling: t.Optional[t.Mapping[str, t.Any]] = None, + lifecycle: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ @@ -2831,7 +2822,6 @@ async def put_data_lifecycle( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ Update data stream lifecycles. Update the data stream lifecycle of the specified @@ -2841,13 +2831,7 @@ async def put_data_lifecycle( :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. - :param data_retention: If defined, every document added to this data stream will - be stored at least for this time frame. Any time after this duration the - document could be deleted. When empty, every document in this data stream - will be stored indefinitely. - :param downsampling: If defined, every backing index will execute the configured - downsampling configuration after the backing index is not the data stream - write index anymore. + :param lifecycle: :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `hidden`, `open`, `closed`, `none`. @@ -2859,10 +2843,15 @@ async def put_data_lifecycle( """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") + if lifecycle is None and body is None: + raise ValueError( + "Empty value passed for parameters 'lifecycle' and 'body', one of them should be set." 
+ ) + elif lifecycle is not None and body is not None: + raise ValueError("Cannot set both 'lifecycle' and 'body'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_lifecycle' __query: t.Dict[str, t.Any] = {} - __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: @@ -2877,16 +2866,8 @@ async def put_data_lifecycle( __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout - if not __body: - if data_retention is not None: - __body["data_retention"] = data_retention - if downsampling is not None: - __body["downsampling"] = downsampling - if not __body: - __body = None # type: ignore[assignment] - __headers = {"accept": "application/json"} - if __body is not None: - __headers["content-type"] = "application/json" + __body = lifecycle if lifecycle is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, @@ -3864,7 +3845,6 @@ async def segments( human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, - verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ Returns low-level information about the Lucene segments in index shards. For @@ -3884,7 +3864,6 @@ async def segments( as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. - :param verbose: If `true`, the request returns a verbose response. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: @@ -3908,8 +3887,6 @@ async def segments( __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty - if verbose is not None: - __query["verbose"] = verbose __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index cf86f37a6..be77a4c69 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -3368,7 +3368,7 @@ async def put_datafeed( Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval - (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') + (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. 
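After the `body_name="lifecycle"` refactor of `put_data_lifecycle` above, callers pass the lifecycle object whole rather than as separate `data_retention` and `downsampling` keyword arguments. A minimal sketch (data stream name assumed):

```python
await es.indices.put_data_lifecycle(
    name="my-data-stream",
    lifecycle={"data_retention": "7d"},  # previously a top-level keyword argument
)
```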
If you provide secondary authorization diff --git a/elasticsearch/_async/client/query_rules.py b/elasticsearch/_async/client/query_rules.py index 5f9a931ee..884d2a7ab 100644 --- a/elasticsearch/_async/client/query_rules.py +++ b/elasticsearch/_async/client/query_rules.py @@ -382,3 +382,56 @@ async def put_ruleset( endpoint_id="query_rules.put_ruleset", path_parts=__path_parts, ) + + @_rewrite_parameters( + body_fields=("match_criteria",), + ) + async def test( + self, + *, + ruleset_id: str, + match_criteria: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Test a query ruleset. Evaluate match criteria against a query ruleset to identify + the rules that would match that criteria. + + ``_ + + :param ruleset_id: The unique identifier of the query ruleset to be tested + :param match_criteria: The match criteria to apply to rules in the given query + ruleset. + """ + if ruleset_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'ruleset_id'") + if match_criteria is None and body is None: + raise ValueError("Empty value passed for parameter 'match_criteria'") + __path_parts: t.Dict[str, str] = {"ruleset_id": _quote(ruleset_id)} + __path = f'/_query_rules/{__path_parts["ruleset_id"]}/_test' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if match_criteria is not None: + __body["match_criteria"] = match_criteria + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="query_rules.test", + path_parts=__path_parts, + ) diff --git a/elasticsearch/_async/client/security.py b/elasticsearch/_async/client/security.py index b330942a7..b005dfae7 100644 --- a/elasticsearch/_async/client/security.py +++ b/elasticsearch/_async/client/security.py @@ -44,7 +44,8 @@ async def activate_user_profile( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a user profile on behalf of another user. + Activate a user profile. Create or update a user profile on behalf of another + user. ``_ @@ -144,9 +145,9 @@ async def bulk_delete_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The role management APIs are generally the preferred way to manage roles, rather - than using file-based role management. The bulk delete roles API cannot delete - roles that are defined in roles files. + Bulk delete roles. The role management APIs are generally the preferred way to + manage roles, rather than using file-based role management. The bulk delete roles + API cannot delete roles that are defined in roles files. ``_ @@ -202,9 +203,9 @@ async def bulk_put_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The role management APIs are generally the preferred way to manage roles, rather - than using file-based role management. The bulk create or update roles API cannot - update roles that are defined in roles files. + Bulk create or update roles.
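A usage sketch for the new `query_rules.test` endpoint shown above; the ruleset ID and criteria keys are placeholders and must match what the ruleset's rules define:

```python
resp = await es.query_rules.test(
    ruleset_id="my-ruleset",
    match_criteria={"user_query": "pugs"},
)
print(resp["total_matched_rules"])
```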
The role management APIs are generally the preferred + way to manage roles, rather than using file-based role management. The bulk create + or update roles API cannot update roles that are defined in roles files. ``_ @@ -262,7 +263,8 @@ async def change_password( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Changes the passwords of users in the native realm and built-in users. + Change passwords. Change the passwords of users in the native realm and built-in + users. ``_ @@ -324,8 +326,8 @@ async def clear_api_key_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts a subset of all entries from the API key cache. The cache is also automatically - cleared on state changes of the security index. + Clear the API key cache. Evict a subset of all entries from the API key cache. + The cache is also automatically cleared on state changes of the security index. ``_ @@ -366,7 +368,9 @@ async def clear_cached_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts application privileges from the native application privileges cache. + Clear the privileges cache. Evict privileges from the native application privilege + cache. The cache is also automatically cleared for applications that have their + privileges updated. ``_ @@ -407,8 +411,8 @@ async def clear_cached_realms( usernames: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts users from the user cache. Can completely clear the cache or evict specific - users. + Clear the user cache. Evict users from the user cache. You can completely clear + the cache or evict specific users. ``_ @@ -451,7 +455,7 @@ async def clear_cached_roles( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts roles from the native role cache. + Clear the roles cache. Evict roles from the native role cache. ``_ @@ -493,7 +497,8 @@ async def clear_cached_service_tokens( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts tokens from the service account token caches. + Clear service account token caches. Evict a subset of all entries from the service + account token caches. ``_ @@ -552,7 +557,7 @@ async def create_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an API key. Creates an API key for access without requiring basic authentication. + Create an API key. Create an API key for access without requiring basic authentication. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You @@ -612,6 +617,90 @@ async def create_api_key( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("access", "name", "expiration", "metadata"), + ) + async def create_cross_cluster_api_key( + self, + *, + access: t.Optional[t.Mapping[str, t.Any]] = None, + name: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + metadata: t.Optional[t.Mapping[str, t.Any]] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Create a cross-cluster API key. 
Create an API key of the `cross_cluster` type + for the API key based remote cluster access. A `cross_cluster` API key cannot + be used to authenticate through the REST interface. IMPORTANT: To authenticate + this request you must use a credential that is not an API key. Even if you use + an API key that has the required privilege, the API returns an error. Cross-cluster + API keys are created by the Elasticsearch API key service, which is automatically + enabled. NOTE: Unlike REST API keys, a cross-cluster API key does not capture + permissions of the authenticated user. The API key’s effective permission is + exactly as specified with the `access` property. A successful request returns + a JSON structure that contains the API key, its unique ID, and its name. If applicable, + it also returns expiration information for the API key in milliseconds. By default, + API keys never expire. You can specify expiration information when you create + the API keys. Cross-cluster API keys can only be updated with the update cross-cluster + API key API. Attempting to update them with the update REST API key API or the + bulk update REST API keys API will result in an error. + + ``_ + + :param access: The access to be granted to this API key. The access is composed + of permissions for cross-cluster search and cross-cluster replication. At + least one of them must be specified. NOTE: No explicit privileges should + be specified for either search or replication access. The creation process + automatically converts the access specification to a role descriptor which + has relevant privileges assigned accordingly. + :param name: Specifies the name for this API key. + :param expiration: Expiration time for the API key. By default, API keys never + expire. + :param metadata: Arbitrary metadata that you want to associate with the API key. + It supports nested data structure. Within the metadata object, keys beginning + with `_` are reserved for system usage. + """ + if access is None and body is None: + raise ValueError("Empty value passed for parameter 'access'") + if name is None and body is None: + raise ValueError("Empty value passed for parameter 'name'") + __path_parts: t.Dict[str, str] = {} + __path = "/_security/cross_cluster/api_key" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if access is not None: + __body["access"] = access + if name is not None: + __body["name"] = name + if expiration is not None: + __body["expiration"] = expiration + if metadata is not None: + __body["metadata"] = metadata + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.create_cross_cluster_api_key", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def create_service_token( self, namespace: str, service: str, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a service accounts token for access without requiring basic authentication. + Create a service account token. Create a service account token for access without + requiring basic authentication.
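A sketch of the new cross-cluster API key endpoint above, granting search-only access; the key name, index patterns, and metadata are illustrative:

```python
resp = await es.security.create_cross_cluster_api_key(
    name="my-cross-cluster-key",
    access={
        # search and/or replication may be granted; no explicit privileges here,
        # since the access specification is converted to a role descriptor
        "search": [{"names": ["logs-*", "metrics-*"]}],
    },
    expiration="30d",
    metadata={"env": "staging"},
)
print(resp["encoded"])  # configure this on the cluster that connects remotely
```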
``_ @@ -698,7 +788,7 @@ async def delete_privileges( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes application privileges. + Delete application privileges. ``_ @@ -754,7 +844,7 @@ async def delete_role( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes roles in the native realm. + Delete roles. Delete roles in the native realm. ``_ @@ -802,7 +892,7 @@ async def delete_role_mapping( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes role mappings. + Delete role mappings. ``_ @@ -852,7 +942,8 @@ async def delete_service_token( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a service account token. + Delete service account tokens. Delete service account tokens for a service in + a specified namespace. ``_ @@ -910,7 +1001,7 @@ async def delete_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes users from the native realm. + Delete users. Delete users from the native realm. ``_ @@ -958,7 +1049,7 @@ async def disable_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Disables users in the native realm. + Disable users. Disable users in the native realm. ``_ @@ -1006,7 +1097,8 @@ async def disable_user_profile( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Disables a user profile so it's not visible in user profile searches. + Disable a user profile. Disable user profiles so that they are not visible in + user profile searches. ``_ @@ -1054,7 +1146,7 @@ async def enable_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables users in the native realm. + Enable users. Enable users in the native realm. ``_ @@ -1102,7 +1194,8 @@ async def enable_user_profile( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables a user profile so it's visible in user profile searches. + Enable a user profile. Enable user profiles to make them visible in user profile + searches. ``_ @@ -1146,8 +1239,8 @@ async def enroll_kibana( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables a Kibana instance to configure itself for communication with a secured - Elasticsearch cluster. + Enroll Kibana. Enable a Kibana instance to configure itself for communication + with a secured Elasticsearch cluster. ``_ """ @@ -1182,7 +1275,8 @@ async def enroll_node( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows a new node to join an existing cluster with security features enabled. + Enroll a node. Enroll a new node to allow it to join an existing cluster with + security features enabled. ``_ """ @@ -1303,8 +1397,8 @@ async def get_builtin_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the list of cluster privileges and index privileges that are available - in this version of Elasticsearch. + Get builtin privileges. Get the list of cluster privileges and index privileges + that are available in this version of Elasticsearch. ``_ """ @@ -1341,7 +1435,7 @@ async def get_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves application privileges. + Get application privileges. ``_ @@ -1388,9 +1482,7 @@ async def get_role( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The role management APIs are generally the preferred way to manage roles, rather - than using file-based role management. The get roles API cannot retrieve roles - that are defined in roles files. + Get roles. Get roles in the native realm. 
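A short sketch of these role helpers (the role name is illustrative; calling `get_role` without a `name` returns all roles in the native realm):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200", basic_auth=("elastic", "<password>"))
    roles = await client.security.get_role()  # omit `name` to list all native-realm roles
    if "old-reporting-role" in roles:         # illustrative role name
        await client.security.delete_role(name="old-reporting-role")
    await client.close()

asyncio.run(main())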
``_ @@ -1435,7 +1527,10 @@ async def get_role_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves role mappings. + Get role mappings. Role mappings define which roles are assigned to each user. + The role mapping APIs are generally the preferred way to manage role mappings + rather than using role mapping files. The get role mappings API cannot retrieve + role mappings that are defined in role mapping files. ``_ @@ -1483,7 +1578,8 @@ async def get_service_accounts( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - This API returns a list of service accounts that match the provided path parameter(s). + Get service accounts. Get a list of service accounts that match the provided + path parameters. ``_ @@ -1534,7 +1630,7 @@ async def get_service_credentials( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information of all service credentials for a service account. + Get service account credentials. ``_ @@ -1602,7 +1698,7 @@ async def get_token( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a bearer token for access without requiring basic authentication. + Get a token. Create a bearer token for access without requiring basic authentication. ``_ @@ -1661,7 +1757,7 @@ async def get_user( with_profile_uid: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about users in the native realm and built-in users. + Get users. Get information about users in the native realm and built-in users. ``_ @@ -1712,7 +1808,7 @@ async def get_user_privileges( username: t.Optional[t.Union[None, str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves security privileges for the logged in user. + Get user privileges. ``_ @@ -1762,7 +1858,7 @@ async def get_user_profile( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a user's profile using the unique profile ID. + Get a user profile. Get a user's profile using the unique profile ID. ``_ @@ -1826,21 +1922,21 @@ async def grant_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an API key on behalf of another user. This API is similar to Create API - keys, however it creates the API key for a user that is different than the user - that runs the API. The caller must have authentication credentials (either an - access token, or a username and password) for the user on whose behalf the API - key will be created. It is not possible to use this API to create an API key - without that user’s credentials. The user, for whom the authentication credentials - is provided, can optionally "run as" (impersonate) another user. In this case, - the API key will be created on behalf of the impersonated user. This API is intended - be used by applications that need to create and manage API keys for end users, - but cannot guarantee that those users have permission to create API keys on their - own behalf. A successful grant API key API call returns a JSON structure that - contains the API key, its unique id, and its name. If applicable, it also returns - expiration information for the API key in milliseconds. By default, API keys - never expire. You can specify expiration information when you create the API - keys. + Grant an API key. Create an API key on behalf of another user. This API is similar + to the create API keys API, however it creates the API key for a user that is + different than the user that runs the API. 
The caller must have authentication + credentials (either an access token, or a username and password) for the user + on whose behalf the API key will be created. It is not possible to use this API + to create an API key without that user’s credentials. The user, for whom the + authentication credentials are provided, can optionally "run as" (impersonate) + another user. In this case, the API key will be created on behalf of the impersonated + user. This API is intended to be used by applications that need to create and manage + API keys for end users, but cannot guarantee that those users have permission + to create API keys on their own behalf. A successful grant API key API call returns + a JSON structure that contains the API key, its unique id, and its name. If applicable, + it also returns expiration information for the API key in milliseconds. By default, + API keys never expire. You can specify expiration information when you create + the API keys. ``_ @@ -1980,8 +2076,8 @@ async def has_privileges( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Check user privileges. Determines whether the specified user has a specified - list of privileges. + Check user privileges. Determine whether the specified user has a specified list + of privileges. ``_ @@ -2040,8 +2136,8 @@ async def has_privileges_user_profile( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Determines whether the users associated with the specified profile IDs have all - the requested privileges. + Check user profile privileges. Determine whether the users associated with the + specified user profile IDs have all the requested privileges. ``_ @@ -2100,13 +2196,17 @@ async def invalidate_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidate API keys. Invalidates one or more API keys. The `manage_api_key` privilege - allows deleting any API keys. The `manage_own_api_key` only allows deleting API - keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, - an invalidation request must be issued in one of the three formats: - Set the - parameter `owner=true`. - Or, set both `username` and `realm_name` to match the - user’s identity. - Or, if the request is issued by an API key, i.e. an API key - invalidates itself, specify its ID in the `ids` field. + Invalidate API keys. This API invalidates API keys created by the create API + key or grant API key APIs. Invalidated API keys fail authentication, but they + can still be viewed using the get API key information and query API key information + APIs, for at least the configured retention period, until they are automatically + deleted. The `manage_api_key` privilege allows deleting any API keys. The `manage_own_api_key` + only allows deleting API keys that are owned by the user. In addition, with the + `manage_own_api_key` privilege, an invalidation request must be issued in one + of the three formats: - Set the parameter `owner=true`. - Or, set both `username` + and `realm_name` to match the user’s identity. - Or, if the request is issued + by an API key, that is to say an API key invalidates itself, specify its ID in + the `ids` field. ``_ @@ -2177,7 +2277,12 @@ async def invalidate_token( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidates one or more access tokens or refresh tokens. + Invalidate a token.
The access tokens returned by the get token API have a finite + period of time for which they are valid. After that time period, they can no + longer be used. The time period is defined by the `xpack.security.authc.token.timeout` + setting. The refresh tokens returned by the get token API are only valid for + 24 hours. They can also be used exactly once. If you want to invalidate one or + more access or refresh tokens immediately, use this invalidate token API. ``_ @@ -2237,7 +2342,7 @@ async def put_privileges( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds or updates application privileges. + Create or update application privileges. ``_ @@ -2380,9 +2485,10 @@ async def put_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The role management APIs are generally the preferred way to manage roles, rather - than using file-based role management. The create or update roles API cannot - update roles that are defined in roles files. + Create or update roles. The role management APIs are generally the preferred + way to manage roles in the native realm, rather than using file-based role management. + The create or update roles API cannot update roles that are defined in roles + files. File-based role management is not available in Elastic Serverless. ``_ @@ -2491,7 +2597,14 @@ async def put_role_mapping( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates and updates role mappings. + Create or update role mappings. Role mappings define which roles are assigned + to each user. Each mapping has rules that identify users and a list of roles + that are granted to those users. The role mapping APIs are generally the preferred + way to manage role mappings rather than using role mapping files. The create + or update role mappings API cannot update role mappings that are defined in role + mapping files. This API does not create roles. Rather, it maps users to existing + roles. Roles can be created by using the create or update roles API or roles + files. ``_ @@ -2578,8 +2691,9 @@ async def put_user( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds and updates users in the native realm. These users are commonly referred - to as native users. + Create or update users. A password is required for adding a new user but is optional + when updating an existing user. To change a user’s password without updating + any other fields, use the change password API. ``_ @@ -2676,7 +2790,7 @@ async def query_api_keys( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Query API keys. Retrieves a paginated list of API keys and their information. + Find API keys with a query. Get a paginated list of API keys and their information. You can optionally filter the results with a query. ``_ @@ -2803,8 +2917,8 @@ async def query_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves roles in a paginated manner. You can optionally filter the results - with a query. + Find roles with a query. Get roles in a paginated manner. You can optionally + filter the results with a query. ``_ @@ -2889,8 +3003,8 @@ async def query_user( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information for Users in a paginated manner. You can optionally filter - the results with a query. + Find users with a query. Get information for users in a paginated manner. 
You + can optionally filter the results with a query. ``_ @@ -2968,7 +3082,7 @@ async def saml_authenticate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Submits a SAML Response message to Elasticsearch for consumption. + Authenticate SAML. Submits a SAML response message to Elasticsearch for consumption. ``_ @@ -3030,7 +3144,7 @@ async def saml_complete_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Verifies the logout response sent from the SAML IdP. + Logout of SAML completely. Verifies the logout response sent from the SAML IdP. ``_ @@ -3096,7 +3210,7 @@ async def saml_invalidate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Submits a SAML LogoutRequest message to Elasticsearch for consumption. + Invalidate SAML. Submits a SAML LogoutRequest message to Elasticsearch for consumption. ``_ @@ -3163,7 +3277,7 @@ async def saml_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Submits a request to invalidate an access token and refresh token. + Logout of SAML. Submits a request to invalidate an access token and refresh token. ``_ @@ -3220,8 +3334,8 @@ async def saml_prepare_authentication( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a SAML authentication request (<saml:AuthnRequest>) as a URL string, based - on the configuration of the respective SAML realm in Elasticsearch. + Prepare SAML authentication. Creates a SAML authentication request (``<saml:AuthnRequest>``) + as a URL string, based on the configuration of the respective SAML realm in Elasticsearch. ``_ @@ -3276,7 +3390,8 @@ async def saml_service_provider_metadata( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Generate SAML metadata for a SAML 2.0 Service Provider. + Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 + Service Provider. ``_ @@ -3322,7 +3437,8 @@ async def suggest_user_profiles( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get suggestions for user profiles that match specified search criteria. + Suggest a user profile. Get suggestions for user profiles that match specified + search criteria. ``_ @@ -3460,6 +3576,74 @@ async def update_api_key( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("access", "expiration", "metadata"), + ) + async def update_cross_cluster_api_key( + self, + *, + id: str, + access: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + metadata: t.Optional[t.Mapping[str, t.Any]] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Update a cross-cluster API key. Update the attributes of an existing cross-cluster + API key, which is used for API key based remote cluster access. + + ``_ + + :param id: The ID of the cross-cluster API key to update. + :param access: The access to be granted to this API key. The access is composed + of permissions for cross cluster search and cross cluster replication. At + least one of them must be specified. When specified, the new access assignment + fully replaces the previously assigned access. + :param expiration: Expiration time for the API key. By default, API keys never + expire.
This property can be omitted to leave the value unchanged. + :param metadata: Arbitrary metadata that you want to associate with the API key. + It supports nested data structure. Within the metadata object, keys beginning + with `_` are reserved for system usage. When specified, this information + fully replaces metadata previously associated with the API key. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + if access is None and body is None: + raise ValueError("Empty value passed for parameter 'access'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_security/cross_cluster/api_key/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if access is not None: + __body["access"] = access + if expiration is not None: + __body["expiration"] = expiration + if metadata is not None: + __body["metadata"] = metadata + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.update_cross_cluster_api_key", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("data", "labels"), ) @@ -3481,8 +3665,8 @@ async def update_user_profile_data( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates specific data for the user profile that's associated with the specified - unique ID. + Update user profile data. Update specific data for the user profile that is associated + with a unique ID. ``_ diff --git a/elasticsearch/_async/client/ssl.py b/elasticsearch/_async/client/ssl.py index 5b9397893..987abc567 100644 --- a/elasticsearch/_async/client/ssl.py +++ b/elasticsearch/_async/client/ssl.py @@ -35,8 +35,23 @@ async def certificates( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the X.509 certificates used to encrypt communications - in the cluster. + Get SSL certificates. Get information about the X.509 certificates that are used + to encrypt communications in the cluster. The API returns a list that includes + certificates from all TLS contexts including: - Settings for transport and HTTP + interfaces - TLS settings that are used within authentication realms - TLS settings + for remote monitoring exporters The list includes certificates that are used + for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` + and `xpack.security.transport.ssl.certificate_authorities` settings. It also + includes certificates that are used for configuring server identity, such as + the `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate` settings. + The list does not include certificates that are sourced from the default SSL + context of the Java Runtime Environment (JRE), even if those certificates are + in use within Elasticsearch. NOTE: When a PKCS#11 token is configured as the + truststore of the JRE, the API returns all the certificates that are included + in the PKCS#11 token irrespective of whether these are used in the Elasticsearch + TLS configuration.
If Elasticsearch is configured to use a keystore or truststore, + the API output includes all certificates in that store, even though some of the + certificates might not be in active use within the cluster. ``_ """ diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index 24c88d49a..eb1b11219 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -739,7 +739,8 @@ def clear_scroll( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears the search context and results for a scrolling search. + Clear a scrolling search. Clear the search context and results for a scrolling + search. ``_ @@ -789,7 +790,11 @@ def close_point_in_time( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Closes a point-in-time. + Close a point in time. A point in time must be opened explicitly before being + used in search requests. The `keep_alive` parameter tells Elasticsearch how long + it should persist. A point in time is automatically closed when the `keep_alive` + period has elapsed. However, keeping points in time has a cost; close them as + soon as they are no longer required for search requests. ``_ @@ -1402,7 +1407,10 @@ def delete_by_query_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Changes the number of requests per second for a particular Delete By Query operation. + Throttle a delete by query operation. Change the number of requests per second + for a particular delete by query operation. Rethrottling that speeds up the query + takes effect immediately but rethrottling that slows down the query takes effect + after completing the current batch to prevent scroll timeouts. ``_ @@ -1837,10 +1845,11 @@ def field_caps( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The field capabilities API returns the information about the capabilities of - fields among multiple indices. The field capabilities API returns runtime fields - like any other field. For example, a runtime field with a type of keyword is - returned as any other field that belongs to the `keyword` family. + Get the field capabilities. Get information about the capabilities of fields + among multiple indices. For data streams, the API returns field capabilities + among the stream’s backing indices. It returns runtime fields like any other + field. For example, a runtime field with a type of keyword is returned the same + as any other field that belongs to the `keyword` family. ``_ @@ -2088,7 +2097,7 @@ def get_script_context( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns all script contexts. + Get script contexts. Get a list of supported script contexts and their methods. ``_ """ @@ -2123,7 +2132,7 @@ def get_script_languages( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns available script types, languages and contexts + Get script languages. Get a list of available script types, languages, and contexts. ``_ """ @@ -2503,7 +2512,15 @@ def knn_search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs a kNN search. + Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option + in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector + field and return the matching documents.
Given a query vector, the API finds + the k closest vectors and returns those documents as search hits. Elasticsearch + uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, + HNSW is an approximate method that sacrifices result accuracy for improved search + speed. This means the results returned are not always the true k closest neighbors. + The kNN search API supports restricting the search using a filter. The search + will return the top k documents that also match the filter query. ``_ @@ -2604,7 +2621,10 @@ def mget( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to get multiple documents in one request. + Get multiple documents. Get multiple JSON documents by ID from one or more indices. + If you specify an index in the request URI, you only need to specify the document + IDs in the request body. To ensure fast responses, this multi get (mget) API + responds with partial results if one or more shards fail. ``_ @@ -2725,7 +2745,13 @@ def msearch( typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to execute several search operations in one request. + Run multiple searches. The format of the request is similar to the bulk API format + and makes use of the newline delimited JSON (NDJSON) format. The structure is + as follows: ``` header\\n body\\n header\\n body\\n ``` This structure is specifically + optimized to reduce parsing if a specific search ends up redirected to another + node. IMPORTANT: The final line of data must end with a newline character `\\n`. + Each newline character may be preceded by a carriage return `\\r`. When sending + requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. ``_ @@ -2857,7 +2883,7 @@ def msearch_template( typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Runs multiple templated searches with a single request. + Run multiple templated searches. ``_ @@ -2952,7 +2978,11 @@ def mtermvectors( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns multiple termvectors in one request. + Get multiple term vectors. You can specify existing documents by index and ID + or provide artificial documents in the body of the request. You can specify the + index in the request body or request URI. The response contains a `docs` array + with all the fetched termvectors. Each element has the structure provided by + the termvectors API. ``_ @@ -3063,13 +3093,15 @@ def open_point_in_time( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - A search request by default executes against the most recent visible data of - the target indices, which is called point in time. Elasticsearch pit (point in - time) is a lightweight view into the state of the data as it existed when initiated. - In some cases, it’s preferred to perform multiple search requests using the same - point in time. For example, if refreshes happen between `search_after` requests, - then the results of those requests might not be consistent as changes happening - between searches are only visible to the more recent point in time. + Open a point in time. A search request by default runs against the most recent + visible data of the target indices, which is called point in time. Elasticsearch + pit (point in time) is a lightweight view into the state of the data as it existed + when initiated. 
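Concretely, the flow reads like this (a minimal sketch with the sync client; the index name, sort field, and `keep_alive` values are illustrative):

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", basic_auth=("elastic", "<password>"))

pit = client.open_point_in_time(index="my-index", keep_alive="1m")

# Search against the PIT rather than the index, so every page sees the
# same view of the data; assumes the index holds at least one document.
resp = client.search(
    pit={"id": pit["id"], "keep_alive": "1m"},
    sort=[{"@timestamp": "asc"}],
    size=100,
)
search_after = resp["hits"]["hits"][-1]["sort"]  # cursor for the next page

client.close_point_in_time(id=pit["id"])  # release the context when done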
In some cases, it’s preferred to perform multiple search requests + using the same point in time. For example, if refreshes happen between `search_after` + requests, then the results of those requests might not be consistent as changes + happening between searches are only visible to the more recent point in time. + A point in time must be opened explicitly before being used in search requests. + The `keep_alive` parameter tells Elasticsearch how long it should persist. ``_ @@ -3236,8 +3268,8 @@ def rank_eval( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables you to evaluate the quality of ranked search results over a set of typical - search queries. + Evaluate ranked search results. Evaluate the quality of ranked search results + over a set of typical search queries. ``_ @@ -3429,7 +3461,8 @@ def reindex_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Copies documents from a source to a destination. + Throttle a reindex operation. Change the number of requests per second for a + particular reindex operation. ``_ @@ -3480,7 +3513,7 @@ def render_search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Renders a search template as a search request body. + Render a search template. Render a search template as a search request body. ``_ @@ -3606,7 +3639,22 @@ def scroll( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to retrieve a large numbers of results from a single search request. + Run a scrolling search. IMPORTANT: The scroll API is no longer recommended for + deep pagination. If you need to preserve the index state while paging through + more than 10,000 hits, use the `search_after` parameter with a point in time + (PIT). The scroll API gets large sets of results from a single scrolling search + request. To get the necessary scroll ID, submit a search API request that includes + an argument for the `scroll` query parameter. The `scroll` parameter indicates + how long Elasticsearch should retain the search context for the request. The + search response returns a scroll ID in the `_scroll_id` response body parameter. + You can then use the scroll ID with the scroll API to retrieve the next batch + of results for the request. If the Elasticsearch security features are enabled, + the access to the results of a specific scroll ID is restricted to the user or + API key that submitted the search. You can also use the scroll API to specify + a new scroll parameter that extends or shortens the retention period for the + search context. IMPORTANT: Results from a scrolling search reflect the state + of the index at the time of the initial search request. Subsequent indexing or + document changes only affect later search and scroll requests. ``_ @@ -3796,9 +3844,9 @@ def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns search hits that match the query defined in the request. You can provide - search queries using the `q` query string parameter or the request body. If both - are specified, only the query parameter is used. + Run a search. Get search hits that match the query defined in the request. You + can provide search queries using the `q` query string parameter or the request + body. If both are specified, only the query parameter is used. ``_ @@ -4228,7 +4276,7 @@ def search_mvt( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> BinaryApiResponse: """ - Search a vector tile.
Searches a vector tile for geospatial values. + Search a vector tile. Search a vector tile for geospatial values. ``_ @@ -4382,8 +4430,10 @@ def search_shards( routing: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about the indices and shards that a search request would - be executed against. + Get the search shards. Get the indices and shards that a search request would + be run against. This information can be useful for working out issues or planning + optimizations with routing and shard preferences. When filtered aliases are used, + the filter is returned as part of the indices section. ``_ @@ -4484,7 +4534,7 @@ def search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Runs a search with a search template. + Run a search with a search template. ``_ @@ -4616,9 +4666,15 @@ def terms_enum( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The terms enum API can be used to discover terms in the index that begin with - the provided string. It is designed for low-latency look-ups used in auto-complete - scenarios. + Get terms in an index. Discover terms that match a partial string in an index. + This "terms enum" API is designed for low-latency look-ups used in auto-complete + scenarios. If the `complete` property in the response is false, the returned + terms set may be incomplete and should be treated as approximate. This can occur + due to a few reasons, such as a request timeout or a node error. NOTE: The terms + enum API may return terms from deleted documents. Deleted documents are initially + only marked as deleted. It is not until their segments are merged that documents + are actually deleted. Until that happens, the terms enum API will return terms + from these documents. ``_ @@ -4716,8 +4772,8 @@ def termvectors( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get term vector information. Returns information and statistics about terms in - the fields of a particular document. + Get term vector information. Get information and statistics about terms in the + fields of a particular document. ``_ @@ -5222,7 +5278,10 @@ def update_by_query_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Changes the number of requests per second for a particular Update By Query operation. + Throttle an update by query operation. Change the number of requests per second + for a particular update by query operation. Rethrottling that speeds up the query + takes effect immediately but rethrottling that slows down the query takes effect + after completing the current batch to prevent scroll timeouts. ``_ diff --git a/elasticsearch/_sync/client/async_search.py b/elasticsearch/_sync/client/async_search.py index 8fbf3188d..427db59b2 100644 --- a/elasticsearch/_sync/client/async_search.py +++ b/elasticsearch/_sync/client/async_search.py @@ -36,11 +36,11 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async search by identifier. If the search is still running, the search - request will be cancelled. Otherwise, the saved search results are deleted. If - the Elasticsearch security features are enabled, the deletion of a specific async - search is restricted to: the authenticated user that submitted the original search - request; users that have the `cancel_task` cluster privilege. + Delete an async search. If the asynchronous search is still running, it is cancelled.
+ Otherwise, the saved search results are deleted. If the Elasticsearch security + features are enabled, the deletion of a specific async search is restricted to: + the authenticated user that submitted the original search request; users that + have the `cancel_task` cluster privilege. ``_ @@ -85,9 +85,9 @@ def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the results of a previously submitted async search request given its - identifier. If the Elasticsearch security features are enabled, access to the - results of a specific async search is restricted to the user or API key that + Get async search results. Retrieve the results of a previously submitted asynchronous + search request. If the Elasticsearch security features are enabled, access to + the results of a specific async search is restricted to the user or API key that submitted it. ``_ @@ -148,10 +148,10 @@ def status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get async search status Retrieves the status of a previously submitted async - search request given its identifier, without retrieving search results. If the - Elasticsearch security features are enabled, use of this API is restricted to - the `monitoring_user` role. + Get the async search status. Get the status of a previously submitted async search + request given its identifier, without retrieving search results. If the Elasticsearch + security features are enabled, use of this API is restricted to the `monitoring_user` + role. ``_ @@ -323,15 +323,15 @@ def submit( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Runs a search request asynchronously. When the primary sort of the results is - an indexed field, shards get sorted based on minimum and maximum value that they - hold for that field, hence partial results become available following the sort - criteria that was requested. Warning: Async search does not support scroll nor - search requests that only include the suggest section. By default, Elasticsearch - doesn’t allow you to store an async search response larger than 10Mb and an attempt - to do this results in an error. The maximum allowed size for a stored async search - response can be set by changing the `search.max_async_search_response_size` cluster - level setting. + Run an async search. When the primary sort of the results is an indexed field, + shards get sorted based on minimum and maximum value that they hold for that + field. Partial results become available following the sort criteria that was + requested. Warning: Asynchronous search does not support scroll or search requests + that include only the suggest section. By default, Elasticsearch does not allow + you to store an async search response larger than 10Mb and an attempt to do this + results in an error. The maximum allowed size for a stored async search response + can be set by changing the `search.max_async_search_response_size` cluster level + setting. ``_ diff --git a/elasticsearch/_sync/client/autoscaling.py b/elasticsearch/_sync/client/autoscaling.py index 2b2a23fc2..b271100c4 100644 --- a/elasticsearch/_sync/client/autoscaling.py +++ b/elasticsearch/_sync/client/autoscaling.py @@ -36,7 +36,8 @@ def delete_autoscaling_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. + Delete an autoscaling policy. 
NOTE: This feature is designed for indirect use + by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. ``_ @@ -76,8 +77,18 @@ def get_autoscaling_capacity( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets the current autoscaling capacity based on the configured autoscaling policy. - Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + Get the autoscaling capacity. NOTE: This feature is designed for indirect use + by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. + Direct use is not supported. This API gets the current autoscaling capacity based + on the configured autoscaling policy. It will return information to size the + cluster appropriately to the current workload. The `required_capacity` is calculated + as the maximum of the `required_capacity` result of all individual deciders that + are enabled for the policy. The operator should verify that the `current_nodes` + match the operator’s knowledge of the cluster to avoid making autoscaling decisions + based on stale or incomplete information. The response contains decider-specific + information you can use to diagnose how and why autoscaling determined a certain + capacity was required. This information is provided for diagnosis only. Do not + use this information to make autoscaling decisions. ``_ """ @@ -113,7 +124,8 @@ def get_autoscaling_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. + Get an autoscaling policy. NOTE: This feature is designed for indirect use by + Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. ``_ @@ -158,8 +170,9 @@ def put_autoscaling_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new autoscaling policy. Designed for indirect use by ECE/ESS and ECK. - Direct use is not supported. + Create or update an autoscaling policy. NOTE: This feature is designed for indirect + use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on + Kubernetes. Direct use is not supported. 
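A sketch under the same indirect-use caveat (the policy name, node role, and use of the built-in `fixed` decider are illustrative, as are the connection details):

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", basic_auth=("elastic", "<password>"))

# Store a policy for hot-tier nodes using the built-in "fixed" decider.
client.autoscaling.put_autoscaling_policy(
    name="hot-tier-policy",
    policy={"roles": ["data_hot"], "deciders": {"fixed": {}}},
)

print(client.autoscaling.get_autoscaling_policy(name="hot-tier-policy"))
print(client.autoscaling.get_autoscaling_capacity())  # diagnostic output only, per the note above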
``_ diff --git a/elasticsearch/_sync/client/ccr.py b/elasticsearch/_sync/client/ccr.py index 79683d2a7..b7b2675b2 100644 --- a/elasticsearch/_sync/client/ccr.py +++ b/elasticsearch/_sync/client/ccr.py @@ -68,6 +68,8 @@ def delete_auto_follow_pattern( @_rewrite_parameters( body_fields=( "leader_index", + "remote_cluster", + "data_stream_name", "max_outstanding_read_requests", "max_outstanding_write_requests", "max_read_request_operation_count", @@ -78,29 +80,31 @@ def follow( "max_write_request_operation_count", "max_write_request_size", "read_poll_timeout", - "remote_cluster", + "settings", ), ) def follow( self, *, index: str, + leader_index: t.Optional[str] = None, + remote_cluster: t.Optional[str] = None, + data_stream_name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, - leader_index: t.Optional[str] = None, max_outstanding_read_requests: t.Optional[int] = None, max_outstanding_write_requests: t.Optional[int] = None, max_read_request_operation_count: t.Optional[int] = None, - max_read_request_size: t.Optional[str] = None, + max_read_request_size: t.Optional[t.Union[int, str]] = None, max_retry_delay: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_write_buffer_count: t.Optional[int] = None, - max_write_buffer_size: t.Optional[str] = None, + max_write_buffer_size: t.Optional[t.Union[int, str]] = None, max_write_request_operation_count: t.Optional[int] = None, - max_write_request_size: t.Optional[str] = None, + max_write_request_size: t.Optional[t.Union[int, str]] = None, pretty: t.Optional[bool] = None, read_poll_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - remote_cluster: t.Optional[str] = None, + settings: t.Optional[t.Mapping[str, t.Any]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, @@ -111,26 +115,51 @@ def follow( ``_ - :param index: The name of the follower index - :param leader_index: - :param max_outstanding_read_requests: - :param max_outstanding_write_requests: - :param max_read_request_operation_count: - :param max_read_request_size: - :param max_retry_delay: - :param max_write_buffer_count: - :param max_write_buffer_size: - :param max_write_request_operation_count: - :param max_write_request_size: - :param read_poll_timeout: - :param remote_cluster: - :param wait_for_active_shards: Sets the number of shard copies that must be active - before returning. Defaults to 0. Set to `all` for all shard copies, otherwise - set to any non-negative value less than or equal to the total number of copies - for the shard (number of replicas + 1) + :param index: The name of the follower index. + :param leader_index: The name of the index in the leader cluster to follow. + :param remote_cluster: The remote cluster containing the leader index. + :param data_stream_name: If the leader index is part of a data stream, the name + to which the local data stream for the followed index should be renamed. + :param max_outstanding_read_requests: The maximum number of outstanding read + requests from the remote cluster. + :param max_outstanding_write_requests: The maximum number of outstanding write + requests on the follower. + :param max_read_request_operation_count: The maximum number of operations to + pull per read from the remote cluster.
+ :param max_read_request_size: The maximum size in bytes per read of a batch + of operations pulled from the remote cluster. + :param max_retry_delay: The maximum time to wait before retrying an operation + that failed exceptionally. An exponential backoff strategy is employed when + retrying. + :param max_write_buffer_count: The maximum number of operations that can be queued + for writing. When this limit is reached, reads from the remote cluster will + be deferred until the number of queued operations goes below the limit. + :param max_write_buffer_size: The maximum total bytes of operations that can + be queued for writing. When this limit is reached, reads from the remote + cluster will be deferred until the total bytes of queued operations goes + below the limit. + :param max_write_request_operation_count: The maximum number of operations per + bulk write request executed on the follower. + :param max_write_request_size: The maximum total bytes of operations per bulk + write request executed on the follower. + :param read_poll_timeout: The maximum time to wait for new operations on the + remote cluster when the follower index is synchronized with the leader index. + When the timeout has elapsed, the poll for operations will return to the + follower so that it can update some statistics. Then the follower will immediately + attempt to read from the leader again. + :param settings: Settings to override from the leader index. + :param wait_for_active_shards: Specifies the number of shards to wait on being + active before responding. This defaults to waiting on none of the shards + to be active. A shard must be restored from the leader index before being + active. Restoring a follower shard requires transferring all the remote Lucene + segment files to the follower index.
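Example (a minimal sketch; the connection details, remote cluster alias, and index names are illustrative placeholders):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("https://localhost:9200", basic_auth=("elastic", "<password>"))

    # Create "follower-logs" locally, replicating "leader-logs" from the
    # cluster registered under the alias "remote_cluster_a".
    client.ccr.follow(
        index="follower-logs",
        leader_index="leader-logs",         # now a required body field
        remote_cluster="remote_cluster_a",  # now a required body field
        wait_for_active_shards=1,
    )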
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") + if leader_index is None and body is None: + raise ValueError("Empty value passed for parameter 'leader_index'") + if remote_cluster is None and body is None: + raise ValueError("Empty value passed for parameter 'remote_cluster'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ccr/follow' __query: t.Dict[str, t.Any] = {} @@ -148,6 +177,10 @@ def follow( if not __body: if leader_index is not None: __body["leader_index"] = leader_index + if remote_cluster is not None: + __body["remote_cluster"] = remote_cluster + if data_stream_name is not None: + __body["data_stream_name"] = data_stream_name if max_outstanding_read_requests is not None: __body["max_outstanding_read_requests"] = max_outstanding_read_requests if max_outstanding_write_requests is not None: @@ -174,8 +207,8 @@ def follow( __body["max_write_request_size"] = max_write_request_size if read_poll_timeout is not None: __body["read_poll_timeout"] = read_poll_timeout - if remote_cluster is not None: - __body["remote_cluster"] = remote_cluster + if settings is not None: + __body["settings"] = settings __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", diff --git a/elasticsearch/_sync/client/connector.py b/elasticsearch/_sync/client/connector.py index d097da30b..b35d858ff 100644 --- a/elasticsearch/_sync/client/connector.py +++ b/elasticsearch/_sync/client/connector.py @@ -36,7 +36,8 @@ def check_in( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the last_seen field in the connector, and sets it to current timestamp + Check in a connector. Update the `last_seen` field in the connector and set it + to the current timestamp. ``_ @@ -77,7 +78,10 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a connector. + Delete a connector. Removes a connector and associated sync jobs. This is a destructive + action that is not recoverable. NOTE: This action doesn’t delete any API keys, + ingest pipelines, or data indices associated with the connector. These need to + be removed manually. ``_ @@ -121,7 +125,7 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a connector. + Get a connector. Get the details about a connector. ``_ @@ -215,7 +219,8 @@ def last_sync( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates last sync stats in the connector document + Update the connector last sync stats. Update the fields related to the last sync + of a connector. This action is used for analytics and monitoring. ``_ @@ -309,7 +314,7 @@ def list( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns existing connectors. + Get all connectors. Get information about all connectors. ``_ @@ -383,7 +388,11 @@ def post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a connector. + Create a connector. Connectors are Elasticsearch integrations that bring content + from third-party data sources, which can be deployed on Elastic Cloud or hosted + on your own infrastructure. Elastic managed connectors (Native connectors) are + a managed service on Elastic Cloud. Self-managed connectors (Connector clients) + are self-managed on your infrastructure. 
``_ @@ -461,7 +470,7 @@ def put( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a connector. + Create or update a connector. ``_ @@ -530,7 +539,10 @@ def sync_job_cancel( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancels a connector sync job. + Cancel a connector sync job. Cancel a connector sync job, which sets the status + to cancelling and updates `cancellation_requested_at` to the current time. The + connector service is then responsible for setting the status of connector sync + jobs to cancelled. ``_ @@ -574,7 +586,8 @@ def sync_job_delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a connector sync job. + Delete a connector sync job. Remove a connector sync job and its associated data. + This is a destructive action that is not recoverable. ``_ @@ -617,7 +630,7 @@ def sync_job_get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a connector sync job. + Get a connector sync job. ``_ @@ -685,7 +698,8 @@ def sync_job_list( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Lists connector sync jobs. + Get all connector sync jobs. Get information about all stored connector sync + jobs listed by their creation date in ascending order. ``_ @@ -746,7 +760,8 @@ def sync_job_post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a connector sync job. + Create a connector sync job. Create a connector sync job document in the internal + index and initialize its counters and timestamps with default values. ``_ @@ -797,7 +812,8 @@ def update_active_filtering( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Activates the valid draft filtering for a connector. + Activate the connector draft filter. Activates the valid draft filtering for + a connector. ``_ @@ -842,7 +858,11 @@ def update_api_key_id( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the API key id in the connector document + Update the connector API key ID. Update the `api_key_id` and `api_key_secret_id` + fields of a connector. You can specify the ID of the API key used for authorization + and the ID of the connector secret where the API key is stored. The connector + secret ID is required only for Elastic managed (native) connectors. Self-managed + connectors (connector clients) do not use this field. ``_ @@ -896,7 +916,8 @@ def update_configuration( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the configuration field in the connector document + Update the connector configuration. Update the configuration field in the connector + document. ``_ @@ -949,7 +970,10 @@ def update_error( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the filtering field in the connector document + Update the connector error field. Set the error field for the connector. If the + error provided in the request body is non-null, the connector’s status is updated + to error. Otherwise, if the error is reset to null, the connector status is updated + to connected. ``_ @@ -1003,7 +1027,10 @@ def update_filtering( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the filtering field in the connector document + Update the connector filtering. Update the draft filtering configuration of a + connector and marks the draft validation state as edited. 
The filtering draft + is activated once validated by the running Elastic connector service. The filtering + property is used to configure sync rules (both basic and advanced) for a connector. ``_ @@ -1059,7 +1086,8 @@ def update_filtering_validation( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the draft filtering validation info for a connector. + Update the connector draft filtering validation. Update the draft filtering validation + info for a connector. ``_ @@ -1111,7 +1139,8 @@ def update_index_name( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the index_name in the connector document + Update the connector index name. Update the `index_name` field of a connector, + specifying the index where the data ingested by the connector is stored. ``_ @@ -1164,7 +1193,7 @@ def update_name( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the name and description fields in the connector document + Update the connector name and description. ``_ @@ -1217,7 +1246,7 @@ def update_native( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the is_native flag in the connector document + Update the connector is_native flag. ``_ @@ -1269,7 +1298,8 @@ def update_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the pipeline field in the connector document + Update the connector pipeline. When you create a new connector, the configuration + of an ingest pipeline is populated with default settings. ``_ @@ -1321,7 +1351,7 @@ def update_scheduling( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the scheduling field in the connector document + Update the connector scheduling. ``_ @@ -1373,7 +1403,7 @@ def update_service_type( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the service type of the connector + Update the connector service type. ``_ @@ -1432,7 +1462,7 @@ def update_status( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the status of the connector + Update the connector status. ``_ diff --git a/elasticsearch/_sync/client/dangling_indices.py b/elasticsearch/_sync/client/dangling_indices.py index 4cf0ec024..6a4930c6d 100644 --- a/elasticsearch/_sync/client/dangling_indices.py +++ b/elasticsearch/_sync/client/dangling_indices.py @@ -39,13 +39,17 @@ def delete_dangling_index( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the specified dangling index + Delete a dangling index. If Elasticsearch encounters index data that is absent + from the current cluster state, those indices are considered to be dangling. + For example, this can happen if you delete more than `cluster.indices.tombstones.size` + indices while an Elasticsearch node is offline. ``_ - :param index_uuid: The UUID of the dangling index - :param accept_data_loss: Must be set to true in order to delete the dangling - index + :param index_uuid: The UUID of the index to delete. Use the get dangling indices + API to find the UUID. + :param accept_data_loss: This parameter must be set to true to acknowledge that + it will no longer be possible to recover data from the dangling index.
:param master_timeout: Specify timeout for connection to master :param timeout: Explicit operation timeout """ @@ -94,13 +98,20 @@ def import_dangling_index( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Imports the specified dangling index + Import a dangling index. If Elasticsearch encounters index data that is absent + from the current cluster state, those indices are considered to be dangling. + For example, this can happen if you delete more than `cluster.indices.tombstones.size` + indices while an Elasticsearch node is offline. ``_ - :param index_uuid: The UUID of the dangling index - :param accept_data_loss: Must be set to true in order to import the dangling - index + :param index_uuid: The UUID of the index to import. Use the get dangling indices + API to locate the UUID. + :param accept_data_loss: This parameter must be set to true to import a dangling + index. Because Elasticsearch cannot know where the dangling index data came + from or determine which shard copies are fresh and which are stale, it cannot + guarantee that the imported data represents the latest state of the index + when it was last in the cluster. :param master_timeout: Specify timeout for connection to master :param timeout: Explicit operation timeout """ @@ -145,7 +156,11 @@ def list_dangling_indices( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns all dangling indices. + Get the dangling indices. If Elasticsearch encounters index data that is absent + from the current cluster state, those indices are considered to be dangling. + For example, this can happen if you delete more than `cluster.indices.tombstones.size` + indices while an Elasticsearch node is offline. Use this API to list dangling + indices, which you can then import or delete. ``_ """ diff --git a/elasticsearch/_sync/client/eql.py b/elasticsearch/_sync/client/eql.py index ce3e515f3..63ef319fb 100644 --- a/elasticsearch/_sync/client/eql.py +++ b/elasticsearch/_sync/client/eql.py @@ -174,6 +174,7 @@ def get_status( "filter", "keep_alive", "keep_on_completion", + "max_samples_per_key", "result_position", "runtime_mappings", "size", @@ -211,6 +212,7 @@ def search( ignore_unavailable: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, + max_samples_per_key: t.Optional[int] = None, pretty: t.Optional[bool] = None, result_position: t.Optional[t.Union[str, t.Literal["head", "tail"]]] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, @@ -244,6 +246,11 @@ def search( in the response. :param keep_alive: :param keep_on_completion: + :param max_samples_per_key: By default, the response of a sample query contains + up to `10` samples, with one sample per unique set of join keys. Use the + `size` parameter to get a smaller or larger set of samples. To retrieve more + than one sample per set of join keys, use the `max_samples_per_key` parameter. + Pipes are not supported for sample queries. :param result_position: :param runtime_mappings: :param size: For basic queries, the maximum number of matching events to return. 
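For example, an EQL sample query that raises the per-key cap from the default of 10 might look like this (a sketch only; the data stream name, join key, and event categories are illustrative):

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", basic_auth=("elastic", "<password>"))

# Return up to 20 samples per unique host.name instead of the default 10.
resp = client.eql.search(
    index="my-data-stream",
    query="""
        sample by host.name
          [any where event.category == "process"]
          [any where event.category == "network"]
    """,
    max_samples_per_key=20,
)
for seq in resp["hits"]["sequences"]:  # sample results are grouped by join key
    print(seq["join_keys"], len(seq["events"]))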
@@ -292,6 +299,8 @@ def search( __body["keep_alive"] = keep_alive if keep_on_completion is not None: __body["keep_on_completion"] = keep_on_completion + if max_samples_per_key is not None: + __body["max_samples_per_key"] = max_samples_per_key if result_position is not None: __body["result_position"] = result_position if runtime_mappings is not None: diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index 7b59bb318..ef9d5a34a 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -137,7 +137,8 @@ def analyze( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs analysis on a text string and returns the resulting tokens. + Get tokens from text analysis. The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) + on a text string and returns the resulting tokens. ``_ @@ -1271,7 +1272,6 @@ def exists_alias( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, - local: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ @@ -1292,8 +1292,6 @@ def exists_alias( as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. :param ignore_unavailable: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. - :param local: If `true`, the request retrieves information from the local node - only. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -1319,8 +1317,6 @@ def exists_alias( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable - if local is not None: - __query["local"] = local if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -1863,7 +1859,6 @@ def get_alias( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, - local: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1885,8 +1880,6 @@ def get_alias( as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. - :param local: If `true`, the request retrieves information from the local node - only. 
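With the `local` flag gone, alias lookups need no extra arguments. A sketch; the alias name is invented:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# HEAD-style existence check: returns a boolean-like HeadApiResponse.
if client.indices.exists_alias(name="logs-alias"):
    # Fetch the alias definitions for the indices behind it.
    aliases = client.indices.get_alias(name="logs-alias")
    print(list(aliases))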
""" __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and name not in SKIP_IN_PATH: @@ -1914,8 +1907,6 @@ def get_alias( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable - if local is not None: - __query["local"] = local if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -2809,14 +2800,14 @@ def put_alias( ) @_rewrite_parameters( - body_fields=("data_retention", "downsampling"), + body_name="lifecycle", ) def put_data_lifecycle( self, *, name: t.Union[str, t.Sequence[str]], - data_retention: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - downsampling: t.Optional[t.Mapping[str, t.Any]] = None, + lifecycle: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ @@ -2831,7 +2822,6 @@ def put_data_lifecycle( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ Update data stream lifecycles. Update the data stream lifecycle of the specified @@ -2841,13 +2831,7 @@ def put_data_lifecycle( :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. - :param data_retention: If defined, every document added to this data stream will - be stored at least for this time frame. Any time after this duration the - document could be deleted. When empty, every document in this data stream - will be stored indefinitely. - :param downsampling: If defined, every backing index will execute the configured - downsampling configuration after the backing index is not the data stream - write index anymore. + :param lifecycle: :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `hidden`, `open`, `closed`, `none`. @@ -2859,10 +2843,15 @@ def put_data_lifecycle( """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") + if lifecycle is None and body is None: + raise ValueError( + "Empty value passed for parameters 'lifecycle' and 'body', one of them should be set." 
+ ) + elif lifecycle is not None and body is not None: + raise ValueError("Cannot set both 'lifecycle' and 'body'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_lifecycle' __query: t.Dict[str, t.Any] = {} - __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: @@ -2877,16 +2866,8 @@ def put_data_lifecycle( __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout - if not __body: - if data_retention is not None: - __body["data_retention"] = data_retention - if downsampling is not None: - __body["downsampling"] = downsampling - if not __body: - __body = None # type: ignore[assignment] - __headers = {"accept": "application/json"} - if __body is not None: - __headers["content-type"] = "application/json" + __body = lifecycle if lifecycle is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, @@ -3864,7 +3845,6 @@ def segments( human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, - verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ Returns low-level information about the Lucene segments in index shards. For @@ -3884,7 +3864,6 @@ def segments( as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. - :param verbose: If `true`, the request returns a verbose response. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: @@ -3908,8 +3887,6 @@ def segments( __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty - if verbose is not None: - __query["verbose"] = verbose __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py index b157260ae..df17aa247 100644 --- a/elasticsearch/_sync/client/ml.py +++ b/elasticsearch/_sync/client/ml.py @@ -3368,7 +3368,7 @@ def put_datafeed( Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval - (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') + (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. 
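A datafeed using the two interval settings discussed here might be created like this. A sketch; the job, index, and interval values are invented, and the parameter set is assumed from the documented API:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

client.ml.put_datafeed(
    datafeed_id="datafeed-my-job",  # hypothetical datafeed
    job_id="my-job",                # anomaly detection job it feeds
    indices=["my-metrics"],         # assumed source index
    query={"match_all": {}},
    frequency="150s",   # how often the query runs
    query_delay="90s",  # wait for late-arriving data before each run
)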
If you provide secondary authorization diff --git a/elasticsearch/_sync/client/query_rules.py b/elasticsearch/_sync/client/query_rules.py index 7b66ca7ed..48d4ae70a 100644 --- a/elasticsearch/_sync/client/query_rules.py +++ b/elasticsearch/_sync/client/query_rules.py @@ -382,3 +382,56 @@ def put_ruleset( endpoint_id="query_rules.put_ruleset", path_parts=__path_parts, ) + + @_rewrite_parameters( + body_fields=("match_criteria",), + ) + def test( + self, + *, + ruleset_id: str, + match_criteria: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Test a query ruleset. Evaluate match criteria against a query ruleset to identify + the rules that would match that criteria. + + ``_ + + :param ruleset_id: The unique identifier of the query ruleset to be tested + :param match_criteria: The match criteria to apply to the query ruleset + """ + if ruleset_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'ruleset_id'") + if match_criteria is None and body is None: + raise ValueError("Empty value passed for parameter 'match_criteria'") + __path_parts: t.Dict[str, str] = {"ruleset_id": _quote(ruleset_id)} + __path = f'/_query_rules/{__path_parts["ruleset_id"]}/_test' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if match_criteria is not None: + __body["match_criteria"] = match_criteria + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="query_rules.test", + path_parts=__path_parts, + ) diff --git a/elasticsearch/_sync/client/security.py b/elasticsearch/_sync/client/security.py index b6ed81d56..3fbcb0ec4 100644 --- a/elasticsearch/_sync/client/security.py +++ b/elasticsearch/_sync/client/security.py @@ -44,7 +44,8 @@ def activate_user_profile( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a user profile on behalf of another user. + Activate a user profile. Create or update a user profile on behalf of another + user. ``_ @@ -144,9 +145,9 @@ def bulk_delete_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The role management APIs are generally the preferred way to manage roles, rather - than using file-based role management. The bulk delete roles API cannot delete - roles that are defined in roles files. + Bulk delete roles. The role management APIs are generally the preferred way to + manage roles, rather than using file-based role management. The bulk delete roles + API cannot delete roles that are defined in roles files. ``_ @@ -202,9 +203,9 @@ def bulk_put_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The role management APIs are generally the preferred way to manage roles, rather - than using file-based role management. The bulk create or update roles API cannot - update roles that are defined in roles files. + Bulk create or update roles.
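The new `query_rules.test` method above can be exercised like so. A sketch; the ruleset ID and criteria value are invented, and the criteria keys are expected to match those defined in the ruleset's rules:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# POSTs to /_query_rules/<ruleset_id>/_test with match_criteria as the body.
resp = client.query_rules.test(
    ruleset_id="my-ruleset",                  # hypothetical ruleset
    match_criteria={"query_string": "pugs"},  # evaluated against the rules
)
print(resp)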
The role management APIs are generally the preferred + way to manage roles, rather than using file-based role management. The bulk create + or update roles API cannot update roles that are defined in roles files. ``_ @@ -262,7 +263,8 @@ def change_password( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Changes the passwords of users in the native realm and built-in users. + Change passwords. Change the passwords of users in the native realm and built-in + users. ``_ @@ -324,8 +326,8 @@ def clear_api_key_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts a subset of all entries from the API key cache. The cache is also automatically - cleared on state changes of the security index. + Clear the API key cache. Evict a subset of all entries from the API key cache. + The cache is also automatically cleared on state changes of the security index. ``_ @@ -366,7 +368,9 @@ def clear_cached_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts application privileges from the native application privileges cache. + Clear the privileges cache. Evict privileges from the native application privilege + cache. The cache is also automatically cleared for applications that have their + privileges updated. ``_ @@ -407,8 +411,8 @@ def clear_cached_realms( usernames: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts users from the user cache. Can completely clear the cache or evict specific - users. + Clear the user cache. Evict users from the user cache. You can completely clear + the cache or evict specific users. ``_ @@ -451,7 +455,7 @@ def clear_cached_roles( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts roles from the native role cache. + Clear the roles cache. Evict roles from the native role cache. ``_ @@ -493,7 +497,8 @@ def clear_cached_service_tokens( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts tokens from the service account token caches. + Clear service account token caches. Evict a subset of all entries from the service + account token caches. ``_ @@ -552,7 +557,7 @@ def create_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an API key. Creates an API key for access without requiring basic authentication. + Create an API key. Create an API key for access without requiring basic authentication. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You @@ -612,6 +617,90 @@ def create_api_key( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("access", "name", "expiration", "metadata"), + ) + def create_cross_cluster_api_key( + self, + *, + access: t.Optional[t.Mapping[str, t.Any]] = None, + name: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + metadata: t.Optional[t.Mapping[str, t.Any]] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Create a cross-cluster API key. Create an API key of the `cross_cluster` type + for the API key based remote cluster access. 
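A usage sketch for the method above; the key name and index pattern are invented, and the `search`/`replication` structure of `access` and the `encoded` response field follow the documented API but should be treated as assumptions:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

resp = client.security.create_cross_cluster_api_key(
    name="my-remote-key",  # hypothetical key name
    access={
        # Only index names are listed; privileges are derived automatically.
        "search": [{"names": ["logs-*"]}],
    },
    expiration="30d",
    metadata={"team": "search"},  # arbitrary caller-defined metadata
)
print(resp["encoded"])  # credential to configure on the querying cluster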
A `cross_cluster` API key cannot + be used to authenticate through the REST interface. IMPORTANT: To authenticate + this request you must use a credential that is not an API key. Even if you use + an API key that has the required privilege, the API returns an error. Cross-cluster + API keys are created by the Elasticsearch API key service, which is automatically + enabled. NOTE: Unlike REST API keys, a cross-cluster API key does not capture + permissions of the authenticated user. The API key’s effective permission is + exactly as specified with the `access` property. A successful request returns + a JSON structure that contains the API key, its unique ID, and its name. If applicable, + it also returns expiration information for the API key in milliseconds. By default, + API keys never expire. You can specify expiration information when you create + the API keys. Cross-cluster API keys can only be updated with the update cross-cluster + API key API. Attempting to update them with the update REST API key API or the + bulk update REST API keys API will result in an error. + + ``_ + + :param access: The access to be granted to this API key. The access is composed + of permissions for cross-cluster search and cross-cluster replication. At + least one of them must be specified. NOTE: No explicit privileges should + be specified for either search or replication access. The creation process + automatically converts the access specification to a role descriptor which + has relevant privileges assigned accordingly. + :param name: Specifies the name for this API key. + :param expiration: Expiration time for the API key. By default, API keys never + expire. + :param metadata: Arbitrary metadata that you want to associate with the API key. + It supports nested data structure. Within the metadata object, keys beginning + with `_` are reserved for system usage. + """ + if access is None and body is None: + raise ValueError("Empty value passed for parameter 'access'") + if name is None and body is None: + raise ValueError("Empty value passed for parameter 'name'") + __path_parts: t.Dict[str, str] = {} + __path = "/_security/cross_cluster/api_key" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if access is not None: + __body["access"] = access + if name is not None: + __body["name"] = name + if expiration is not None: + __body["expiration"] = expiration + if metadata is not None: + __body["metadata"] = metadata + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.create_cross_cluster_api_key", + path_parts=__path_parts, + ) + @_rewrite_parameters() def create_service_token( self, @@ -628,7 +717,8 @@ def create_service_token( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a service accounts token for access without requiring basic authentication. + Create a service account token. Create a service account token for access without + requiring basic authentication. ``_ @@ -698,7 +788,7 @@ def delete_privileges( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes application privileges.
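For the service token API above, a minimal sketch; `elastic/fleet-server` is a built-in service account, the token name is invented, and the response shape is assumed from the documented API:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

resp = client.security.create_service_token(
    namespace="elastic",
    service="fleet-server",
    name="my-token",  # hypothetical token name
)
# The secret value is only returned once, at creation time.
print(resp["token"]["value"])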
+ Delete application privileges. ``_ @@ -754,7 +844,7 @@ def delete_role( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes roles in the native realm. + Delete roles. Delete roles in the native realm. ``_ @@ -802,7 +892,7 @@ def delete_role_mapping( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes role mappings. + Delete role mappings. ``_ @@ -852,7 +942,8 @@ def delete_service_token( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a service account token. + Delete service account tokens. Delete service account tokens for a service in + a specified namespace. ``_ @@ -910,7 +1001,7 @@ def delete_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes users from the native realm. + Delete users. Delete users from the native realm. ``_ @@ -958,7 +1049,7 @@ def disable_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Disables users in the native realm. + Disable users. Disable users in the native realm. ``_ @@ -1006,7 +1097,8 @@ def disable_user_profile( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Disables a user profile so it's not visible in user profile searches. + Disable a user profile. Disable user profiles so that they are not visible in + user profile searches. ``_ @@ -1054,7 +1146,7 @@ def enable_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables users in the native realm. + Enable users. Enable users in the native realm. ``_ @@ -1102,7 +1194,8 @@ def enable_user_profile( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables a user profile so it's visible in user profile searches. + Enable a user profile. Enable user profiles to make them visible in user profile + searches. ``_ @@ -1146,8 +1239,8 @@ def enroll_kibana( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables a Kibana instance to configure itself for communication with a secured - Elasticsearch cluster. + Enroll Kibana. Enable a Kibana instance to configure itself for communication + with a secured Elasticsearch cluster. ``_ """ @@ -1182,7 +1275,8 @@ def enroll_node( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows a new node to join an existing cluster with security features enabled. + Enroll a node. Enroll a new node to allow it to join an existing cluster with + security features enabled. ``_ """ @@ -1303,8 +1397,8 @@ def get_builtin_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the list of cluster privileges and index privileges that are available - in this version of Elasticsearch. + Get builtin privileges. Get the list of cluster privileges and index privileges + that are available in this version of Elasticsearch. ``_ """ @@ -1341,7 +1435,7 @@ def get_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves application privileges. + Get application privileges. ``_ @@ -1388,9 +1482,7 @@ def get_role( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The role management APIs are generally the preferred way to manage roles, rather - than using file-based role management. The get roles API cannot retrieve roles - that are defined in roles files. + Get roles. Get roles in the native realm. ``_ @@ -1435,7 +1527,10 @@ def get_role_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves role mappings. + Get role mappings. Role mappings define which roles are assigned to each user. + The role mapping APIs are generally the preferred way to manage role mappings + rather than using role mapping files. 
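The enable/disable pair above is symmetric. A sketch; the username is invented:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Temporarily revoke access without deleting the user ...
client.security.disable_user(username="jacknich")
# ... and restore it later.
client.security.enable_user(username="jacknich")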
The get role mappings API cannot retrieve + role mappings that are defined in role mapping files. ``_ @@ -1483,7 +1578,8 @@ def get_service_accounts( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - This API returns a list of service accounts that match the provided path parameter(s). + Get service accounts. Get a list of service accounts that match the provided + path parameters. ``_ @@ -1534,7 +1630,7 @@ def get_service_credentials( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information of all service credentials for a service account. + Get service account credentials. ``_ @@ -1602,7 +1698,7 @@ def get_token( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a bearer token for access without requiring basic authentication. + Get a token. Create a bearer token for access without requiring basic authentication. ``_ @@ -1661,7 +1757,7 @@ def get_user( with_profile_uid: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about users in the native realm and built-in users. + Get users. Get information about users in the native realm and built-in users. ``_ @@ -1712,7 +1808,7 @@ def get_user_privileges( username: t.Optional[t.Union[None, str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves security privileges for the logged in user. + Get user privileges. ``_ @@ -1762,7 +1858,7 @@ def get_user_profile( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a user's profile using the unique profile ID. + Get a user profile. Get a user's profile using the unique profile ID. ``_ @@ -1826,21 +1922,21 @@ def grant_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an API key on behalf of another user. This API is similar to Create API - keys, however it creates the API key for a user that is different than the user - that runs the API. The caller must have authentication credentials (either an - access token, or a username and password) for the user on whose behalf the API - key will be created. It is not possible to use this API to create an API key - without that user’s credentials. The user, for whom the authentication credentials - is provided, can optionally "run as" (impersonate) another user. In this case, - the API key will be created on behalf of the impersonated user. This API is intended - be used by applications that need to create and manage API keys for end users, - but cannot guarantee that those users have permission to create API keys on their - own behalf. A successful grant API key API call returns a JSON structure that - contains the API key, its unique id, and its name. If applicable, it also returns - expiration information for the API key in milliseconds. By default, API keys - never expire. You can specify expiration information when you create the API - keys. + Grant an API key. Create an API key on behalf of another user. This API is similar + to the create API keys API; however, it creates the API key for a user that is + different from the user that runs the API. The caller must have authentication + credentials (either an access token, or a username and password) for the user + on whose behalf the API key will be created. It is not possible to use this API + to create an API key without that user’s credentials. The user, for whom the + authentication credentials are provided, can optionally "run as" (impersonate) + another user.
In this case, the API key will be created on behalf of the impersonated + user. This API is intended to be used by applications that need to create and manage + API keys for end users, but cannot guarantee that those users have permission + to create API keys on their own behalf. A successful grant API key API call returns + a JSON structure that contains the API key, its unique id, and its name. If applicable, + it also returns expiration information for the API key in milliseconds. By default, + API keys never expire. You can specify expiration information when you create + the API keys. ``_ @@ -1980,8 +2076,8 @@ def has_privileges( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Check user privileges. Determines whether the specified user has a specified - list of privileges. + Check user privileges. Determine whether the specified user has a specified list + of privileges. ``_ @@ -2040,8 +2136,8 @@ def has_privileges_user_profile( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Determines whether the users associated with the specified profile IDs have all - the requested privileges. + Check user profile privileges. Determine whether the users associated with the + specified user profile IDs have all the requested privileges. ``_ @@ -2100,13 +2196,17 @@ def invalidate_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidate API keys. Invalidates one or more API keys. The `manage_api_key` privilege - allows deleting any API keys. The `manage_own_api_key` only allows deleting API - keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, - an invalidation request must be issued in one of the three formats: - Set the - parameter `owner=true`. - Or, set both `username` and `realm_name` to match the - user’s identity. - Or, if the request is issued by an API key, i.e. an API key - invalidates itself, specify its ID in the `ids` field. + Invalidate API keys. This API invalidates API keys created by the create API + key or grant API key APIs. Invalidated API keys fail authentication, but they + can still be viewed using the get API key information and query API key information + APIs, for at least the configured retention period, until they are automatically + deleted. The `manage_api_key` privilege allows deleting any API keys. The `manage_own_api_key` + privilege only allows deleting API keys that are owned by the user. In addition, with the + `manage_own_api_key` privilege, an invalidation request must be issued in one + of the three formats: - Set the parameter `owner=true`. - Or, set both `username` + and `realm_name` to match the user’s identity. - Or, if the request is issued + by an API key, that is to say an API key invalidates itself, specify its ID in + the `ids` field. ``_ @@ -2177,7 +2277,12 @@ def invalidate_token( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidates one or more access tokens or refresh tokens. + Invalidate a token. The access tokens returned by the get token API have a finite + period of time for which they are valid. After that time period, they can no + longer be used. The time period is defined by the `xpack.security.authc.token.timeout` + setting. The refresh tokens returned by the get token API are only valid for + 24 hours. They can also be used exactly once. If you want to invalidate one or + more access or refresh tokens immediately, use this invalidate token API.
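In code, the immediate-invalidation path reads like this. A sketch; the credentials are placeholders:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

tok = client.security.get_token(
    grant_type="password",
    username="elastic",     # placeholder credentials
    password="<password>",
)
# Revoke the bearer token right away instead of waiting for the timeout.
client.security.invalidate_token(token=tok["access_token"])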
``_ @@ -2237,7 +2342,7 @@ def put_privileges( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds or updates application privileges. + Create or update application privileges. ``_ @@ -2380,9 +2485,10 @@ def put_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The role management APIs are generally the preferred way to manage roles, rather - than using file-based role management. The create or update roles API cannot - update roles that are defined in roles files. + Create or update roles. The role management APIs are generally the preferred + way to manage roles in the native realm, rather than using file-based role management. + The create or update roles API cannot update roles that are defined in roles + files. File-based role management is not available in Elastic Serverless. ``_ @@ -2491,7 +2597,14 @@ def put_role_mapping( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates and updates role mappings. + Create or update role mappings. Role mappings define which roles are assigned + to each user. Each mapping has rules that identify users and a list of roles + that are granted to those users. The role mapping APIs are generally the preferred + way to manage role mappings rather than using role mapping files. The create + or update role mappings API cannot update role mappings that are defined in role + mapping files. This API does not create roles. Rather, it maps users to existing + roles. Roles can be created by using the create or update roles API or roles + files. ``_ @@ -2578,8 +2691,9 @@ def put_user( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds and updates users in the native realm. These users are commonly referred - to as native users. + Create or update users. A password is required for adding a new user but is optional + when updating an existing user. To change a user’s password without updating + any other fields, use the change password API. ``_ @@ -2676,7 +2790,7 @@ def query_api_keys( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Query API keys. Retrieves a paginated list of API keys and their information. + Find API keys with a query. Get a paginated list of API keys and their information. You can optionally filter the results with a query. ``_ @@ -2803,8 +2917,8 @@ def query_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves roles in a paginated manner. You can optionally filter the results - with a query. + Find roles with a query. Get roles in a paginated manner. You can optionally + filter the results with a query. ``_ @@ -2889,8 +3003,8 @@ def query_user( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information for Users in a paginated manner. You can optionally filter - the results with a query. + Find users with a query. Get information for users in a paginated manner. You + can optionally filter the results with a query. ``_ @@ -2968,7 +3082,7 @@ def saml_authenticate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Submits a SAML Response message to Elasticsearch for consumption. + Authenticate SAML. Submits a SAML response message to Elasticsearch for consumption. ``_ @@ -3030,7 +3144,7 @@ def saml_complete_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Verifies the logout response sent from the SAML IdP. + Logout of SAML completely. 
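A role mapping as described above might be created like this. A sketch; the mapping name, role, and LDAP group are invented:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Map everyone in an (assumed) LDAP group to an existing role; the API maps
# users to roles, it does not create the role itself.
client.security.put_role_mapping(
    name="basic-users",  # hypothetical mapping name
    roles=["viewer"],    # must already exist
    rules={"field": {"groups": "cn=users,dc=example,dc=com"}},
    enabled=True,
)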
Verifies the logout response sent from the SAML IdP. ``_ @@ -3096,7 +3210,7 @@ def saml_invalidate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Submits a SAML LogoutRequest message to Elasticsearch for consumption. + Invalidate SAML. Submits a SAML LogoutRequest message to Elasticsearch for consumption. ``_ @@ -3163,7 +3277,7 @@ def saml_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Submits a request to invalidate an access token and refresh token. + Logout of SAML. Submits a request to invalidate an access token and refresh token. ``_ @@ -3220,8 +3334,8 @@ def saml_prepare_authentication( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a SAML authentication request (<AuthnRequest>) as a URL string, based - on the configuration of the respective SAML realm in Elasticsearch. + Prepare SAML authentication. Creates a SAML authentication request (``<AuthnRequest>``) + as a URL string, based on the configuration of the respective SAML realm in Elasticsearch. ``_ @@ -3276,7 +3390,8 @@ def saml_service_provider_metadata( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Generate SAML metadata for a SAML 2.0 Service Provider. + Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 + Service Provider. ``_ @@ -3322,7 +3437,8 @@ def suggest_user_profiles( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get suggestions for user profiles that match specified search criteria. + Suggest a user profile. Get suggestions for user profiles that match specified + search criteria. ``_ @@ -3460,6 +3576,74 @@ def update_api_key( path_parts=__path_parts, ) + + @_rewrite_parameters( + body_fields=("access", "expiration", "metadata"), + ) + def update_cross_cluster_api_key( + self, + *, + id: str, + access: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + metadata: t.Optional[t.Mapping[str, t.Any]] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Update a cross-cluster API key. Update the attributes of an existing cross-cluster + API key, which is used for API key based remote cluster access. + + ``_ + + :param id: The ID of the cross-cluster API key to update. + :param access: The access to be granted to this API key. The access is composed + of permissions for cross cluster search and cross cluster replication. At + least one of them must be specified. When specified, the new access assignment + fully replaces the previously assigned access. + :param expiration: Expiration time for the API key. By default, API keys never + expire. This property can be omitted to leave the value unchanged. + :param metadata: Arbitrary metadata that you want to associate with the API key. + It supports nested data structure. Within the metadata object, keys beginning + with `_` are reserved for system usage. When specified, this information + fully replaces metadata previously associated with the API key.
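A usage sketch for this update; the key ID and index patterns are invented, and note that each field fully replaces the stored value:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

client.security.update_cross_cluster_api_key(
    id="VuaCfGcBCdbkQm-e5aOx",  # hypothetical API key ID
    access={
        "search": [{"names": ["logs-*"]}],
        "replication": [{"names": ["archive-*"]}],
    },
    metadata={"owner": "search-team"},
)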
+ """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + if access is None and body is None: + raise ValueError("Empty value passed for parameter 'access'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_security/cross_cluster/api_key/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if access is not None: + __body["access"] = access + if expiration is not None: + __body["expiration"] = expiration + if metadata is not None: + __body["metadata"] = metadata + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.update_cross_cluster_api_key", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("data", "labels"), ) @@ -3481,8 +3665,8 @@ def update_user_profile_data( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates specific data for the user profile that's associated with the specified - unique ID. + Update user profile data. Update specific data for the user profile that is associated + with a unique ID. ``_ diff --git a/elasticsearch/_sync/client/ssl.py b/elasticsearch/_sync/client/ssl.py index 19892748e..a45de4339 100644 --- a/elasticsearch/_sync/client/ssl.py +++ b/elasticsearch/_sync/client/ssl.py @@ -35,8 +35,23 @@ def certificates( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the X.509 certificates used to encrypt communications - in the cluster. + Get SSL certificates. Get information about the X.509 certificates that are used + to encrypt communications in the cluster. The API returns a list that includes + certificates from all TLS contexts including: - Settings for transport and HTTP + interfaces - TLS settings that are used within authentication realms - TLS settings + for remote monitoring exporters The list includes certificates that are used + for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` + and `xpack.security.transport.ssl.certificate_authorities` settings. It also + includes certificates that are used for configuring server identity, such as + `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate settings`. + The list does not include certificates that are sourced from the default SSL + context of the Java Runtime Environment (JRE), even if those certificates are + in use within Elasticsearch. NOTE: When a PKCS#11 token is configured as the + truststore of the JRE, the API returns all the certificates that are included + in the PKCS#11 token irrespective of whether these are used in the Elasticsearch + TLS configuration. If Elasticsearch is configured to use a keystore or truststore, + the API output includes all certificates in that store, even though some of the + certificates might not be in active use within the cluster. ``_ """