diff --git a/CHANGELOG.md b/CHANGELOG.md index 426db9b8..ab2e7bf2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Generate `cluster` client from API specs ([#530](https://github.com/opensearch-project/opensearch-py/pull/530)) - Generate `nodes` client from API specs ([#514](https://github.com/opensearch-project/opensearch-py/pull/514)) - Generate `cat` client from API specs ([#529](https://github.com/opensearch-project/opensearch-py/pull/529)) +- Use API generator for all APIs ([#551](https://github.com/opensearch-project/opensearch-py/pull/551)) ### Deprecated - Deprecated point-in-time APIs (list_all_point_in_time, create_point_in_time, delete_point_in_time) and Security Client APIs (health_check and update_audit_config) ([#502](https://github.com/opensearch-project/opensearch-py/pull/502)) ### Removed diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index 5fe9cad1..dd1fad26 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -1,10 +1,10 @@ - [Developer Guide](#developer-guide) - [Prerequisites](#prerequisites) - - [Docker Image Installation](#docker-setup) + - [Install Docker Image](#install-docker-image) - [Running Tests](#running-tests) - - [Integration Tests](#integration-tests) + - [Linter](#linter) - [Documentation](#documentation) - - [Running Python Client Generator](#running-python-client-generator) + - [Client Code Generator](#client-code-generator) # Developer Guide @@ -115,12 +115,10 @@ make html Open `opensearch-py/docs/build/html/index.html` to see results. -## Running Python Client Generator +## Client Code Generator -The following code executes a python client generator that updates the client by utilizing the [openapi specifications](https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json) found in the "opensearch-api-specification" repository. 
This process allows for the automatic generation and synchronization of the client code with the latest API specifications. +OpenSearch publishes an [OpenAPI specification](https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json) in the [opensearch-api-specification](https://github.com/opensearch-project/opensearch-api-specification) repository, which is used to auto-generate the less interesting parts of the client. ``` -cd opensearch-py -python utils/generate-api.py -nox -rs format +nox -rs generate ``` diff --git a/benchmarks/bench_async.py b/benchmarks/bench_async.py index d08ca634..c7eb5714 100644 --- a/benchmarks/bench_async.py +++ b/benchmarks/bench_async.py @@ -1,10 +1,14 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. import asyncio import uuid @@ -24,7 +28,7 @@ async def index_records(client, item_count): client.index( index=index_name, body={ - "title": f"Moneyball", + "title": "Moneyball", "director": "Bennett Miller", "year": "2011", }, diff --git a/benchmarks/bench_info_sync.py b/benchmarks/bench_info_sync.py index 03e6f998..229a2e4d 100644 --- a/benchmarks/bench_info_sync.py +++ b/benchmarks/bench_info_sync.py @@ -1,10 +1,15 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. 
+ import logging import sys @@ -35,7 +40,7 @@ def get_info(client, request_count): tt = 0 for n in range(request_count): start = time.time() * 1000 - rc = client.info() + client.info() total_time = time.time() * 1000 - start tt += total_time return tt diff --git a/benchmarks/bench_sync.py b/benchmarks/bench_sync.py index f20ca9f0..e201eaba 100644 --- a/benchmarks/bench_sync.py +++ b/benchmarks/bench_sync.py @@ -1,12 +1,18 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. import json +import logging +import sys import time import uuid @@ -20,9 +26,6 @@ index_name = "test-index-sync" item_count = 1000 -import logging -import sys - root = logging.getLogger() # root.setLevel(logging.DEBUG) # logging.getLogger("urllib3.connectionpool").setLevel(logging.DEBUG) diff --git a/benchmarks/bench_sync_async.py b/benchmarks/bench_sync_async.py index 5fa97f46..7950dc64 100644 --- a/benchmarks/bench_sync_async.py +++ b/benchmarks/bench_sync_async.py @@ -1,10 +1,15 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. 
+ import bench_async import bench_sync diff --git a/benchmarks/thread_with_return_value.py b/benchmarks/thread_with_return_value.py index fb495656..b6bc9c09 100644 --- a/benchmarks/thread_with_return_value.py +++ b/benchmarks/thread_with_return_value.py @@ -1,10 +1,13 @@ -#!/usr/bin/env python - +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. + from threading import Thread diff --git a/dev-requirements.txt b/dev-requirements.txt index 04cfb3e8..a79a1a0b 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -7,6 +7,7 @@ sphinx<7.3 sphinx_rtd_theme jinja2 pytz +deepmerge # No wheels for Python 3.10 yet! numpy; python_version<"3.10" diff --git a/docs/source/conf.py b/docs/source/conf.py index ea677630..133a2564 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,3 +1,12 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. + # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. 
For a full diff --git a/noxfile.py b/noxfile.py index 6b734b48..a9cd9068 100644 --- a/noxfile.py +++ b/noxfile.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -33,6 +34,9 @@ "opensearchpy/", "test_opensearchpy/", "utils/", + "samples/", + "benchmarks/", + "docs/", ) @@ -87,3 +91,10 @@ def docs(session): "-rdev-requirements.txt", "sphinx-rtd-theme", "sphinx-autodoc-typehints" ) session.run("python", "-m", "pip", "install", "sphinx-autodoc-typehints") + + +@nox.session() +def generate(session): + session.install("-rdev-requirements.txt") + session.run("python", "utils/generate-api.py") + format(session) diff --git a/opensearchpy/__init__.py b/opensearchpy/__init__.py index a0ea9f60..8116d60a 100644 --- a/opensearchpy/__init__.py +++ b/opensearchpy/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/__init__.pyi b/opensearchpy/__init__.pyi index 0fa4afcf..96c17075 100644 --- a/opensearchpy/__init__.pyi +++ b/opensearchpy/__init__.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/__init__.py b/opensearchpy/_async/__init__.py index 7e52ae22..392fa5bd 100644 --- a/opensearchpy/_async/__init__.py +++ b/opensearchpy/_async/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/_extra_imports.py b/opensearchpy/_async/_extra_imports.py index 5fd19461..e19a11a9 100644 --- a/opensearchpy/_async/_extra_imports.py +++ b/opensearchpy/_async/_extra_imports.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff 
--git a/opensearchpy/_async/client/__init__.py b/opensearchpy/_async/client/__init__.py index 2440b291..a7587f82 100644 --- a/opensearchpy/_async/client/__init__.py +++ b/opensearchpy/_async/client/__init__.py @@ -25,6 +25,17 @@ # specific language governing permissions and limitations # under the License. + +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from __future__ import unicode_literals import logging @@ -39,6 +50,7 @@ from .nodes import NodesClient from .plugins import PluginsClient from .remote import RemoteClient +from .remote_store import RemoteStoreClient from .security import SecurityClient from .snapshot import SnapshotClient from .tasks import TasksClient @@ -206,6 +218,7 @@ class as kwargs, or a string in the format of ``host[:port]`` which will be self.security = SecurityClient(self) self.snapshot = SnapshotClient(self) self.tasks = TasksClient(self) + self.remote_store = RemoteStoreClient(self) self.features = FeaturesClient(self) @@ -274,25 +287,25 @@ async def create(self, index, id, body, params=None, headers=None): with a same ID already exists in the index. - :arg index: The name of the index - :arg id: Document ID + :arg index: Index name. + :arg id: Document ID. :arg body: The document :arg pipeline: The pipeline id to preprocess incoming documents - with + with. 
:arg refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then - do nothing with refreshes. Valid choices: true, false, wait_for - :arg routing: Specific routing value - :arg timeout: Explicit operation timeout - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte + do nothing with refreshes. Valid choices are true, false, wait_for. + :arg routing: Routing value. + :arg timeout: Operation timeout. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. Valid choices are + internal, external, external_gte, force. :arg wait_for_active_shards: Sets the number of shard copies - that must be active before proceeding with the index operation. Defaults - to 1, meaning the primary shard only. Set to `all` for all shard copies, + that must be active before proceeding with the operation. Defaults to 1, + meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total - number of copies for the shard (number of replicas + 1) + number of copies for the shard (number of replicas + 1). Default is 1. """ for param in (index, id, body): if param in SKIP_IN_PATH: @@ -322,46 +335,42 @@ async def index(self, index, body, id=None, params=None, headers=None): Creates or updates a document in an index. - :arg index: The name of the index + :arg index: Index name. :arg body: The document - :arg id: Document ID - :arg if_primary_term: only perform the index operation if the - last operation that has changed the document has the specified primary - term - :arg if_seq_no: only perform the index operation if the last - operation that has changed the document has the specified sequence - number + :arg id: Document ID. 
+ :arg if_primary_term: only perform the operation if the last + operation that has changed the document has the specified primary term. + :arg if_seq_no: only perform the operation if the last operation + that has changed the document has the specified sequence number. :arg op_type: Explicit operation type. Defaults to `index` for requests with an explicit document ID, and to `create`for requests - without an explicit document ID Valid choices: index, create + without an explicit document ID. Valid choices are index, create. :arg pipeline: The pipeline id to preprocess incoming documents - with + with. :arg refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then - do nothing with refreshes. Valid choices: true, false, wait_for + do nothing with refreshes. Valid choices are true, false, wait_for. :arg require_alias: When true, requires destination to be an - alias. Default is false - :arg routing: Specific routing value - :arg timeout: Explicit operation timeout - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte + alias. Default is false. + :arg routing: Routing value. + :arg timeout: Operation timeout. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. Valid choices are + internal, external, external_gte, force. :arg wait_for_active_shards: Sets the number of shard copies - that must be active before proceeding with the index operation. Defaults - to 1, meaning the primary shard only. Set to `all` for all shard copies, + that must be active before proceeding with the operation. Defaults to 1, + meaning the primary shard only. 
Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total - number of copies for the shard (number of replicas + 1) + number of copies for the shard (number of replicas + 1). Default is 1. """ for param in (index, body): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - doc_type = "_doc" - return await self.transport.perform_request( "POST" if id in SKIP_IN_PATH else "PUT", - _make_path(index, doc_type, id), + _make_path(index, "_doc", id), params=params, headers=headers, body=body, @@ -385,29 +394,29 @@ async def bulk(self, body, index=None, params=None, headers=None): :arg body: The operation definition and data (action-data pairs), separated by newlines - :arg index: Default index for items which don't provide one + :arg index: Default index for items which don't provide one. :arg _source: True or false to return the _source field or not, or default list of fields to return, can be overridden on each sub- - request + request. :arg _source_excludes: Default list of fields to exclude from - the returned _source field, can be overridden on each sub-request + the returned _source field, can be overridden on each sub-request. :arg _source_includes: Default list of fields to extract and - return from the _source field, can be overridden on each sub-request + return from the _source field, can be overridden on each sub-request. :arg pipeline: The pipeline id to preprocess incoming documents - with + with. :arg refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then - do nothing with refreshes. Valid choices: true, false, wait_for + do nothing with refreshes. Valid choices are true, false, wait_for. :arg require_alias: Sets require_alias for all incoming - documents. 
Defaults to unset (false) - :arg routing: Specific routing value - :arg timeout: Explicit operation timeout + documents. Default is false. + :arg routing: Routing value. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Sets the number of shard copies - that must be active before proceeding with the bulk operation. Defaults - to 1, meaning the primary shard only. Set to `all` for all shard copies, + that must be active before proceeding with the operation. Defaults to 1, + meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total - number of copies for the shard (number of replicas + 1) + number of copies for the shard (number of replicas + 1). Default is 1. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -427,9 +436,9 @@ async def clear_scroll(self, body=None, scroll_id=None, params=None, headers=Non Explicitly clears the search context for a scroll. - :arg body: A comma-separated list of scroll IDs to clear if none + :arg body: Comma-separated list of scroll IDs to clear if none was specified via the scroll_id parameter - :arg scroll_id: A comma-separated list of scroll IDs to clear + :arg scroll_id: Comma-separated list of scroll IDs to clear. """ if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH: raise ValueError("You need to supply scroll_id or body.") @@ -463,37 +472,38 @@ async def count(self, body=None, index=None, params=None, headers=None): Returns number of documents matching a query. - :arg body: A query to restrict the results specified with the + :arg body: Query to restrict the results specified with the Query DSL (optional) - :arg index: A comma-separated list of indices to restrict the - results + :arg index: Comma-separated list of indices to restrict the + results. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg analyze_wildcard: Specify whether wildcard and prefix - queries should be analyzed (default: false) - :arg analyzer: The analyzer to use for the query string + queries should be analyzed. Default is false. + :arg analyzer: The analyzer to use for the query string. :arg default_operator: The default operator for query string - query (AND or OR) Valid choices: AND, OR Default: OR + query (AND or OR). Valid choices are AND, OR. :arg df: The field to use as default where no field prefix is - given in the query string + given in the query string. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_throttled: Whether specified concrete, expanded or - aliased indices should be ignored when throttled + aliased indices should be ignored when throttled. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg lenient: Specify whether format-based query failures (such - as providing text to a numeric field) should be ignored + as providing text to a numeric field) should be ignored. :arg min_score: Include only documents with a specific `_score` - value in the result + value in the result. :arg preference: Specify the node or shard the operation should - be performed on (default: random) - :arg q: Query in the Lucene query string syntax - :arg routing: A comma-separated list of specific routing values - :arg terminate_after: The maximum count for each shard, upon - reaching which the query execution will terminate early + be performed on. Default is random. 
+ :arg q: Query in the Lucene query string syntax. + :arg routing: Comma-separated list of specific routing values. + :arg terminate_after: The maximum number of documents to collect + for each shard, upon reaching which the query execution will terminate + early. """ return await self.transport.perform_request( "POST", @@ -518,37 +528,33 @@ async def delete(self, index, id, params=None, headers=None): Removes a document from the index. - :arg index: The name of the index - :arg id: The document ID - :arg if_primary_term: only perform the delete operation if the - last operation that has changed the document has the specified primary - term - :arg if_seq_no: only perform the delete operation if the last - operation that has changed the document has the specified sequence - number + :arg index: Index name. + :arg id: Document ID. + :arg if_primary_term: only perform the operation if the last + operation that has changed the document has the specified primary term. + :arg if_seq_no: only perform the operation if the last operation + that has changed the document has the specified sequence number. :arg refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then - do nothing with refreshes. Valid choices: true, false, wait_for - :arg routing: Specific routing value - :arg timeout: Explicit operation timeout - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte, force + do nothing with refreshes. Valid choices are true, false, wait_for. + :arg routing: Routing value. + :arg timeout: Operation timeout. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. Valid choices are + internal, external, external_gte, force. 
:arg wait_for_active_shards: Sets the number of shard copies - that must be active before proceeding with the delete operation. - Defaults to 1, meaning the primary shard only. Set to `all` for all - shard copies, otherwise set to any non-negative value less than or equal - to the total number of copies for the shard (number of replicas + 1) + that must be active before proceeding with the operation. Defaults to 1, + meaning the primary shard only. Set to `all` for all shard copies, + otherwise set to any non-negative value less than or equal to the total + number of copies for the shard (number of replicas + 1). Default is 1. """ for param in (index, id): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - doc_type = "_doc" - return await self.transport.perform_request( - "DELETE", _make_path(index, doc_type, id), params=params, headers=headers + "DELETE", _make_path(index, "_doc", id), params=params, headers=headers ) @query_params( @@ -591,76 +597,76 @@ async def delete_by_query(self, index, body, params=None, headers=None): Deletes documents matching the provided query. - :arg index: A comma-separated list of index names to search; use - `_all` or empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg body: The search definition using the Query DSL :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. 
:arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg analyze_wildcard: Specify whether wildcard and prefix - queries should be analyzed (default: false) - :arg analyzer: The analyzer to use for the query string - :arg conflicts: What to do when the delete by query hits version - conflicts? Valid choices: abort, proceed Default: abort + queries should be analyzed. Default is false. + :arg analyzer: The analyzer to use for the query string. + :arg conflicts: What to do when the operation encounters version + conflicts?. Valid choices are abort, proceed. :arg default_operator: The default operator for query string - query (AND or OR) Valid choices: AND, OR Default: OR + query (AND or OR). Valid choices are AND, OR. :arg df: The field to use as default where no field prefix is - given in the query string + given in the query string. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg from_: Starting offset (default: 0) + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg from_: Starting offset. Default is 0. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg lenient: Specify whether format-based query failures (such - as providing text to a numeric field) should be ignored + as providing text to a numeric field) should be ignored. :arg max_docs: Maximum number of documents to process (default: - all documents) + all documents). 
:arg preference: Specify the node or shard the operation should - be performed on (default: random) - :arg q: Query in the Lucene query string syntax - :arg refresh: Should the effected indexes be refreshed? + be performed on. Default is random. + :arg q: Query in the Lucene query string syntax. + :arg refresh: Refresh the shard containing the document before + performing the operation. :arg request_cache: Specify if request cache should be used for - this request or not, defaults to index level setting + this request or not, defaults to index level setting. :arg requests_per_second: The throttle for this request in sub- - requests per second. -1 means no throttle. - :arg routing: A comma-separated list of specific routing values + requests per second. -1 means no throttle. Default is 0. + :arg routing: Comma-separated list of specific routing values. :arg scroll: Specify how long a consistent view of the index - should be maintained for scrolled search - :arg scroll_size: Size on the scroll request powering the delete - by query Default: 100 + should be maintained for scrolled search. + :arg scroll_size: Size on the scroll request powering the + operation. Default is 100. :arg search_timeout: Explicit timeout for each search request. Defaults to no timeout. - :arg search_type: Search operation type Valid choices: - query_then_fetch, dfs_query_then_fetch - :arg size: Deprecated, please use `max_docs` instead + :arg search_type: Search operation type. Valid choices are + query_then_fetch, dfs_query_then_fetch. + :arg size: Deprecated, please use `max_docs` instead. :arg slices: The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be - set to `auto`. Default: 1 - :arg sort: A comma-separated list of : pairs + set to `auto`. Default is 1. + :arg sort: Comma-separated list of : pairs. :arg stats: Specific 'tag' of the request for logging and - statistical purposes + statistical purposes. 
:arg terminate_after: The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. :arg timeout: Time each individual bulk request should wait for - shards that are unavailable. Default: 1m - :arg version: Specify whether to return document version as part - of a hit + shards that are unavailable. Default is 1m. + :arg version: Whether to return document version as part of a + hit. :arg wait_for_active_shards: Sets the number of shard copies - that must be active before proceeding with the delete by query - operation. Defaults to 1, meaning the primary shard only. Set to `all` - for all shard copies, otherwise set to any non-negative value less than - or equal to the total number of copies for the shard (number of replicas - + 1) - :arg wait_for_completion: Should the request should block until - the delete by query is complete. Default: True + that must be active before proceeding with the operation. Defaults to 1, + meaning the primary shard only. Set to `all` for all shard copies, + otherwise set to any non-negative value less than or equal to the total + number of copies for the shard (number of replicas + 1). Default is 1. + :arg wait_for_completion: Should this request wait until the + operation has completed before returning. Default is True. """ # from is a reserved word so it cannot be used, use from_ instead if "from_" in params: @@ -685,9 +691,9 @@ async def delete_by_query_rethrottle(self, task_id, params=None, headers=None): operation. - :arg task_id: The task id to rethrottle - :arg requests_per_second: The throttle to set on this request in - floating sub-requests per second. -1 means set no throttle. + :arg task_id: The task id to rethrottle. + :arg requests_per_second: The throttle for this request in sub- + requests per second. -1 means no throttle. 
""" if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'task_id'.") @@ -699,16 +705,19 @@ async def delete_by_query_rethrottle(self, task_id, params=None, headers=None): headers=headers, ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") async def delete_script(self, id, params=None, headers=None): """ Deletes a script. - :arg id: Script ID - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + :arg id: Script ID. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") @@ -734,35 +743,33 @@ async def exists(self, index, id, params=None, headers=None): Returns information about whether a document exists in an index. - :arg index: The name of the index - :arg id: The document ID + :arg index: Index name. + :arg id: Document ID. :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. :arg preference: Specify the node or shard the operation should - be performed on (default: random) + be performed on. Default is random. 
:arg realtime: Specify whether to perform the operation in - realtime or search mode + realtime or search mode. :arg refresh: Refresh the shard containing the document before - performing the operation - :arg routing: Specific routing value - :arg stored_fields: A comma-separated list of stored fields to - return in the response - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte, force + performing the operation. + :arg routing: Routing value. + :arg stored_fields: Comma-separated list of stored fields to + return. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. Valid choices are + internal, external, external_gte, force. """ for param in (index, id): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - doc_type = "_doc" - return await self.transport.perform_request( - "HEAD", _make_path(index, doc_type, id), params=params, headers=headers + "HEAD", _make_path(index, "_doc", id), params=params, headers=headers ) @query_params( @@ -781,24 +788,24 @@ async def exists_source(self, index, id, params=None, headers=None): Returns information about whether a document source exists in an index. - :arg index: The name of the index - :arg id: The document ID + :arg index: Index name. + :arg id: Document ID. :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. 
:arg preference: Specify the node or shard the operation should - be performed on (default: random) + be performed on. Default is random. :arg realtime: Specify whether to perform the operation in - realtime or search mode + realtime or search mode. :arg refresh: Refresh the shard containing the document before - performing the operation - :arg routing: Specific routing value - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte, force + performing the operation. + :arg routing: Routing value. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. Valid choices are + internal, external, external_gte, force. """ for param in (index, id): if param in SKIP_IN_PATH: @@ -829,30 +836,30 @@ async def explain(self, index, id, body=None, params=None, headers=None): Returns information about why a specific matches (or doesn't match) a query. - :arg index: The name of the index - :arg id: The document ID + :arg index: Index name. + :arg id: Document ID. :arg body: The query definition using the Query DSL :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. :arg analyze_wildcard: Specify whether wildcards and prefix - queries in the query string query should be analyzed (default: false) - :arg analyzer: The analyzer for the query string query + queries in the query string query should be analyzed. Default is false. + :arg analyzer: The analyzer to use for the query string. 
:arg default_operator: The default operator for query string - query (AND or OR) Valid choices: AND, OR Default: OR - :arg df: The default field for query string query (default: - _all) + query (AND or OR). Valid choices are AND, OR. + :arg df: The default field for query string query. Default is + _all. :arg lenient: Specify whether format-based query failures (such - as providing text to a numeric field) should be ignored + as providing text to a numeric field) should be ignored. :arg preference: Specify the node or shard the operation should - be performed on (default: random) - :arg q: Query in the Lucene query string syntax - :arg routing: Specific routing value - :arg stored_fields: A comma-separated list of stored fields to - return in the response + be performed on. Default is random. + :arg q: Query in the Lucene query string syntax. + :arg routing: Routing value. + :arg stored_fields: Comma-separated list of stored fields to + return. """ for param in (index, id): if param in SKIP_IN_PATH: @@ -878,19 +885,19 @@ async def field_caps(self, body=None, index=None, params=None, headers=None): :arg body: An index filter specified with the Query DSL - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg fields: A comma-separated list of field names + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. 
+ :arg fields: Comma-separated list of field names. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg include_unmapped: Indicates whether unmapped fields should - be included in the response. + be included in the response. Default is false. """ return await self.transport.perform_request( "POST", @@ -917,46 +924,47 @@ async def get(self, index, id, params=None, headers=None): Returns a document. - :arg index: The name of the index - :arg id: The document ID + :arg index: Index name. + :arg id: Document ID. :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. :arg preference: Specify the node or shard the operation should - be performed on (default: random) + be performed on. Default is random. :arg realtime: Specify whether to perform the operation in - realtime or search mode + realtime or search mode. :arg refresh: Refresh the shard containing the document before - performing the operation - :arg routing: Specific routing value - :arg stored_fields: A comma-separated list of stored fields to - return in the response - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte, force + performing the operation. + :arg routing: Routing value. + :arg stored_fields: Comma-separated list of stored fields to + return. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. 
Valid choices are + internal, external, external_gte, force. """ for param in (index, id): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - doc_type = "_doc" - return await self.transport.perform_request( - "GET", _make_path(index, doc_type, id), params=params, headers=headers + "GET", _make_path(index, "_doc", id), params=params, headers=headers ) - @query_params("master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "master_timeout") async def get_script(self, id, params=None, headers=None): """ Returns a script. - :arg id: Script ID - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + :arg id: Script ID. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") @@ -981,24 +989,24 @@ async def get_source(self, index, id, params=None, headers=None): Returns the source of a document. - :arg index: The name of the index - :arg id: The document ID + :arg index: Index name. + :arg id: Document ID. :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. 
:arg preference: Specify the node or shard the operation should - be performed on (default: random) + be performed on. Default is random. :arg realtime: Specify whether to perform the operation in - realtime or search mode + realtime or search mode. :arg refresh: Refresh the shard containing the document before - performing the operation - :arg routing: Specific routing value - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte, force + performing the operation. + :arg routing: Routing value. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. Valid choices are + internal, external, external_gte, force. """ for param in (index, id): if param in SKIP_IN_PATH: @@ -1026,24 +1034,24 @@ async def mget(self, body, index=None, params=None, headers=None): :arg body: Document identifiers; can be either `docs` - (containing full document information) or `ids` (when index and type is - provided in the URL. - :arg index: The name of the index + (containing full document information) or `ids` (when index is provided + in the URL. + :arg index: Index name. :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. :arg preference: Specify the node or shard the operation should - be performed on (default: random) + be performed on. Default is random. :arg realtime: Specify whether to perform the operation in - realtime or search mode + realtime or search mode. 
:arg refresh: Refresh the shard containing the document before - performing the operation - :arg routing: Specific routing value - :arg stored_fields: A comma-separated list of stored fields to - return in the response + performing the operation. + :arg routing: Routing value. + :arg stored_fields: Comma-separated list of stored fields to + return. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -1072,30 +1080,31 @@ async def msearch(self, body, index=None, params=None, headers=None): :arg body: The request definitions (metadata-search request definition pairs), separated by newlines - :arg index: A comma-separated list of index names to use as - default + :arg index: Comma-separated list of indices to use as default. :arg ccs_minimize_roundtrips: Indicates whether network round- trips should be minimized as part of cross-cluster search requests - execution Default: true + execution. Default is True. :arg max_concurrent_searches: Controls the maximum number of - concurrent searches the multi search api will execute + concurrent searches the multi search api will execute. :arg max_concurrent_shard_requests: The number of concurrent shard requests each sub search executes concurrently per node. This value should be used to limit the impact of the search on the cluster in - order to limit the number of concurrent shard requests Default: 5 - :arg pre_filter_shard_size: A threshold that enforces a pre- - filter roundtrip to prefilter search shards based on query rewriting if - the number of shards the search request expands to exceeds the - threshold. This filter roundtrip can limit the number of shards - significantly if for instance a shard can not match any documents based - on its rewrite method ie. if date filters are mandatory to match but the - shard bounds and the query are disjoint. + order to limit the number of concurrent shard requests. Default is 5. 
+ :arg pre_filter_shard_size: Threshold that enforces a pre-filter + round-trip to prefilter search shards based on query rewriting if the + number of shards the search request expands to exceeds the threshold. + This filter round-trip can limit the number of shards significantly if + for instance a shard can not match any documents based on its rewrite + method ie. if date filters are mandatory to match but the shard bounds + and the query are disjoint. :arg rest_total_hits_as_int: Indicates whether hits.total should - be rendered as an integer or an object in the rest search response - :arg search_type: Search operation type Valid choices: - query_then_fetch, dfs_query_then_fetch + be rendered as an integer or an object in the rest search response. + Default is false. + :arg search_type: Search operation type. Valid choices are + query_then_fetch, query_and_fetch, dfs_query_then_fetch, + dfs_query_and_fetch. :arg typed_keys: Specify whether aggregation and suggester names - should be prefixed by their respective types in the response + should be prefixed by their respective types in the response. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -1123,19 +1132,20 @@ async def msearch_template(self, body, index=None, params=None, headers=None): :arg body: The request definitions (metadata-search request definition pairs), separated by newlines - :arg index: A comma-separated list of index names to use as - default + :arg index: Comma-separated list of indices to use as default. :arg ccs_minimize_roundtrips: Indicates whether network round- trips should be minimized as part of cross-cluster search requests - execution Default: true + execution. Default is True. :arg max_concurrent_searches: Controls the maximum number of - concurrent searches the multi search api will execute + concurrent searches the multi search api will execute. 
:arg rest_total_hits_as_int: Indicates whether hits.total should - be rendered as an integer or an object in the rest search response - :arg search_type: Search operation type Valid choices: - query_then_fetch, dfs_query_then_fetch + be rendered as an integer or an object in the rest search response. + Default is false. + :arg search_type: Search operation type. Valid choices are + query_then_fetch, query_and_fetch, dfs_query_then_fetch, + dfs_query_and_fetch. :arg typed_keys: Specify whether aggregation and suggester names - should be prefixed by their respective types in the response + should be prefixed by their respective types in the response. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -1175,34 +1185,34 @@ async def mtermvectors(self, body=None, index=None, params=None, headers=None): :arg field_statistics: Specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. Applies to all returned documents unless otherwise specified - in body "params" or "docs". Default: True - :arg fields: A comma-separated list of fields to return. Applies - to all returned documents unless otherwise specified in body "params" or - "docs". - :arg ids: A comma-separated list of documents ids. You must - define ids as parameter or set "ids" or "docs" in the request body + in body 'params' or 'docs'. Default is True. + :arg fields: Comma-separated list of fields to return. Applies + to all returned documents unless otherwise specified in body 'params' or + 'docs'. + :arg ids: Comma-separated list of documents ids. You must define + ids as parameter or set 'ids' or 'docs' in the request body. :arg offsets: Specifies if term offsets should be returned. Applies to all returned documents unless otherwise specified in body - "params" or "docs". Default: True + 'params' or 'docs'. Default is True. :arg payloads: Specifies if term payloads should be returned. 
Applies to all returned documents unless otherwise specified in body - "params" or "docs". Default: True + 'params' or 'docs'. Default is True. :arg positions: Specifies if term positions should be returned. Applies to all returned documents unless otherwise specified in body - "params" or "docs". Default: True + 'params' or 'docs'. Default is True. :arg preference: Specify the node or shard the operation should - be performed on (default: random) .Applies to all returned documents - unless otherwise specified in body "params" or "docs". + be performed on. Applies to all returned documents unless otherwise + specified in body 'params' or 'docs'. Default is random. :arg realtime: Specifies if requests are real-time as opposed to - near-real-time (default: true). - :arg routing: Specific routing value. Applies to all returned - documents unless otherwise specified in body "params" or "docs". + near-real-time. Default is True. + :arg routing: Routing value. Applies to all returned documents + unless otherwise specified in body 'params' or 'docs'. :arg term_statistics: Specifies if total term frequency and document frequency should be returned. Applies to all returned documents - unless otherwise specified in body "params" or "docs". - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte, force + unless otherwise specified in body 'params' or 'docs'. Default is false. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. Valid choices are + internal, external, external_gte, force. 
""" path = _make_path(index, "_mtermvectors") @@ -1210,18 +1220,21 @@ async def mtermvectors(self, body=None, index=None, params=None, headers=None): "POST", path, params=params, headers=headers, body=body ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") async def put_script(self, id, body, context=None, params=None, headers=None): """ Creates or updates a script. - :arg id: Script ID + :arg id: Script ID. :arg body: The document - :arg context: Context name to compile script against - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + :arg context: Script context. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ for param in (id, body): if param in SKIP_IN_PATH: @@ -1241,28 +1254,23 @@ async def put_script(self, id, body, context=None, params=None, headers=None): async def rank_eval(self, body, index=None, params=None, headers=None): """ Allows to evaluate the quality of ranked search results over a set of typical - search queries - + search queries. - .. warning:: - - This API is **experimental** so may include breaking changes - or be removed in a future version :arg body: The ranking evaluation search definition, including search requests, document ratings and ranking metric definition. - :arg index: A comma-separated list of index names to search; use - `_all` or empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. 
:arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg search_type: Search operation type Valid choices: - query_then_fetch, dfs_query_then_fetch + should be ignored when unavailable (missing or closed). + :arg search_type: Search operation type. Valid choices are + query_then_fetch, dfs_query_then_fetch. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -1295,24 +1303,24 @@ async def reindex(self, body, params=None, headers=None): :arg body: The search definition using the Query DSL and the prototype for the index request. :arg max_docs: Maximum number of documents to process (default: - all documents) - :arg refresh: Should the affected indexes be refreshed? - :arg requests_per_second: The throttle to set on this request in - sub-requests per second. -1 means no throttle. - :arg scroll: Control how long to keep the search context alive - Default: 5m + all documents). + :arg refresh: Should the affected indexes be refreshed?. + :arg requests_per_second: The throttle for this request in sub- + requests per second. -1 means no throttle. Default is 0. + :arg scroll: Specify how long a consistent view of the index + should be maintained for scrolled search. :arg slices: The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be - set to `auto`. Default: 1 + set to `auto`. Default is 1. 
:arg timeout: Time each individual bulk request should wait for - shards that are unavailable. Default: 1m + shards that are unavailable. Default is 1m. :arg wait_for_active_shards: Sets the number of shard copies - that must be active before proceeding with the reindex operation. - Defaults to 1, meaning the primary shard only. Set to `all` for all - shard copies, otherwise set to any non-negative value less than or equal - to the total number of copies for the shard (number of replicas + 1) - :arg wait_for_completion: Should the request should block until - the reindex is complete. Default: True + that must be active before proceeding with the operation. Defaults to 1, + meaning the primary shard only. Set to `all` for all shard copies, + otherwise set to any non-negative value less than or equal to the total + number of copies for the shard (number of replicas + 1). Default is 1. + :arg wait_for_completion: Should this request wait until the + operation has completed before returning. Default is True. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -1327,9 +1335,9 @@ async def reindex_rethrottle(self, task_id, params=None, headers=None): Changes the number of requests per second for a particular Reindex operation. - :arg task_id: The task id to rethrottle - :arg requests_per_second: The throttle to set on this request in - floating sub-requests per second. -1 means set no throttle. + :arg task_id: The task id to rethrottle. + :arg requests_per_second: The throttle for this request in sub- + requests per second. -1 means no throttle. """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'task_id'.") @@ -1350,7 +1358,7 @@ async def render_search_template( :arg body: The search definition template and its params - :arg id: The id of the stored search template + :arg id: The id of the stored search template. 
""" return await self.transport.perform_request( "POST", @@ -1363,13 +1371,8 @@ async def render_search_template( @query_params() async def scripts_painless_execute(self, body=None, params=None, headers=None): """ - Allows an arbitrary script to be executed and a result to be returned - - - .. warning:: + Allows an arbitrary script to be executed and a result to be returned. - This API is **experimental** so may include breaking changes - or be removed in a future version :arg body: The script to execute """ @@ -1389,11 +1392,12 @@ async def scroll(self, body=None, scroll_id=None, params=None, headers=None): :arg body: The scroll ID if not passed by URL or query parameter. - :arg scroll_id: The scroll ID for scrolled search + :arg scroll_id: Scroll ID. :arg rest_total_hits_as_int: Indicates whether hits.total should - be rendered as an integer or an object in the rest search response + be rendered as an integer or an object in the rest search response. + Default is false. :arg scroll: Specify how long a consistent view of the index - should be maintained for scrolled search + should be maintained for scrolled search. """ if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH: raise ValueError("You need to supply scroll_id or body.") @@ -1426,7 +1430,6 @@ async def scroll(self, body=None, scroll_id=None, params=None, headers=None): "ignore_unavailable", "lenient", "max_concurrent_shard_requests", - "min_compatible_shard_node", "pre_filter_shard_size", "preference", "q", @@ -1457,101 +1460,99 @@ async def search(self, body=None, index=None, params=None, headers=None): :arg body: The search definition using the Query DSL - :arg index: A comma-separated list of index names to search; use - `_all` or empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. 
:arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg allow_partial_search_results: Indicate if an error should - be returned if there is a partial search failure or timeout Default: - True + be returned if there is a partial search failure or timeout. Default is + True. :arg analyze_wildcard: Specify whether wildcard and prefix - queries should be analyzed (default: false) - :arg analyzer: The analyzer to use for the query string + queries should be analyzed. Default is false. + :arg analyzer: The analyzer to use for the query string. :arg batched_reduce_size: The number of shard results that should be reduced at once on the coordinating node. This value should be used as a protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large. - Default: 512 + Default is 512. :arg ccs_minimize_roundtrips: Indicates whether network round- trips should be minimized as part of cross-cluster search requests - execution Default: true + execution. Default is True. :arg default_operator: The default operator for query string - query (AND or OR) Valid choices: AND, OR Default: OR + query (AND or OR). Valid choices are AND, OR. 
:arg df: The field to use as default where no field prefix is - given in the query string - :arg docvalue_fields: A comma-separated list of fields to return - as the docvalue representation of a field for each hit + given in the query string. + :arg docvalue_fields: Comma-separated list of fields to return + as the docvalue representation of a field for each hit. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg explain: Specify whether to return detailed information - about score computation as part of a hit - :arg from_: Starting offset (default: 0) + about score computation as part of a hit. + :arg from_: Starting offset. Default is 0. :arg ignore_throttled: Whether specified concrete, expanded or - aliased indices should be ignored when throttled + aliased indices should be ignored when throttled. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg lenient: Specify whether format-based query failures (such - as providing text to a numeric field) should be ignored + as providing text to a numeric field) should be ignored. :arg max_concurrent_shard_requests: The number of concurrent shard requests per node this search executes concurrently. 
This value should be used to limit the impact of the search on the cluster in order - to limit the number of concurrent shard requests Default: 5 - :arg min_compatible_shard_node: The minimum compatible version - that all shards involved in search should have for this request to be - successful - :arg pre_filter_shard_size: A threshold that enforces a pre- - filter roundtrip to prefilter search shards based on query rewriting if - the number of shards the search request expands to exceeds the - threshold. This filter roundtrip can limit the number of shards - significantly if for instance a shard can not match any documents based - on its rewrite method ie. if date filters are mandatory to match but the - shard bounds and the query are disjoint. + to limit the number of concurrent shard requests. Default is 5. + :arg pre_filter_shard_size: Threshold that enforces a pre-filter + round-trip to prefilter search shards based on query rewriting if the + number of shards the search request expands to exceeds the threshold. + This filter round-trip can limit the number of shards significantly if + for instance a shard can not match any documents based on its rewrite + method ie. if date filters are mandatory to match but the shard bounds + and the query are disjoint. :arg preference: Specify the node or shard the operation should - be performed on (default: random) - :arg q: Query in the Lucene query string syntax + be performed on. Default is random. + :arg q: Query in the Lucene query string syntax. :arg request_cache: Specify if request cache should be used for - this request or not, defaults to index level setting + this request or not, defaults to index level setting. :arg rest_total_hits_as_int: Indicates whether hits.total should - be rendered as an integer or an object in the rest search response - :arg routing: A comma-separated list of specific routing values + be rendered as an integer or an object in the rest search response. + Default is false. 
+ :arg routing: Comma-separated list of specific routing values. :arg scroll: Specify how long a consistent view of the index - should be maintained for scrolled search - :arg search_type: Search operation type Valid choices: - query_then_fetch, dfs_query_then_fetch + should be maintained for scrolled search. + :arg search_type: Search operation type. Valid choices are + query_then_fetch, dfs_query_then_fetch. :arg seq_no_primary_term: Specify whether to return sequence - number and primary term of the last modification of each hit - :arg size: Number of hits to return (default: 10) - :arg sort: A comma-separated list of : pairs + number and primary term of the last modification of each hit. + :arg size: Number of hits to return. Default is 10. + :arg sort: Comma-separated list of : pairs. :arg stats: Specific 'tag' of the request for logging and - statistical purposes - :arg stored_fields: A comma-separated list of stored fields to - return as part of a hit - :arg suggest_field: Specify which field to use for suggestions - :arg suggest_mode: Specify suggest mode Valid choices: missing, - popular, always Default: missing - :arg suggest_size: How many suggestions to return in response + statistical purposes. + :arg stored_fields: Comma-separated list of stored fields to + return. + :arg suggest_field: Specify which field to use for suggestions. + :arg suggest_mode: Specify suggest mode. Valid choices are + missing, popular, always. + :arg suggest_size: How many suggestions to return in response. :arg suggest_text: The source text for which the suggestions - should be returned + should be returned. :arg terminate_after: The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. - :arg timeout: Explicit operation timeout + :arg timeout: Operation timeout. :arg track_scores: Whether to calculate and return scores even - if they are not used for sorting + if they are not used for sorting. 
:arg track_total_hits: Indicate if the number of documents that - match the query should be tracked + match the query should be tracked. :arg typed_keys: Specify whether aggregation and suggester names - should be prefixed by their respective types in the response - :arg version: Specify whether to return document version as part - of a hit + should be prefixed by their respective types in the response. + :arg version: Whether to return document version as part of a + hit. """ # from is a reserved word so it cannot be used, use from_ instead if "from_" in params: @@ -1579,21 +1580,21 @@ async def search_shards(self, index=None, params=None, headers=None): executed against. - :arg index: A comma-separated list of index names to search; use - `_all` or empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) + from cluster-manager node. Default is false. :arg preference: Specify the node or shard the operation should - be performed on (default: random) - :arg routing: Specific routing value + be performed on. Default is random. + :arg routing: Routing value. 
""" return await self.transport.perform_request( "GET", _make_path(index, "_search_shards"), params=params, headers=headers @@ -1620,35 +1621,37 @@ async def search_template(self, body, index=None, params=None, headers=None): :arg body: The search definition template and its params - :arg index: A comma-separated list of index names to search; use - `_all` or empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg ccs_minimize_roundtrips: Indicates whether network round- trips should be minimized as part of cross-cluster search requests - execution Default: true + execution. Default is True. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg explain: Specify whether to return detailed information - about score computation as part of a hit + about score computation as part of a hit. :arg ignore_throttled: Whether specified concrete, expanded or - aliased indices should be ignored when throttled + aliased indices should be ignored when throttled. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg preference: Specify the node or shard the operation should - be performed on (default: random) - :arg profile: Specify whether to profile the query execution + be performed on. Default is random. + :arg profile: Specify whether to profile the query execution. 
:arg rest_total_hits_as_int: Indicates whether hits.total should - be rendered as an integer or an object in the rest search response - :arg routing: A comma-separated list of specific routing values + be rendered as an integer or an object in the rest search response. + Default is false. + :arg routing: Comma-separated list of specific routing values. :arg scroll: Specify how long a consistent view of the index - should be maintained for scrolled search - :arg search_type: Search operation type Valid choices: - query_then_fetch, dfs_query_then_fetch + should be maintained for scrolled search. + :arg search_type: Search operation type. Valid choices are + query_then_fetch, query_and_fetch, dfs_query_then_fetch, + dfs_query_and_fetch. :arg typed_keys: Specify whether aggregation and suggester names - should be prefixed by their respective types in the response + should be prefixed by their respective types in the response. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -1683,28 +1686,28 @@ async def termvectors(self, index, body=None, id=None, params=None, headers=None :arg index: The index in which the document resides. :arg body: Define parameters and or supply a document to get termvectors for. See documentation. - :arg id: The id of the document, when not specified a doc param - should be supplied. + :arg id: Document ID. When not specified a doc param should be + supplied. :arg field_statistics: Specifies if document count, sum of document frequencies and sum of total term frequencies should be - returned. Default: True - :arg fields: A comma-separated list of fields to return. + returned. Default is True. + :arg fields: Comma-separated list of fields to return. :arg offsets: Specifies if term offsets should be returned. - Default: True + Default is True. :arg payloads: Specifies if term payloads should be returned. - Default: True + Default is True. 
:arg positions: Specifies if term positions should be returned. - Default: True + Default is True. :arg preference: Specify the node or shard the operation should - be performed on (default: random). + be performed on. Default is random. :arg realtime: Specifies if request is real-time as opposed to - near-real-time (default: true). - :arg routing: Specific routing value. + near-real-time. Default is True. + :arg routing: Routing value. :arg term_statistics: Specifies if total term frequency and - document frequency should be returned. - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte, force + document frequency should be returned. Default is false. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. Valid choices are + internal, external, external_gte, force. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") @@ -1734,38 +1737,36 @@ async def update(self, index, id, body, params=None, headers=None): Updates a document with a script or partial document. - :arg index: The name of the index - :arg id: Document ID + :arg index: Index name. + :arg id: Document ID. 
:arg body: The request definition requires either `script` or partial `doc` :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field - :arg if_primary_term: only perform the update operation if the - last operation that has changed the document has the specified primary - term - :arg if_seq_no: only perform the update operation if the last - operation that has changed the document has the specified sequence - number - :arg lang: The script language (default: painless) + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. + :arg if_primary_term: only perform the operation if the last + operation that has changed the document has the specified primary term. + :arg if_seq_no: only perform the operation if the last operation + that has changed the document has the specified sequence number. + :arg lang: The script language. Default is painless. :arg refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then - do nothing with refreshes. Valid choices: true, false, wait_for - :arg require_alias: When true, requires destination is an alias. - Default is false + do nothing with refreshes. Valid choices are true, false, wait_for. + :arg require_alias: When true, requires destination to be an + alias. Default is false. :arg retry_on_conflict: Specify how many times should the - operation be retried when a conflict occurs (default: 0) - :arg routing: Specific routing value - :arg timeout: Explicit operation timeout + operation be retried when a conflict occurs. 
Default is 0. + :arg routing: Routing value. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Sets the number of shard copies - that must be active before proceeding with the update operation. - Defaults to 1, meaning the primary shard only. Set to `all` for all - shard copies, otherwise set to any non-negative value less than or equal - to the total number of copies for the shard (number of replicas + 1) + that must be active before proceeding with the operation. Defaults to 1, + meaning the primary shard only. Set to `all` for all shard copies, + otherwise set to any non-negative value less than or equal to the total + number of copies for the shard (number of replicas + 1). Default is 1. """ for param in (index, id, body): if param in SKIP_IN_PATH: @@ -1810,7 +1811,6 @@ async def update(self, index, id, body, params=None, headers=None): "terminate_after", "timeout", "version", - "version_type", "wait_for_active_shards", "wait_for_completion", ) @@ -1820,80 +1820,77 @@ async def update_by_query(self, index, body=None, params=None, headers=None): for example to pick up a mapping change. - :arg index: A comma-separated list of index names to search; use - `_all` or empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg body: The search definition using the Query DSL :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. 
:arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg analyze_wildcard: Specify whether wildcard and prefix - queries should be analyzed (default: false) - :arg analyzer: The analyzer to use for the query string - :arg conflicts: What to do when the update by query hits version - conflicts? Valid choices: abort, proceed Default: abort + queries should be analyzed. Default is false. + :arg analyzer: The analyzer to use for the query string. + :arg conflicts: What to do when the operation encounters version + conflicts?. Valid choices are abort, proceed. :arg default_operator: The default operator for query string - query (AND or OR) Valid choices: AND, OR Default: OR + query (AND or OR). Valid choices are AND, OR. :arg df: The field to use as default where no field prefix is - given in the query string + given in the query string. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg from_: Starting offset (default: 0) + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg from_: Starting offset. Default is 0. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg lenient: Specify whether format-based query failures (such - as providing text to a numeric field) should be ignored + as providing text to a numeric field) should be ignored. :arg max_docs: Maximum number of documents to process (default: - all documents) - :arg pipeline: Ingest pipeline to set on index requests made by - this action. (default: none) + all documents). 
+ :arg pipeline: The pipeline id to preprocess incoming documents + with. :arg preference: Specify the node or shard the operation should - be performed on (default: random) - :arg q: Query in the Lucene query string syntax - :arg refresh: Should the affected indexes be refreshed? + be performed on. Default is random. + :arg q: Query in the Lucene query string syntax. + :arg refresh: Should the affected indexes be refreshed?. :arg request_cache: Specify if request cache should be used for - this request or not, defaults to index level setting - :arg requests_per_second: The throttle to set on this request in - sub-requests per second. -1 means no throttle. - :arg routing: A comma-separated list of specific routing values + this request or not, defaults to index level setting. + :arg requests_per_second: The throttle for this request in sub- + requests per second. -1 means no throttle. Default is 0. + :arg routing: Comma-separated list of specific routing values. :arg scroll: Specify how long a consistent view of the index - should be maintained for scrolled search - :arg scroll_size: Size on the scroll request powering the update - by query Default: 100 + should be maintained for scrolled search. + :arg scroll_size: Size on the scroll request powering the + operation. Default is 100. :arg search_timeout: Explicit timeout for each search request. Defaults to no timeout. - :arg search_type: Search operation type Valid choices: - query_then_fetch, dfs_query_then_fetch - :arg size: Deprecated, please use `max_docs` instead + :arg search_type: Search operation type. Valid choices are + query_then_fetch, dfs_query_then_fetch. + :arg size: Deprecated, please use `max_docs` instead. :arg slices: The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be - set to `auto`. Default: 1 - :arg sort: A comma-separated list of : pairs + set to `auto`. Default is 1. + :arg sort: Comma-separated list of : pairs. 
:arg stats: Specific 'tag' of the request for logging and - statistical purposes + statistical purposes. :arg terminate_after: The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. :arg timeout: Time each individual bulk request should wait for - shards that are unavailable. Default: 1m - :arg version: Specify whether to return document version as part - of a hit - :arg version_type: Should the document increment the version - number (internal) on hit or not (reindex) + shards that are unavailable. Default is 1m. + :arg version: Whether to return document version as part of a + hit. :arg wait_for_active_shards: Sets the number of shard copies - that must be active before proceeding with the update by query - operation. Defaults to 1, meaning the primary shard only. Set to `all` - for all shard copies, otherwise set to any non-negative value less than - or equal to the total number of copies for the shard (number of replicas - + 1) - :arg wait_for_completion: Should the request should block until - the update by query operation is complete. Default: True + that must be active before proceeding with the operation. Defaults to 1, + meaning the primary shard only. Set to `all` for all shard copies, + otherwise set to any non-negative value less than or equal to the total + number of copies for the shard (number of replicas + 1). Default is 1. + :arg wait_for_completion: Should this request wait until the + operation has completed before returning. Default is True. """ # from is a reserved word so it cannot be used, use from_ instead if "from_" in params: @@ -1917,9 +1914,9 @@ async def update_by_query_rethrottle(self, task_id, params=None, headers=None): operation. - :arg task_id: The task id to rethrottle - :arg requests_per_second: The throttle to set on this request in - floating sub-requests per second. -1 means set no throttle. + :arg task_id: The task id to rethrottle. 
+ :arg requests_per_second: The throttle for this request in sub- + requests per second. -1 means no throttle. """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'task_id'.") @@ -1936,11 +1933,6 @@ async def get_script_context(self, params=None, headers=None): """ Returns all script contexts. - - .. warning:: - - This API is **experimental** so may include breaking changes - or be removed in a future version """ return await self.transport.perform_request( "GET", "/_script_context", params=params, headers=headers @@ -1949,13 +1941,8 @@ async def get_script_context(self, params=None, headers=None): @query_params() async def get_script_languages(self, params=None, headers=None): """ - Returns available script types, languages and contexts - - - .. warning:: + Returns available script types, languages and contexts. - This API is **experimental** so may include breaking changes - or be removed in a future version """ return await self.transport.perform_request( "GET", "/_script_language", params=params, headers=headers @@ -1978,11 +1965,11 @@ async def create_pit(self, index, params=None, headers=None): :arg allow_partial_pit_creation: Allow if point in time can be created with partial failures. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: all, - open, closed, hidden, none + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg keep_alive: Specify the keep alive for point in time. :arg preference: Specify the node or shard the operation should - be performed on. + be performed on. Default is random. :arg routing: Comma-separated list of specific routing values. """ if index in SKIP_IN_PATH: @@ -2011,7 +1998,7 @@ async def delete_pit(self, body=None, params=None, headers=None): Deletes one or more point in time searches based on the IDs passed. 
- :arg body: a point-in-time id to delete + :arg body: The point-in-time ids to be deleted """ return await self.transport.perform_request( "DELETE", @@ -2025,36 +2012,8 @@ async def delete_pit(self, body=None, params=None, headers=None): async def get_all_pits(self, params=None, headers=None): """ Lists all active point in time searches. - """ - return await self.transport.perform_request( - "GET", "/_search/point_in_time/_all", params=params, headers=headers - ) - - @query_params() - async def terms_enum(self, index, body=None, params=None, headers=None): - """ - The terms enum API can be used to discover terms in the index that begin with - the provided string. It is designed for low-latency look-ups used in auto- - complete scenarios. - - - .. warning:: - This API is **beta** so may include breaking changes - or be removed in a future version - - :arg index: A comma-separated list of index names to search; use - `_all` or empty string to perform the operation on all indices - :arg body: field name, string which is the prefix expected in - matching terms, timeout and size for max number of results """ - if index in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'index'.") - return await self.transport.perform_request( - "POST", - _make_path(index, "_terms_enum"), - params=params, - headers=headers, - body=body, + "GET", "/_search/point_in_time/_all", params=params, headers=headers ) diff --git a/opensearchpy/_async/client/__init__.pyi b/opensearchpy/_async/client/__init__.pyi index a016d791..32ea967e 100644 --- a/opensearchpy/_async/client/__init__.pyi +++ b/opensearchpy/_async/client/__init__.pyi @@ -25,6 +25,15 @@ # specific language governing permissions and limitations # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from __future__ import unicode_literals import logging @@ -39,6 +48,7 @@ from .indices import IndicesClient from .ingest import IngestClient from .nodes import NodesClient from .remote import RemoteClient +from .remote_store import RemoteStoreClient from .security import SecurityClient from .snapshot import SnapshotClient from .tasks import TasksClient @@ -58,6 +68,7 @@ class AsyncOpenSearch(object): security: SecurityClient snapshot: SnapshotClient tasks: TasksClient + remote_store: RemoteStoreClient def __init__( self, hosts: Any = ..., @@ -333,8 +344,8 @@ class AsyncOpenSearch(object): self, id: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -490,8 +501,8 @@ class AsyncOpenSearch(object): self, id: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -642,8 +653,8 @@ class AsyncOpenSearch(object): *, body: Any, context: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -802,7 +813,6 @@ class AsyncOpenSearch(object): ignore_unavailable: Optional[Any] = ..., lenient: Optional[Any] = ..., max_concurrent_shard_requests: 
Optional[Any] = ..., - min_compatible_shard_node: Optional[Any] = ..., pre_filter_shard_size: Optional[Any] = ..., preference: Optional[Any] = ..., q: Optional[Any] = ..., @@ -991,7 +1001,6 @@ class AsyncOpenSearch(object): terminate_after: Optional[Any] = ..., timeout: Optional[Any] = ..., version: Optional[Any] = ..., - version_type: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., wait_for_completion: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -1128,21 +1137,3 @@ class AsyncOpenSearch(object): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... - async def terms_enum( - self, - index: Any, - *, - body: Optional[Any] = ..., - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... 
diff --git a/opensearchpy/_async/client/_patch.py b/opensearchpy/_async/client/_patch.py index b1b00942..f3a953c0 100644 --- a/opensearchpy/_async/client/_patch.py +++ b/opensearchpy/_async/client/_patch.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/client/_patch.pyi b/opensearchpy/_async/client/_patch.pyi index 1912c180..d49a7fec 100644 --- a/opensearchpy/_async/client/_patch.pyi +++ b/opensearchpy/_async/client/_patch.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/client/cat.py b/opensearchpy/_async/client/cat.py index a4dd9786..2c2b01c0 100644 --- a/opensearchpy/_async/client/cat.py +++ b/opensearchpy/_async/client/cat.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -48,17 +49,17 @@ async def aliases(self, name=None, params=None, headers=None): :arg name: Comma-separated list of alias names. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: all, - open, closed, hidden, none + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. 
Display column headers. Default is false. """ return await self.transport.perform_request( "GET", _make_path("_cat", "aliases", name), params=params, headers=headers @@ -83,22 +84,22 @@ async def allocation(self, node_id=None, params=None, headers=None): :arg node_id: Comma-separated list of node IDs or names to limit the returned information. - :arg bytes: The unit in which to display byte values. Valid - choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg bytes: The unit in which to display byte values. Valid + choices are b, k, kb, m, mb, g, gb, t, tb, p, pb. :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return await self.transport.perform_request( "GET", @@ -119,10 +120,10 @@ async def count(self, index=None, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. 
Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return await self.transport.perform_request( "GET", _make_path("_cat", "count", index), params=params, headers=headers @@ -137,13 +138,13 @@ async def health(self, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg ts: Set to false to disable timestamping. (default: True) - :arg v: Verbose mode. Display column headers. (default: false) + :arg time: The unit in which to display time values. Valid + choices are d, h, m, s, ms, micros, nanos. + :arg ts: Set to false to disable timestamping. Default is True. + :arg v: Verbose mode. Display column headers. Default is false. """ return await self.transport.perform_request( "GET", "/_cat/health", params=params, headers=headers @@ -155,7 +156,7 @@ async def help(self, params=None, headers=None): Returns help for the Cat APIs. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg s: Comma-separated list of column names or column aliases to sort by. """ @@ -187,35 +188,35 @@ async def indices(self, index=None, params=None, headers=None): :arg index: Comma-separated list of indices to limit the returned information. - :arg bytes: The unit in which to display byte values. Valid - choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg bytes: The unit in which to display byte values. Valid + choices are b, k, kb, m, mb, g, gb, t, tb, p, pb. :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. 
:arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: all, - open, closed, hidden, none + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. :arg health: Health status ('green', 'yellow', or 'red') to - filter only indices matching the specified health status. Valid - choices: green, yellow, red - :arg help: Return help information. (default: false) + filter only indices matching the specified health status. Valid choices + are green, yellow, red. + :arg help: Return help information. Default is false. :arg include_unloaded_segments: If set to true segment stats will include stats for segments that are not currently loaded into - memory. (default: false) + memory. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg pri: Set to true to return stats only for primary shards. - (default: false) + Default is false. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers. (default: false) + :arg time: The unit in which to display time values. Valid + choices are d, h, m, s, ms, micros, nanos. + :arg v: Verbose mode. Display column headers. Default is false. 
""" return await self.transport.perform_request( "GET", _make_path("_cat", "indices", index), params=params, headers=headers @@ -241,15 +242,15 @@ async def master(self, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ from warnings import warn @@ -280,15 +281,15 @@ async def cluster_manager(self, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. 
Default is false. """ return await self.transport.perform_request( "GET", "/_cat/cluster_manager", params=params, headers=headers @@ -312,27 +313,27 @@ async def nodes(self, params=None, headers=None): Returns basic statistics about performance of cluster nodes. - :arg bytes: The unit in which to display byte values. Valid - choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg bytes: The unit in which to display byte values. Valid + choices are b, k, kb, m, mb, g, gb, t, tb, p, pb. :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg format: A short version of the Accept header, e.g. json, yaml. :arg full_id: Return the full node ID instead of the shortened - version. (default: false) + version. Default is false. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local (Deprecated: This parameter does not cause this API - to act locally): Return local information, do not retrieve the state - from cluster-manager node. (default: false) + to act locally.): Return local information, do not retrieve the state + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers. (default: false) + :arg time: The unit in which to display time values. Valid + choices are d, h, m, s, ms, micros, nanos. + :arg v: Verbose mode. Display column headers. Default is false. 
""" return await self.transport.perform_request( "GET", "/_cat/nodes", params=params, headers=headers @@ -349,20 +350,20 @@ async def recovery(self, index=None, params=None, headers=None): :arg index: Comma-separated list or wildcard expression of index names to limit the returned information. :arg active_only: If `true`, the response only includes ongoing - shard recoveries. (default: false) - :arg bytes: The unit in which to display byte values. Valid - choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + shard recoveries. Default is false. + :arg bytes: The unit in which to display byte values. Valid + choices are b, k, kb, m, mb, g, gb, t, tb, p, pb. :arg detailed: If `true`, the response includes detailed - information about shard recoveries. (default: false) + information about shard recoveries. Default is false. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers. (default: false) + :arg time: The unit in which to display time values. Valid + choices are d, h, m, s, ms, micros, nanos. + :arg v: Verbose mode. Display column headers. Default is false. """ return await self.transport.perform_request( "GET", _make_path("_cat", "recovery", index), params=params, headers=headers @@ -387,24 +388,24 @@ async def shards(self, index=None, params=None, headers=None): :arg index: Comma-separated list of indices to limit the returned information. - :arg bytes: The unit in which to display byte values. Valid - choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg bytes: The unit in which to display byte values. Valid + choices are b, k, kb, m, mb, g, gb, t, tb, p, pb. 
:arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers. (default: false) + :arg time: The unit in which to display time values. Valid + choices are d, h, m, s, ms, micros, nanos. + :arg v: Verbose mode. Display column headers. Default is false. """ return await self.transport.perform_request( "GET", _make_path("_cat", "shards", index), params=params, headers=headers @@ -427,20 +428,20 @@ async def segments(self, index=None, params=None, headers=None): :arg index: Comma-separated list of indices to limit the returned information. - :arg bytes: The unit in which to display byte values. Valid - choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg bytes: The unit in which to display byte values. Valid + choices are b, k, kb, m, mb, g, gb, t, tb, p, pb. :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. 
:arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return await self.transport.perform_request( "GET", _make_path("_cat", "segments", index), params=params, headers=headers @@ -467,17 +468,17 @@ async def pending_tasks(self, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers. (default: false) + :arg time: The unit in which to display time values. Valid + choices are d, h, m, s, ms, micros, nanos. + :arg v: Verbose mode. Display column headers. Default is false. """ return await self.transport.perform_request( "GET", "/_cat/pending_tasks", params=params, headers=headers @@ -507,16 +508,16 @@ async def thread_pool(self, thread_pool_patterns=None, params=None, headers=None :arg format: A short version of the Accept header, e.g. 
json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. :arg size: The multiplier in which to display values. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return await self.transport.perform_request( "GET", @@ -534,15 +535,15 @@ async def fielddata(self, fields=None, params=None, headers=None): :arg fields: Comma-separated list of fields to return in the output. - :arg bytes: The unit in which to display byte values. Valid - choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg bytes: The unit in which to display byte values. Valid + choices are b, k, kb, m, mb, g, gb, t, tb, p, pb. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return await self.transport.perform_request( "GET", @@ -571,15 +572,15 @@ async def plugins(self, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. 
- :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return await self.transport.perform_request( "GET", "/_cat/plugins", params=params, headers=headers @@ -605,15 +606,15 @@ async def nodeattrs(self, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return await self.transport.perform_request( "GET", "/_cat/nodeattrs", params=params, headers=headers @@ -639,15 +640,15 @@ async def repositories(self, params=None, headers=None): :arg format: A short version of the Accept header, e.g. 
json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return await self.transport.perform_request( "GET", "/_cat/repositories", params=params, headers=headers @@ -675,17 +676,18 @@ async def snapshots(self, repository=None, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed). (default: false) + should be ignored when unavailable (missing or closed). Default is + false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers. (default: false) + :arg time: The unit in which to display time values. 
Valid + choices are d, h, m, s, ms, micros, nanos. + :arg v: Verbose mode. Display column headers. Default is false. """ return await self.transport.perform_request( "GET", @@ -714,12 +716,12 @@ async def tasks(self, params=None, headers=None): :arg actions: Comma-separated list of actions that should be returned. Leave empty to return all. - :arg detailed: Return detailed task information. (default: - false) + :arg detailed: Return detailed task information. Default is + false. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg nodes: Comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all @@ -728,9 +730,9 @@ async def tasks(self, params=None, headers=None): (node_id:task_number). Set to -1 to return all. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers. (default: false) + :arg time: The unit in which to display time values. Valid + choices are d, h, m, s, ms, micros, nanos. + :arg v: Verbose mode. Display column headers. Default is false. """ return await self.transport.perform_request( "GET", "/_cat/tasks", params=params, headers=headers @@ -757,15 +759,15 @@ async def templates(self, name=None, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. 
(default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return await self.transport.perform_request( "GET", _make_path("_cat", "templates", name), params=params, headers=headers @@ -787,7 +789,6 @@ async def pit_segments(self, body=None, params=None, headers=None): List segments for one or several PITs. - :arg body: """ return await self.transport.perform_request( "GET", "/_cat/pit_segments", params=params, headers=headers, body=body @@ -815,23 +816,23 @@ async def segment_replication(self, index=None, params=None, headers=None): :arg index: Comma-separated list or wildcard expression of index names to limit the returned information. :arg active_only: If `true`, the response only includes ongoing - segment replication events. (default: false) - :arg bytes: The unit in which to display byte values. Valid - choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + segment replication events. Default is false. + :arg bytes: The unit in which to display byte values. Valid + choices are b, k, kb, m, mb, g, gb, t, tb, p, pb. :arg completed_only: If `true`, the response only includes - latest completed segment replication events. (default: false) + latest completed segment replication events. Default is false. :arg detailed: If `true`, the response includes detailed - information about segment replications. (default: false) + information about segment replications. Default is false. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. 
- :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg s: Comma-separated list of column names or column aliases to sort by. :arg shards: Comma-separated list of shards to display. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers. (default: false) + :arg time: The unit in which to display time values. Valid + choices are d, h, m, s, ms, micros, nanos. + :arg v: Verbose mode. Display column headers. Default is false. """ return await self.transport.perform_request( "GET", diff --git a/opensearchpy/_async/client/cat.pyi b/opensearchpy/_async/client/cat.pyi index 435403e9..404400cd 100644 --- a/opensearchpy/_async/client/cat.pyi +++ b/opensearchpy/_async/client/cat.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/client/cluster.py b/opensearchpy/_async/client/cluster.py index b64bdc5b..8bd55390 100644 --- a/opensearchpy/_async/client/cluster.py +++ b/opensearchpy/_async/client/cluster.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -65,22 +66,22 @@ async def health(self, index=None, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: all, - open, closed, hidden, none + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg level: Specify the level of detail for returned - information. Valid choices: cluster, indices, shards, - awareness_attributes + information. Valid choices are cluster, indices, shards, + awareness_attributes. 
:arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. :arg wait_for_active_shards: Wait until the specified number of shards is active. :arg wait_for_events: Wait until all currently queued events - with the given priority are processed. Valid choices: immediate, - urgent, high, normal, low, languid + with the given priority are processed. Valid choices are immediate, + urgent, high, normal, low, languid. :arg wait_for_no_initializing_shards: Whether to wait until there are no initializing shards in the cluster. :arg wait_for_no_relocating_shards: Whether to wait until there @@ -88,7 +89,7 @@ async def health(self, index=None, params=None, headers=None): :arg wait_for_nodes: Wait until the specified number of nodes is available. :arg wait_for_status: Wait until cluster is in a specific state. - Valid choices: green, yellow, red + Valid choices are green, yellow, red. """ return await self.transport.perform_request( "GET", @@ -107,10 +108,10 @@ async def pending_tasks(self, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. 
""" return await self.transport.perform_request( "GET", "/_cluster/pending_tasks", params=params, headers=headers @@ -133,8 +134,8 @@ async def state(self, metric=None, index=None, params=None, headers=None): :arg metric: Limit the information returned to the specified - metrics. Valid choices: _all, blocks, metadata, nodes, routing_table, - routing_nodes, master_node, cluster_manager_node, version + metrics. Valid choices are _all, blocks, metadata, nodes, routing_table, + routing_nodes, master_node, cluster_manager_node, version. :arg index: Comma-separated list of indices; use `_all` or empty string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices @@ -143,17 +144,17 @@ async def state(self, metric=None, index=None, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: all, - open, closed, hidden, none - :arg flat_settings: Return settings in flat format. (default: - false) + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg flat_settings: Return settings in flat format. Default is + false. :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed). :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg wait_for_metadata_version: Wait for the metadata version to be equal or greater than the specified metadata version. 
:arg wait_for_timeout: The maximum time to wait for @@ -179,8 +180,8 @@ async def stats(self, node_id=None, params=None, headers=None): the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes. - :arg flat_settings: Return settings in flat format. (default: - false) + :arg flat_settings: Return settings in flat format. Default is + false. :arg timeout: Operation timeout. """ return await self.transport.perform_request( @@ -215,8 +216,8 @@ async def reroute(self, body=None, params=None, headers=None): :arg explain: Return an explanation of why the commands can or cannot be executed. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg metric: Limit the information returned to the specified metrics. Defaults to all but metadata. :arg retry_failed: Retries allocation of shards that are blocked @@ -241,13 +242,13 @@ async def get_settings(self, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. - :arg flat_settings: Return settings in flat format. (default: - false) + :arg flat_settings: Return settings in flat format. Default is + false. :arg include_defaults: Whether to return all default clusters - setting. (default: false) + setting. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. """ return await self.transport.perform_request( @@ -266,11 +267,11 @@ async def put_settings(self, body, params=None, headers=None): or `persistent` (survives cluster restart). 
:arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. - :arg flat_settings: Return settings in flat format. (default: - false) + :arg flat_settings: Return settings in flat format. Default is + false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. """ if body in SKIP_IN_PATH: @@ -299,9 +300,9 @@ async def allocation_explain(self, body=None, params=None, headers=None): :arg body: The index, shard, and primary flag to explain. Empty means 'explain the first unassigned shard' :arg include_disk_info: Return information about disk usage and - shard sizes. (default: false) + shard sizes. Default is false. :arg include_yes_decisions: Return 'YES' decisions in - explanation. (default: false) + explanation. Default is false. """ return await self.transport.perform_request( "POST", @@ -321,8 +322,8 @@ async def delete_component_template(self, name, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. """ if name in SKIP_IN_PATH: @@ -345,10 +346,10 @@ async def get_component_template(self, name=None, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. 
:arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ return await self.transport.perform_request( "GET", @@ -368,10 +369,10 @@ async def put_component_template(self, name, body, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg create: Whether the index template should only be added if - new or can also replace an existing one. (default: false) + new or can also replace an existing one. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. """ for param in (name, body): @@ -386,18 +387,20 @@ async def put_component_template(self, name, body, params=None, headers=None): body=body, ) - @query_params("local", "master_timeout") + @query_params("cluster_manager_timeout", "local", "master_timeout") async def exists_component_template(self, name, params=None, headers=None): """ Returns information about whether a particular component template exist. :arg name: The name of the template. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -417,7 +420,7 @@ async def delete_voting_config_exclusions(self, params=None, headers=None): :arg wait_for_removal: Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting - configuration exclusions list. (default: True) + configuration exclusions list. Default is True. """ return await self.transport.perform_request( "DELETE", diff --git a/opensearchpy/_async/client/cluster.pyi b/opensearchpy/_async/client/cluster.pyi index 2685cbb5..74f88694 100644 --- a/opensearchpy/_async/client/cluster.pyi +++ b/opensearchpy/_async/client/cluster.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -300,6 +301,7 @@ class ClusterClient(NamespacedClient): self, name: Any, *, + cluster_manager_timeout: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., diff --git a/opensearchpy/_async/client/dangling_indices.py b/opensearchpy/_async/client/dangling_indices.py index cf382c52..bc886d65 100644 --- a/opensearchpy/_async/client/dangling_indices.py +++ b/opensearchpy/_async/client/dangling_indices.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -53,8 +54,8 @@ async def delete_dangling_index(self, index_uuid, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. 
""" if index_uuid in SKIP_IN_PATH: @@ -81,8 +82,8 @@ async def import_dangling_index(self, index_uuid, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. """ if index_uuid in SKIP_IN_PATH: diff --git a/opensearchpy/_async/client/dangling_indices.pyi b/opensearchpy/_async/client/dangling_indices.pyi index 17ab1ac8..d9dea8a1 100644 --- a/opensearchpy/_async/client/dangling_indices.pyi +++ b/opensearchpy/_async/client/dangling_indices.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/client/features.py b/opensearchpy/_async/client/features.py index 7922f955..e2c1bb7d 100644 --- a/opensearchpy/_async/client/features.py +++ b/opensearchpy/_async/client/features.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/client/features.pyi b/opensearchpy/_async/client/features.pyi index 96acb588..38fb992e 100644 --- a/opensearchpy/_async/client/features.pyi +++ b/opensearchpy/_async/client/features.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/client/indices.py b/opensearchpy/_async/client/indices.py index d58a3fb5..b83cb73c 100644 --- a/opensearchpy/_async/client/indices.py +++ b/opensearchpy/_async/client/indices.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to 
@@ -25,6 +26,16 @@ # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params @@ -38,7 +49,7 @@ async def analyze(self, body=None, index=None, params=None, headers=None): :arg body: Define analyzer/tokenizer parameters and the text on which the analysis should be performed - :arg index: The name of the index to scope the operation + :arg index: The name of the index to scope the operation. """ return await self.transport.perform_request( "POST", @@ -54,16 +65,16 @@ async def refresh(self, index=None, params=None, headers=None): Performs the refresh operation in one or more indices. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. 
:arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). """ return await self.transport.perform_request( "POST", _make_path(index, "_refresh"), params=params, headers=headers @@ -81,44 +92,47 @@ async def flush(self, index=None, params=None, headers=None): Performs the flush operation on one or more indices. - :arg index: A comma-separated list of index names; use `_all` or - empty string for all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg force: Whether a flush should be forced even if it is not necessarily needed ie. if no changes will be committed to the index. This is useful if transaction log IDs should be incremented even if no uncommitted changes are present. (This setting can be considered as - internal) + internal). :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg wait_if_ongoing: If set to true the flush operation will block until the flush can be executed if another flush operation is - already executing. The default is true. If set to false the flush will - be skipped iff if another flush operation is already running. + already executing. 
If set to false the flush will be skipped iff if + another flush operation is already running. Default is True. """ return await self.transport.perform_request( "POST", _make_path(index, "_flush"), params=params, headers=headers ) @query_params( - "master_timeout", "cluster_manager_timeout", "timeout", "wait_for_active_shards" + "cluster_manager_timeout", "master_timeout", "timeout", "wait_for_active_shards" ) async def create(self, index, body=None, params=None, headers=None): """ Creates an index with optional settings and mappings. - :arg index: The name of the index + :arg index: Index name. :arg body: The configuration for the index (`settings` and `mappings`) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Set the number of active shards to wait for before the operation returns. """ @@ -130,20 +144,23 @@ async def create(self, index, body=None, params=None, headers=None): ) @query_params( - "master_timeout", "cluster_manager_timeout", "timeout", "wait_for_active_shards" + "cluster_manager_timeout", "master_timeout", "timeout", "wait_for_active_shards" ) async def clone(self, index, target, body=None, params=None, headers=None): """ - Clones an index + Clones an index. - :arg index: The name of the source index to clone - :arg target: The name of the target index to clone into + :arg index: The name of the source index to clone. + :arg target: The name of the target index. 
:arg body: The configuration for the target index (`settings` and `aliases`) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Set the number of active shards to wait for on the cloned index before the operation returns. """ @@ -161,35 +178,40 @@ async def clone(self, index, target, body=None, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "flat_settings", "ignore_unavailable", "include_defaults", "local", "master_timeout", - "cluster_manager_timeout", ) async def get(self, index, params=None, headers=None): """ Returns information about one or more indices. - :arg index: A comma-separated list of index names - :arg allow_no_indices: Ignore if a wildcard expression resolves - to no concrete indices (default: false) - :arg expand_wildcards: Whether wildcard expressions should get - expanded to open or closed indices (default: open) Valid choices: open, - closed, hidden, none, all Default: open - :arg flat_settings: Return settings in flat format (default: - false) - :arg ignore_unavailable: Ignore unavailable indexes (default: - false) + :arg index: Comma-separated list of indices. + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified). Default is false. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. 
+ :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg flat_settings: Return settings in flat format. Default is + false. + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed). Default is + false. :arg include_defaults: Whether to return all default setting for - each of the indices. + each of the indices. Default is false. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + from cluster-manager node. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") @@ -200,10 +222,10 @@ async def get(self, index, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "ignore_unavailable", "master_timeout", - "cluster_manager_timeout", "timeout", "wait_for_active_shards", ) @@ -212,18 +234,21 @@ async def open(self, index, params=None, headers=None): Opens an index. - :arg index: A comma separated list of indices to open + :arg index: Comma-separated list of indices to open. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. 
:arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: closed + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + should be ignored when unavailable (missing or closed). + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Sets the number of active shards to wait for before the operation returns. """ @@ -236,10 +261,10 @@ async def open(self, index, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "ignore_unavailable", "master_timeout", - "cluster_manager_timeout", "timeout", "wait_for_active_shards", ) @@ -248,22 +273,23 @@ async def close(self, index, params=None, headers=None): Closes an index. - :arg index: A comma separated list of indices to close + :arg index: Comma-separated list of indices to close. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. 
Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + should be ignored when unavailable (missing or closed). + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Sets the number of active shards to - wait for before the operation returns. Set to `index-setting` to wait - according to the index setting `index.write.wait_for_active_shards`, or - `all` to wait for all shards, or an integer. Defaults to `0`. + wait for before the operation returns. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") @@ -274,10 +300,10 @@ async def close(self, index, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "ignore_unavailable", "master_timeout", - "cluster_manager_timeout", "timeout", ) async def delete(self, index, params=None, headers=None): @@ -285,18 +311,23 @@ async def delete(self, index, params=None, headers=None): Deletes an index. 
- :arg index: A comma-separated list of indices to delete; use - `_all` or `*` string to delete all indices - :arg allow_no_indices: Ignore if a wildcard expression resolves - to no concrete indices (default: false) - :arg expand_wildcards: Whether wildcard expressions should get - expanded to open or closed indices (default: open) Valid choices: open, - closed, hidden, none, all Default: open - :arg ignore_unavailable: Ignore unavailable indexes (default: - false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + :arg index: Comma-separated list of indices to delete; use + `_all` or `*` string to delete all indices. + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified). Default is false. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed). Default is + false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") @@ -318,20 +349,22 @@ async def exists(self, index, params=None, headers=None): Returns information about whether a particular index exists. 
- :arg index: A comma-separated list of index names - :arg allow_no_indices: Ignore if a wildcard expression resolves - to no concrete indices (default: false) - :arg expand_wildcards: Whether wildcard expressions should get - expanded to open or closed indices (default: open) Valid choices: open, - closed, hidden, none, all Default: open - :arg flat_settings: Return settings in flat format (default: - false) - :arg ignore_unavailable: Ignore unavailable indexes (default: - false) + :arg index: Comma-separated list of indices. + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified). Default is false. + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg flat_settings: Return settings in flat format. Default is + false. + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed). Default is + false. :arg include_defaults: Whether to return all default setting for - each of the indices. + each of the indices. Default is false. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) + from cluster-manager node. Default is false. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") @@ -342,10 +375,10 @@ async def exists(self, index, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "ignore_unavailable", "master_timeout", - "cluster_manager_timeout", "timeout", "write_index_only", ) @@ -355,26 +388,31 @@ async def put_mapping(self, body, index=None, params=None, headers=None): :arg body: The mapping definition - :arg index: A comma-separated list of index names the mapping - should be added to (supports wildcards); use `_all` or omit to add the - mapping on all indices. + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + should be ignored when unavailable (missing or closed). + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. 
:arg write_index_only: When true, applies mappings only to the - write index of an alias or data stream + write index of an alias or data stream. Default is false. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") + if index in SKIP_IN_PATH: + index = "_all" + return await self.transport.perform_request( "PUT", _make_path(index, "_mapping"), @@ -385,36 +423,37 @@ async def put_mapping(self, body, index=None, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "ignore_unavailable", "local", "master_timeout", - "cluster_manager_timeout", ) async def get_mapping(self, index=None, params=None, headers=None): """ Returns mappings for one or more indices. - :arg index: A comma-separated list of index names + :arg index: Comma-separated list of indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + should be ignored when unavailable (missing or closed). 
+ :arg local (Deprecated: This parameter is a no-op and field + mappings are always retrieved locally.): Return local information, do + not retrieve the state from cluster-manager node. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ return await self.transport.perform_request( - "GET", - _make_path(index, "_mapping"), - params=params, - headers=headers, + "GET", _make_path(index, "_mapping"), params=params, headers=headers ) @query_params( @@ -429,20 +468,20 @@ async def get_field_mapping(self, fields, index=None, params=None, headers=None) Returns mapping for one or more fields. - :arg fields: A comma-separated list of fields - :arg index: A comma-separated list of index names + :arg fields: Comma-separated list of fields. + :arg index: Comma-separated list of indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg include_defaults: Whether the default mapping values should - be returned as well + be returned as well. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) + from cluster-manager node. Default is false. 
""" if fields in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'fields'.") @@ -454,21 +493,23 @@ async def get_field_mapping(self, fields, index=None, params=None, headers=None) headers=headers, ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") async def put_alias(self, index, name, body=None, params=None, headers=None): """ Creates or updates an alias. - :arg index: A comma-separated list of index names the alias - should point to (supports wildcards); use `_all` to perform the - operation on all indices. - :arg name: The name of the alias to be created or updated + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. + :arg name: The name of the alias to be created or updated. :arg body: The settings for the alias, such as `routing` or `filter` - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit timestamp for the document + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ for param in (index, name): if param in SKIP_IN_PATH: @@ -488,19 +529,18 @@ async def exists_alias(self, name, index=None, params=None, headers=None): Returns information about whether a particular alias exists. - :arg name: A comma-separated list of alias names to return - :arg index: A comma-separated list of index names to filter - aliases + :arg name: Comma-separated list of alias names. + :arg index: Comma-separated list of indices to filter aliases. 
:arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: all + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) + from cluster-manager node. Default is false. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -515,34 +555,36 @@ async def get_alias(self, index=None, name=None, params=None, headers=None): Returns an alias. - :arg index: A comma-separated list of index names to filter - aliases - :arg name: A comma-separated list of alias names to return + :arg index: Comma-separated list of indices to filter aliases. + :arg name: Comma-separated list of alias names. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: all + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). 
:arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) + from cluster-manager node. Default is false. """ return await self.transport.perform_request( "GET", _make_path(index, "_alias", name), params=params, headers=headers ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") async def update_aliases(self, body, params=None, headers=None): """ Updates index aliases. :arg body: The definition of `actions` to perform - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Request timeout + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -551,19 +593,22 @@ async def update_aliases(self, body, params=None, headers=None): "POST", "/_aliases", params=params, headers=headers, body=body ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") async def delete_alias(self, index, name, params=None, headers=None): """ Deletes an alias. - :arg index: A comma-separated list of index names (supports - wildcards); use `_all` for all indices - :arg name: A comma-separated list of aliases to delete (supports + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. + :arg name: Comma-separated list of aliases to delete (supports wildcards); use `_all` to delete all aliases for the specified indices. 
- :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit timestamp for the document + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ for param in (index, name): if param in SKIP_IN_PATH: @@ -573,21 +618,24 @@ async def delete_alias(self, index, name, params=None, headers=None): "DELETE", _make_path(index, "_alias", name), params=params, headers=headers ) - @query_params("create", "master_timeout", "cluster_manager_timeout", "order") + @query_params("cluster_manager_timeout", "create", "master_timeout", "order") async def put_template(self, name, body, params=None, headers=None): """ Creates or updates an index template. - :arg name: The name of the template + :arg name: The name of the template. :arg body: The template definition + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg create: Whether the index template should only be added if - new or can also replace an existing one - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + new or can also replace an existing one. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg order: The order for this template when merging multiple matching ones (higher numbers are merged later, overriding the lower - numbers) + numbers). 
""" for param in (name, body): if param in SKIP_IN_PATH: @@ -601,21 +649,22 @@ async def put_template(self, name, body, params=None, headers=None): body=body, ) - @query_params("flat_settings", "local", "master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "flat_settings", "local", "master_timeout") async def exists_template(self, name, params=None, headers=None): """ Returns information about whether a particular index template exists. - :arg name: The comma separated names of the index templates - :arg flat_settings: Return settings in flat format (default: - false) + :arg name: Comma-separated names of the index templates. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg flat_settings: Return settings in flat format. Default is + false. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -624,36 +673,40 @@ async def exists_template(self, name, params=None, headers=None): "HEAD", _make_path("_template", name), params=params, headers=headers ) - @query_params("flat_settings", "local", "master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "flat_settings", "local", "master_timeout") async def get_template(self, name=None, params=None, headers=None): """ Returns an index template. 
- :arg name: The comma separated names of the index templates - :arg flat_settings: Return settings in flat format (default: - false) + :arg name: Comma-separated names of the index templates. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg flat_settings: Return settings in flat format. Default is + false. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ return await self.transport.perform_request( "GET", _make_path("_template", name), params=params, headers=headers ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") async def delete_template(self, name, params=None, headers=None): """ Deletes an index template. - :arg name: The name of the template - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + :arg name: The name of the template. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -664,38 +717,41 @@ async def delete_template(self, name, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "flat_settings", "ignore_unavailable", "include_defaults", "local", "master_timeout", - "cluster_manager_timeout", ) async def get_settings(self, index=None, name=None, params=None, headers=None): """ Returns settings for one or more indices. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices - :arg name: The name of the settings that should be included + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. + :arg name: Comma-separated list of settings. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: all - :arg flat_settings: Return settings in flat format (default: - false) + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg flat_settings: Return settings in flat format. Default is + false. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg include_defaults: Whether to return all default setting for - each of the indices. + each of the indices. Default is false. 
:arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + from cluster-manager node. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ return await self.transport.perform_request( "GET", _make_path(index, "_settings", name), params=params, headers=headers @@ -703,11 +759,11 @@ async def get_settings(self, index=None, name=None, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "flat_settings", "ignore_unavailable", "master_timeout", - "cluster_manager_timeout", "preserve_existing", "timeout", ) @@ -717,24 +773,27 @@ async def put_settings(self, body, index=None, params=None, headers=None): :arg body: The index settings to be updated - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg flat_settings: Return settings in flat format (default: - false) + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. 
+ :arg flat_settings: Return settings in flat format. Default is + false. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + should be ignored when unavailable (missing or closed). + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg preserve_existing: Whether to update existing settings. If - set to `true` existing settings on an index remain unchanged, the - default is `false` - :arg timeout: Explicit operation timeout + set to `true` existing settings on an index remain unchanged. Default is + false. + :arg timeout: Operation timeout. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -757,43 +816,40 @@ async def put_settings(self, body, index=None, params=None, headers=None): "include_segment_file_sizes", "include_unloaded_segments", "level", - "types", ) async def stats(self, index=None, metric=None, params=None, headers=None): """ Provides statistics on operations happening in an index. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg metric: Limit the information returned the specific - metrics. Valid choices: _all, completion, docs, fielddata, query_cache, - flush, get, indexing, merge, request_cache, refresh, search, segments, - store, warmer, suggest - :arg completion_fields: A comma-separated list of fields for - `fielddata` and `suggest` index metric (supports wildcards) + metrics. 
Valid choices are _all, store, indexing, get, search, merge, + flush, refresh, query_cache, fielddata, docs, warmer, completion, + segments, translog, suggest, request_cache, recovery. + :arg completion_fields: Comma-separated list of fields for + `fielddata` and `suggest` index metric (supports wildcards). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg fielddata_fields: A comma-separated list of fields for - `fielddata` index metric (supports wildcards) - :arg fields: A comma-separated list of fields for `fielddata` - and `completion` index metric (supports wildcards) + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg fielddata_fields: Comma-separated list of fields for + `fielddata` index metric (supports wildcards). + :arg fields: Comma-separated list of fields for `fielddata` and + `completion` index metric (supports wildcards). :arg forbid_closed_indices: If set to false stats will also collected from closed indices if explicitly specified or if - expand_wildcards expands to closed indices Default: True - :arg groups: A comma-separated list of search groups for - `search` index metric + expand_wildcards expands to closed indices. Default is True. + :arg groups: Comma-separated list of search groups for `search` + index metric. :arg include_segment_file_sizes: Whether to report the aggregated disk usage of each one of the Lucene index files (only - applies if segment stats are requested) + applies if segment stats are requested). Default is false. :arg include_unloaded_segments: If set to true segment stats will include stats for segments that are not currently loaded into - memory + memory. Default is false. 
:arg level: Return stats aggregated at cluster, index or shard - level Valid choices: cluster, indices, shards Default: indices - :arg types: A comma-separated list of document types for the - `indexing` index metric + level. Valid choices are cluster, indices, shards. """ return await self.transport.perform_request( "GET", _make_path(index, "_stats", metric), params=params, headers=headers @@ -807,17 +863,18 @@ async def segments(self, index=None, params=None, headers=None): Provides low-level information about segments in a Lucene index. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg verbose: Includes detailed memory usage by Lucene. + should be ignored when unavailable (missing or closed). + :arg verbose: Includes detailed memory usage by Lucene. Default + is false. 
""" return await self.transport.perform_request( "GET", _make_path(index, "_segments"), params=params, headers=headers @@ -843,30 +900,29 @@ async def validate_query(self, body=None, index=None, params=None, headers=None) :arg body: The query definition specified with the Query DSL - :arg index: A comma-separated list of index names to restrict - the operation; use `_all` or empty string to perform the operation on - all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg all_shards: Execute validation on all shards instead of one - random shard per index + random shard per index. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg analyze_wildcard: Specify whether wildcard and prefix - queries should be analyzed (default: false) - :arg analyzer: The analyzer to use for the query string + queries should be analyzed. Default is false. + :arg analyzer: The analyzer to use for the query string. :arg default_operator: The default operator for query string - query (AND or OR) Valid choices: AND, OR Default: OR + query (AND or OR). Valid choices are AND, OR. :arg df: The field to use as default where no field prefix is - given in the query string + given in the query string. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg explain: Return detailed information about the error + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg explain: Return detailed information about the error. 
:arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg lenient: Specify whether format-based query failures (such - as providing text to a numeric field) should be ignored - :arg q: Query in the Lucene query string syntax + as providing text to a numeric field) should be ignored. + :arg q: Query in the Lucene query string syntax. :arg rewrite: Provide a more detailed explanation showing the actual Lucene query that will be executed. """ @@ -892,21 +948,21 @@ async def clear_cache(self, index=None, params=None, headers=None): Clears all or specific caches for one or more indices. - :arg index: A comma-separated list of index name to limit the - operation + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg fielddata: Clear field data - :arg fields: A comma-separated list of fields to clear when - using the `fielddata` parameter (default: all) + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg fielddata: Clear field data. + :arg fields: Comma-separated list of fields to clear when using + the `fielddata` parameter (default: all). :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg query: Clear query caches - :arg request: Clear request cache + should be ignored when unavailable (missing or closed). + :arg query: Clear query caches. 
+ :arg request: Clear request cache. """ return await self.transport.perform_request( "POST", _make_path(index, "_cache", "clear"), params=params, headers=headers @@ -918,12 +974,12 @@ async def recovery(self, index=None, params=None, headers=None): Returns information about ongoing index shard recoveries. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg active_only: Display only those recoveries that are - currently on-going + currently on-going. Default is false. :arg detailed: Whether to display detailed information about - shard recovery + shard recovery. Default is false. """ return await self.transport.perform_request( "GET", _make_path(index, "_recovery"), params=params, headers=headers @@ -938,23 +994,23 @@ async def recovery(self, index=None, params=None, headers=None): ) async def upgrade(self, index=None, params=None, headers=None): """ - DEPRECATED Upgrades to the current version of Lucene. + The _upgrade API is no longer useful and will be removed. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. 
:arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg only_ancient_segments: If true, only ancient (an older - Lucene major release) segments will be upgraded - :arg wait_for_completion: Specify whether the request should - block until the all segments are upgraded (default: false) + Lucene major release) segments will be upgraded. + :arg wait_for_completion: Should this request wait until the + operation has completed before returning. Default is false. """ return await self.transport.perform_request( "POST", _make_path(index, "_upgrade"), params=params, headers=headers @@ -963,19 +1019,19 @@ async def upgrade(self, index=None, params=None, headers=None): @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable") async def get_upgrade(self, index=None, params=None, headers=None): """ - DEPRECATED Returns a progress status of current upgrade. + The _upgrade API is no longer useful and will be removed. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). 
""" return await self.transport.perform_request( "GET", _make_path(index, "_upgrade"), params=params, headers=headers @@ -989,19 +1045,18 @@ async def shard_stores(self, index=None, params=None, headers=None): Provides store information for shard copies of indices. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg status: A comma-separated list of statuses used to filter - on shards to get store information for Valid choices: green, yellow, - red, all + should be ignored when unavailable (missing or closed). + :arg status: Comma-separated list of statuses used to filter on + shards to get store information for. """ return await self.transport.perform_request( "GET", _make_path(index, "_shard_stores"), params=params, headers=headers @@ -1020,31 +1075,31 @@ async def forcemerge(self, index=None, params=None, headers=None): Performs the force merge operation on one or more indices. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. 
:arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg flush: Specify whether the index should be flushed after - performing the operation (default: true) + performing the operation. Default is True. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg max_num_segments: The number of segments the index should - be merged into (default: dynamic) + be merged into (default: dynamic). :arg only_expunge_deletes: Specify whether the operation should - only expunge deleted documents + only expunge deleted documents. """ return await self.transport.perform_request( "POST", _make_path(index, "_forcemerge"), params=params, headers=headers ) @query_params( + "cluster_manager_timeout", "copy_settings", "master_timeout", - "cluster_manager_timeout", "timeout", "wait_for_active_shards", ) @@ -1053,15 +1108,18 @@ async def shrink(self, index, target, body=None, params=None, headers=None): Allow to shrink an existing index into a new index with fewer primary shards. - :arg index: The name of the source index to shrink - :arg target: The name of the target index to shrink into + :arg index: The name of the source index to shrink. + :arg target: The name of the target index. :arg body: The configuration for the target index (`settings` and `aliases`) + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. 
:arg copy_settings: whether or not to copy settings from the - source index (defaults to false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + source index. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Set the number of active shards to wait for on the shrunken index before the operation returns. """ @@ -1078,9 +1136,9 @@ async def shrink(self, index, target, body=None, params=None, headers=None): ) @query_params( + "cluster_manager_timeout", "copy_settings", "master_timeout", - "cluster_manager_timeout", "timeout", "wait_for_active_shards", ) @@ -1090,15 +1148,18 @@ async def split(self, index, target, body=None, params=None, headers=None): shards. - :arg index: The name of the source index to split - :arg target: The name of the target index to split into + :arg index: The name of the source index to split. + :arg target: The name of the target index. :arg body: The configuration for the target index (`settings` and `aliases`) + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg copy_settings: whether or not to copy settings from the - source index (defaults to false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + source index. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. 
:arg wait_for_active_shards: Set the number of active shards to wait for on the shrunken index before the operation returns. """ @@ -1115,9 +1176,9 @@ async def split(self, index, target, body=None, params=None, headers=None): ) @query_params( + "cluster_manager_timeout", "dry_run", "master_timeout", - "cluster_manager_timeout", "timeout", "wait_for_active_shards", ) @@ -1129,16 +1190,19 @@ async def rollover( to be too large or too old. - :arg alias: The name of the alias to rollover + :arg alias: The name of the alias to rollover. :arg body: The conditions that needs to be met for executing rollover - :arg new_index: The name of the rollover index + :arg new_index: The name of the rollover index. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg dry_run: If set to true the rollover action will only be - validated but not actually performed even if a condition matches. The - default is false - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + validated but not actually performed even if a condition matches. + Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Set the number of active shards to wait for on the newly created rollover index before the operation returns. @@ -1154,133 +1218,34 @@ async def rollover( body=body, ) - @query_params( - "allow_no_indices", - "expand_wildcards", - "ignore_unavailable", - "master_timeout", - "cluster_manager_timeout", - "timeout", - "wait_for_active_shards", - ) - async def freeze(self, index, params=None, headers=None): - """ - Freezes an index. 
A frozen index has almost no overhead on the cluster (except - for maintaining its metadata in memory) and is read-only. - - - :arg index: The name of the index to freeze - :arg allow_no_indices: Whether to ignore if a wildcard indices - expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) - :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: closed - :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout - :arg wait_for_active_shards: Sets the number of active shards to - wait for before the operation returns. - """ - if index in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'index'.") - - return await self.transport.perform_request( - "POST", _make_path(index, "_freeze"), params=params, headers=headers - ) - - @query_params( - "allow_no_indices", - "expand_wildcards", - "ignore_unavailable", - "master_timeout", - "cluster_manager_timeout", - "timeout", - "wait_for_active_shards", - ) - async def unfreeze(self, index, params=None, headers=None): - """ - Unfreezes an index. When a frozen index is unfrozen, the index goes through the - normal recovery process and becomes writeable again. - - - :arg index: The name of the index to unfreeze - :arg allow_no_indices: Whether to ignore if a wildcard indices - expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) - :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. 
Valid choices: open, - closed, hidden, none, all Default: closed - :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout - :arg wait_for_active_shards: Sets the number of active shards to - wait for before the operation returns. - """ - if index in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'index'.") - - return await self.transport.perform_request( - "POST", _make_path(index, "_unfreeze"), params=params, headers=headers - ) - - @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable") - async def reload_search_analyzers(self, index, params=None, headers=None): - """ - Reloads an index's search analyzers and their resources. - - - :arg index: A comma-separated list of index names to reload - analyzers for - :arg allow_no_indices: Whether to ignore if a wildcard indices - expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) - :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. 
Valid choices: open, - closed, hidden, none, all Default: open - :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - """ - if index in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'index'.") - - return await self.transport.perform_request( - "GET", - _make_path(index, "_reload_search_analyzers"), - params=params, - headers=headers, - ) - @query_params() - async def create_data_stream(self, name, params=None, headers=None): + async def create_data_stream(self, name, body=None, params=None, headers=None): """ - Creates a data stream + Creates or updates a data stream. - :arg name: The name of the data stream + :arg name: The name of the data stream. + :arg body: The data stream definition """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") return await self.transport.perform_request( - "PUT", _make_path("_data_stream", name), params=params, headers=headers + "PUT", + _make_path("_data_stream", name), + params=params, + headers=headers, + body=body, ) - @query_params("expand_wildcards") + @query_params() async def delete_data_stream(self, name, params=None, headers=None): """ Deletes a data stream. - :arg name: A comma-separated list of data streams to delete; use - `*` to delete all data streams - :arg expand_wildcards: Whether wildcard expressions should get - expanded to open or closed indices (default: open) Valid choices: open, - closed, hidden, none, all Default: open + :arg name: Comma-separated list of data streams; use `_all` or + empty string to perform the operation on all data streams. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -1289,16 +1254,19 @@ async def delete_data_stream(self, name, params=None, headers=None): "DELETE", _make_path("_data_stream", name), params=params, headers=headers ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") async def delete_index_template(self, name, params=None, headers=None): """ Deletes an index template. - :arg name: The name of the template - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + :arg name: The name of the template. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -1310,21 +1278,22 @@ async def delete_index_template(self, name, params=None, headers=None): headers=headers, ) - @query_params("flat_settings", "local", "master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "flat_settings", "local", "master_timeout") async def exists_index_template(self, name, params=None, headers=None): """ Returns information about whether a particular index template exists. - :arg name: The name of the template - :arg flat_settings: Return settings in flat format (default: - false) + :arg name: The name of the template. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg flat_settings: Return settings in flat format. Default is + false. 
:arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -1333,40 +1302,44 @@ async def exists_index_template(self, name, params=None, headers=None): "HEAD", _make_path("_index_template", name), params=params, headers=headers ) - @query_params("flat_settings", "local", "master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "flat_settings", "local", "master_timeout") async def get_index_template(self, name=None, params=None, headers=None): """ Returns an index template. - :arg name: The comma separated names of the index templates - :arg flat_settings: Return settings in flat format (default: - false) + :arg name: Comma-separated names of the index templates. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg flat_settings: Return settings in flat format. Default is + false. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. 
""" return await self.transport.perform_request( "GET", _make_path("_index_template", name), params=params, headers=headers ) - @query_params("cause", "create", "master_timeout", "cluster_manager_timeout") + @query_params("cause", "cluster_manager_timeout", "create", "master_timeout") async def put_index_template(self, name, body, params=None, headers=None): """ Creates or updates an index template. - :arg name: The name of the template + :arg name: The name of the template. :arg body: The template definition :arg cause: User defined reason for creating/updating the index - template + template. Default is false. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg create: Whether the index template should only be added if - new or can also replace an existing one - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + new or can also replace an existing one. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ for param in (name, body): if param in SKIP_IN_PATH: @@ -1380,24 +1353,27 @@ async def put_index_template(self, name, body, params=None, headers=None): body=body, ) - @query_params("cause", "create", "master_timeout", "cluster_manager_timeout") + @query_params("cause", "cluster_manager_timeout", "create", "master_timeout") async def simulate_index_template(self, name, body=None, params=None, headers=None): """ Simulate matching the given index name against the index templates in the - system + system. :arg name: The name of the index (it must be a concrete index - name) + name). 
:arg body: New index template definition, which will be included in the simulation, as if it already exists in the system :arg cause: User defined reason for dry-run creating the new - template for simulation purposes + template for simulation purposes. Default is false. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg create: Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an - existing one - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + existing one. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -1410,38 +1386,38 @@ async def simulate_index_template(self, name, body=None, params=None, headers=No body=body, ) - @query_params("expand_wildcards") + @query_params() async def get_data_stream(self, name=None, params=None, headers=None): """ Returns data streams. - :arg name: A comma-separated list of data streams to get; use - `*` to get all data streams - :arg expand_wildcards: Whether wildcard expressions should get - expanded to open or closed indices (default: open) Valid choices: open, - closed, hidden, none, all Default: open + :arg name: Comma-separated list of data streams; use `_all` or + empty string to perform the operation on all data streams. 
""" return await self.transport.perform_request( "GET", _make_path("_data_stream", name), params=params, headers=headers ) - @query_params("cause", "create", "master_timeout", "cluster_manager_timeout") + @query_params("cause", "cluster_manager_timeout", "create", "master_timeout") async def simulate_template(self, body=None, name=None, params=None, headers=None): """ - Simulate resolving the given template name or body + Simulate resolving the given template name or body. :arg body: New index template definition to be simulated, if no index template name is specified - :arg name: The name of the index template + :arg name: The name of the template. :arg cause: User defined reason for dry-run creating the new - template for simulation purposes + template for simulation purposes. Default is false. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg create: Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an - existing one - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + existing one. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ return await self.transport.perform_request( "POST", @@ -1454,19 +1430,14 @@ async def simulate_template(self, body=None, name=None, params=None, headers=Non @query_params("expand_wildcards") async def resolve_index(self, name, params=None, headers=None): """ - Returns information about any matching indices, aliases, and data streams + Returns information about any matching indices, aliases, and data streams. - .. 
warning:: - - This API is **experimental** so may include breaking changes - or be removed in a future version - - :arg name: A comma-separated list of names or wildcard - expressions - :arg expand_wildcards: Whether wildcard expressions should get - expanded to open or closed indices (default: open) Valid choices: open, - closed, hidden, none, all Default: open + :arg name: Comma-separated list of names or wildcard + expressions. + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -1477,10 +1448,10 @@ async def resolve_index(self, name, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "ignore_unavailable", "master_timeout", - "cluster_manager_timeout", "timeout", ) async def add_block(self, index, block, params=None, headers=None): @@ -1488,20 +1459,23 @@ async def add_block(self, index, block, params=None, headers=None): Adds a block to an index. - :arg index: A comma separated list of indices to add a block to + :arg index: Comma-separated list of indices to add a block to. :arg block: The block to add (one of read, write, read_only or - metadata) + metadata). :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. 
:arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + should be ignored when unavailable (missing or closed). + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ for param in (index, block): if param in SKIP_IN_PATH: @@ -1517,8 +1491,8 @@ async def data_streams_stats(self, name=None, params=None, headers=None): Provides statistics on operations happening in a data stream. - :arg name: A comma-separated list of data stream names; use - `_all` or empty string to perform the operation on all data streams + :arg name: Comma-separated list of data streams; use `_all` or + empty string to perform the operation on all data streams. 
""" return await self.transport.perform_request( "GET", @@ -1526,115 +1500,3 @@ async def data_streams_stats(self, name=None, params=None, headers=None): params=params, headers=headers, ) - - @query_params() - async def promote_data_stream(self, name, params=None, headers=None): - """ - Promotes a data stream from a replicated data stream managed by CCR to a - regular data stream - - - :arg name: The name of the data stream - """ - if name in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'name'.") - - return await self.transport.perform_request( - "POST", - _make_path("_data_stream", "_promote", name), - params=params, - headers=headers, - ) - - @query_params() - async def migrate_to_data_stream(self, name, params=None, headers=None): - """ - Migrates an alias to a data stream - - - :arg name: The name of the alias to migrate - """ - if name in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'name'.") - - return await self.transport.perform_request( - "POST", - _make_path("_data_stream", "_migrate", name), - params=params, - headers=headers, - ) - - @query_params( - "allow_no_indices", - "expand_wildcards", - "flush", - "ignore_unavailable", - "run_expensive_tasks", - ) - async def disk_usage(self, index, params=None, headers=None): - """ - Analyzes the disk usage of each field of an index or data stream - - - .. warning:: - - This API is **experimental** so may include breaking changes - or be removed in a future version - - :arg index: Comma-separated list of indices or data streams to - analyze the disk usage - :arg allow_no_indices: Whether to ignore if a wildcard indices - expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) - :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. 
Valid choices: open, - closed, hidden, none, all Default: open - :arg flush: Whether flush or not before analyzing the index disk - usage. Defaults to true - :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg run_expensive_tasks: Must be set to [true] in order for the - task to be performed. Defaults to false. - """ - if index in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'index'.") - - return await self.transport.perform_request( - "POST", _make_path(index, "_disk_usage"), params=params, headers=headers - ) - - @query_params( - "allow_no_indices", "expand_wildcards", "fields", "ignore_unavailable" - ) - async def field_usage_stats(self, index, params=None, headers=None): - """ - Returns the field usage stats for each field of an index - - - .. warning:: - - This API is **experimental** so may include breaking changes - or be removed in a future version - - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices - :arg allow_no_indices: Whether to ignore if a wildcard indices - expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) - :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. 
Valid choices: open, - closed, hidden, none, all Default: open - :arg fields: A comma-separated list of fields to include in the - stats if only a subset of fields should be returned (supports wildcards) - :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - """ - if index in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'index'.") - - return await self.transport.perform_request( - "GET", - _make_path(index, "_field_usage_stats"), - params=params, - headers=headers, - ) diff --git a/opensearchpy/_async/client/indices.pyi b/opensearchpy/_async/client/indices.pyi index 53f6d87f..1a5c0912 100644 --- a/opensearchpy/_async/client/indices.pyi +++ b/opensearchpy/_async/client/indices.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -24,6 +25,15 @@ # specific language governing permissions and limitations # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union from .utils import NamespacedClient @@ -94,8 +104,8 @@ class IndicesClient(NamespacedClient): index: Any, *, body: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -117,8 +127,8 @@ class IndicesClient(NamespacedClient): target: Any, *, body: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -139,13 +149,13 @@ class IndicesClient(NamespacedClient): index: Any, *, allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., flat_settings: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., include_defaults: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -164,10 +174,10 @@ class IndicesClient(NamespacedClient): index: Any, *, allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - 
cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -188,10 +198,10 @@ class IndicesClient(NamespacedClient): index: Any, *, allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -212,10 +222,10 @@ class IndicesClient(NamespacedClient): index: Any, *, allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -259,10 +269,10 @@ class IndicesClient(NamespacedClient): body: Any, index: Optional[Any] = ..., allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., write_index_only: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -283,11 +293,11 @@ class IndicesClient(NamespacedClient): *, index: Optional[Any] = ..., allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -330,8 +340,8 @@ class IndicesClient(NamespacedClient): name: Any, *, body: 
Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -394,8 +404,8 @@ class IndicesClient(NamespacedClient): self, *, body: Any, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -415,8 +425,8 @@ class IndicesClient(NamespacedClient): index: Any, name: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -436,9 +446,9 @@ class IndicesClient(NamespacedClient): name: Any, *, body: Any, + cluster_manager_timeout: Optional[Any] = ..., create: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., order: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -457,10 +467,10 @@ class IndicesClient(NamespacedClient): self, name: Any, *, + cluster_manager_timeout: Optional[Any] = ..., flat_settings: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -478,10 +488,10 @@ class IndicesClient(NamespacedClient): self, *, name: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., flat_settings: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -499,8 +509,8 @@ class IndicesClient(NamespacedClient): self, name: Any, *, - 
master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -521,13 +531,13 @@ class IndicesClient(NamespacedClient): index: Optional[Any] = ..., name: Optional[Any] = ..., allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., flat_settings: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., include_defaults: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -547,11 +557,11 @@ class IndicesClient(NamespacedClient): body: Any, index: Optional[Any] = ..., allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., flat_settings: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., preserve_existing: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -581,7 +591,6 @@ class IndicesClient(NamespacedClient): include_segment_file_sizes: Optional[Any] = ..., include_unloaded_segments: Optional[Any] = ..., level: Optional[Any] = ..., - types: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -731,26 +740,6 @@ class IndicesClient(NamespacedClient): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
- async def flush_synced( - self, - *, - index: Optional[Any] = ..., - allow_no_indices: Optional[Any] = ..., - expand_wildcards: Optional[Any] = ..., - ignore_unavailable: Optional[Any] = ..., - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... async def shard_stores( self, *, @@ -801,9 +790,9 @@ class IndicesClient(NamespacedClient): target: Any, *, body: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., copy_settings: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -825,9 +814,9 @@ class IndicesClient(NamespacedClient): target: Any, *, body: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., copy_settings: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -849,57 +838,9 @@ class IndicesClient(NamespacedClient): *, body: Optional[Any] = ..., new_index: Optional[Any] = ..., - dry_run: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., - timeout: Optional[Any] = ..., - wait_for_active_shards: Optional[Any] = ..., - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., 
- filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... - async def freeze( - self, - index: Any, - *, - allow_no_indices: Optional[Any] = ..., - expand_wildcards: Optional[Any] = ..., - ignore_unavailable: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., - timeout: Optional[Any] = ..., - wait_for_active_shards: Optional[Any] = ..., - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... - async def unfreeze( - self, - index: Any, - *, - allow_no_indices: Optional[Any] = ..., - expand_wildcards: Optional[Any] = ..., - ignore_unavailable: Optional[Any] = ..., + dry_run: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -915,30 +856,11 @@ class IndicesClient(NamespacedClient): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
- async def reload_search_analyzers( - self, - index: Any, - *, - allow_no_indices: Optional[Any] = ..., - expand_wildcards: Optional[Any] = ..., - ignore_unavailable: Optional[Any] = ..., - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... async def create_data_stream( self, name: Any, *, + body: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -956,7 +878,6 @@ class IndicesClient(NamespacedClient): self, name: Any, *, - expand_wildcards: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -974,8 +895,8 @@ class IndicesClient(NamespacedClient): self, name: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -994,10 +915,10 @@ class IndicesClient(NamespacedClient): self, name: Any, *, + cluster_manager_timeout: Optional[Any] = ..., flat_settings: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -1015,10 +936,10 @@ class IndicesClient(NamespacedClient): self, *, name: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., flat_settings: Optional[Any] = ..., local: 
Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -1038,9 +959,9 @@ class IndicesClient(NamespacedClient): *, body: Any, cause: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., create: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -1060,9 +981,9 @@ class IndicesClient(NamespacedClient): *, body: Optional[Any] = ..., cause: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., create: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -1080,7 +1001,6 @@ class IndicesClient(NamespacedClient): self, *, name: Optional[Any] = ..., - expand_wildcards: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -1100,9 +1020,9 @@ class IndicesClient(NamespacedClient): body: Optional[Any] = ..., name: Optional[Any] = ..., cause: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., create: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -1140,10 +1060,10 @@ class IndicesClient(NamespacedClient): block: Any, *, allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -1175,80 
+1095,3 @@ class IndicesClient(NamespacedClient): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... - async def promote_data_stream( - self, - name: Any, - *, - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... - async def migrate_to_data_stream( - self, - name: Any, - *, - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... 
- async def disk_usage( - self, - index: Any, - *, - allow_no_indices: Optional[Any] = ..., - expand_wildcards: Optional[Any] = ..., - flush: Optional[Any] = ..., - ignore_unavailable: Optional[Any] = ..., - run_expensive_tasks: Optional[Any] = ..., - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... - async def field_usage_stats( - self, - index: Any, - *, - allow_no_indices: Optional[Any] = ..., - expand_wildcards: Optional[Any] = ..., - fields: Optional[Any] = ..., - ignore_unavailable: Optional[Any] = ..., - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... 
diff --git a/opensearchpy/_async/client/ingest.py b/opensearchpy/_async/client/ingest.py index cb5253eb..0d56f7e1 100644 --- a/opensearchpy/_async/client/ingest.py +++ b/opensearchpy/_async/client/ingest.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -50,8 +51,8 @@ async def get_pipeline(self, id=None, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ return await self.transport.perform_request( "GET", _make_path("_ingest", "pipeline", id), params=params, headers=headers @@ -68,8 +69,8 @@ async def put_pipeline(self, id, body, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. """ for param in (id, body): @@ -94,8 +95,8 @@ async def delete_pipeline(self, id, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. 
""" if id in SKIP_IN_PATH: @@ -117,7 +118,7 @@ async def simulate(self, body, id=None, params=None, headers=None): :arg body: The simulate definition :arg id: Pipeline ID. :arg verbose: Verbose mode. Display data output for each - processor in executed pipeline. + processor in executed pipeline. Default is false. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") diff --git a/opensearchpy/_async/client/ingest.pyi b/opensearchpy/_async/client/ingest.pyi index 40d3c7d9..9dd4fc2b 100644 --- a/opensearchpy/_async/client/ingest.pyi +++ b/opensearchpy/_async/client/ingest.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/client/nodes.py b/opensearchpy/_async/client/nodes.py index e0e8b06b..a89fee94 100644 --- a/opensearchpy/_async/client/nodes.py +++ b/opensearchpy/_async/client/nodes.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -73,10 +74,10 @@ async def info(self, node_id=None, metric=None, params=None, headers=None): node you're connecting to, leave empty to get information from all nodes. :arg metric: Comma-separated list of metrics you wish returned. - Leave empty to return all. Valid choices: settings, os, process, jvm, - thread_pool, transport, http, plugins, ingest - :arg flat_settings: Return settings in flat format. (default: - false) + Leave empty to return all. Valid choices are settings, os, process, jvm, + thread_pool, transport, http, plugins, ingest. + :arg flat_settings: Return settings in flat format. Default is + false. :arg timeout: Operation timeout. """ return await self.transport.perform_request( @@ -105,13 +106,13 @@ async def stats( node you're connecting to, leave empty to get information from all nodes. 
:arg metric: Limit the information returned to the specified - metrics. Valid choices: _all, breaker, fs, http, indices, jvm, os, - process, thread_pool, transport, discovery, indexing_pressure + metrics. Valid choices are _all, breaker, fs, http, indices, jvm, os, + process, thread_pool, transport, discovery, indexing_pressure. :arg index_metric: Limit the information returned for `indices` metric to the specific index metrics. Isn't used if `indices` (or `all`) - metric isn't specified. Valid choices: _all, store, indexing, get, + metric isn't specified. Valid choices are _all, store, indexing, get, search, merge, flush, refresh, query_cache, fielddata, docs, warmer, - completion, segments, translog, suggest, request_cache, recovery + completion, segments, translog, suggest, request_cache, recovery. :arg completion_fields: Comma-separated list of fields for `fielddata` and `suggest` index metric (supports wildcards). :arg fielddata_fields: Comma-separated list of fields for @@ -122,9 +123,9 @@ async def stats( index metric. :arg include_segment_file_sizes: Whether to report the aggregated disk usage of each one of the Lucene index files (only - applies if segment stats are requested). (default: false) + applies if segment stats are requested). Default is false. :arg level: Return indices stats aggregated at index, node or - shard level. Valid choices: indices, node, shards + shard level. Valid choices are indices, node, shards. :arg timeout: Operation timeout. :arg types: Comma-separated list of document types for the `indexing` index metric. @@ -148,16 +149,16 @@ async def hot_threads(self, node_id=None, params=None, headers=None): the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes. - :arg doc_type: The type to sample. Valid choices: cpu, wait, - block + :arg doc_type: The type to sample. Valid choices are cpu, wait, + block. 
:arg ignore_idle_threads: Don't show threads that are in known- idle places, such as waiting on a socket select or pulling from an empty - task queue. (default: True) + task queue. Default is True. :arg interval: The interval for the second sampling of threads. - :arg snapshots: Number of samples of thread stacktrace. - (default: 10) + :arg snapshots: Number of samples of thread stacktrace. Default + is 10. :arg threads: Specify the number of threads to provide - information for. (default: 3) + information for. Default is 3. :arg timeout: Operation timeout. """ # type is a reserved word so it cannot be used, use doc_type instead @@ -182,7 +183,7 @@ async def usage(self, node_id=None, metric=None, params=None, headers=None): node you're connecting to, leave empty to get information from all nodes. :arg metric: Limit the information returned to the specified - metrics. Valid choices: _all, rest_actions + metrics. Valid choices are _all, rest_actions. :arg timeout: Operation timeout. """ return await self.transport.perform_request( diff --git a/opensearchpy/_async/client/nodes.pyi b/opensearchpy/_async/client/nodes.pyi index b34a7ba9..c18afb83 100644 --- a/opensearchpy/_async/client/nodes.pyi +++ b/opensearchpy/_async/client/nodes.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/client/plugins.py b/opensearchpy/_async/client/plugins.py index 2b762ba3..19570be4 100644 --- a/opensearchpy/_async/client/plugins.py +++ b/opensearchpy/_async/client/plugins.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -44,7 +45,7 @@ def _dynamic_lookup(self, client): setattr(client, plugin, getattr(self, plugin)) else: warnings.warn( - f"Cannot load `{plugin}` directly to AsyncOpenSearch. `{plugin}` already exists in AsyncOpenSearch. 
Please use `AsyncOpenSearch.plugin.{plugin}` instead.", + f"Cannot load `{plugin}` directly to {self.client.__class__.__name__} as it already exists. Use `{self.client.__class__.__name__}.plugin.{plugin}` instead.", category=RuntimeWarning, stacklevel=2, ) diff --git a/opensearchpy/_async/client/plugins.pyi b/opensearchpy/_async/client/plugins.pyi index 88383d01..44576c74 100644 --- a/opensearchpy/_async/client/plugins.pyi +++ b/opensearchpy/_async/client/plugins.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/client/remote.py b/opensearchpy/_async/client/remote.py index 02aa931d..eee7319d 100644 --- a/opensearchpy/_async/client/remote.py +++ b/opensearchpy/_async/client/remote.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/client/remote.pyi b/opensearchpy/_async/client/remote.pyi index 068c690b..a2d7dc51 100644 --- a/opensearchpy/_async/client/remote.pyi +++ b/opensearchpy/_async/client/remote.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/client/remote_store.py b/opensearchpy/_async/client/remote_store.py new file mode 100644 index 00000000..e59d1870 --- /dev/null +++ b/opensearchpy/_async/client/remote_store.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + +from .utils import SKIP_IN_PATH, NamespacedClient, query_params + + +class RemoteStoreClient(NamespacedClient): + @query_params("cluster_manager_timeout", "wait_for_completion") + async def restore(self, body, params=None, headers=None): + """ + Restores from remote store. + + + :arg body: Comma-separated list of index IDs + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg wait_for_completion: Should this request wait until the + operation has completed before returning. Default is false. + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return await self.transport.perform_request( + "POST", "/_remotestore/_restore", params=params, headers=headers, body=body + ) diff --git a/opensearchpy/_async/client/remote_store.pyi b/opensearchpy/_async/client/remote_store.pyi new file mode 100644 index 00000000..b14866ef --- /dev/null +++ b/opensearchpy/_async/client/remote_store.pyi @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + +from typing import Any, Collection, MutableMapping, Optional, Tuple, Union + +from .utils import NamespacedClient + +class RemoteStoreClient(NamespacedClient): + async def restore( + self, + *, + body: Any, + cluster_manager_timeout: Optional[Any] = ..., + wait_for_completion: Optional[Any] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... diff --git a/opensearchpy/_async/client/security.py b/opensearchpy/_async/client/security.py index bc8e8671..43265506 100644 --- a/opensearchpy/_async/client/security.py +++ b/opensearchpy/_async/client/security.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -7,6 +8,17 @@ # Modifications Copyright OpenSearch Contributors. See # GitHub history for details. + +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params @@ -17,25 +29,25 @@ class SecurityClient(NamespacedClient): async def get_account_details(self, params=None, headers=None): """ Returns account details for the current user. + """ return await self.transport.perform_request( - "GET", - _make_path("_plugins", "_security", "api", "account"), - params=params, - headers=headers, + "GET", "/_plugins/_security/api/account", params=params, headers=headers ) @query_params() async def change_password(self, body, params=None, headers=None): """ Changes the password for the current user. + + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return await self.transport.perform_request( "PUT", - _make_path("_plugins", "_security", "api", "account"), + "/_plugins/_security/api/account", params=params, headers=headers, body=body, @@ -45,10 +57,13 @@ async def change_password(self, body, params=None, headers=None): async def get_action_group(self, action_group, params=None, headers=None): """ Retrieves one action group. + + + :arg action_group: Action group to retrieve. """ if action_group in SKIP_IN_PATH: raise ValueError( - "Empty value passed for a required argument 'action-group'." + "Empty value passed for a required argument 'action_group'." ) return await self.transport.perform_request( @@ -62,10 +77,11 @@ async def get_action_group(self, action_group, params=None, headers=None): async def get_action_groups(self, params=None, headers=None): """ Retrieves all action groups. 
+ """ return await self.transport.perform_request( "GET", - _make_path("_plugins", "_security", "api", "actiongroups"), + "/_plugins/_security/api/actiongroups/", params=params, headers=headers, ) @@ -73,11 +89,14 @@ async def get_action_groups(self, params=None, headers=None): @query_params() async def delete_action_group(self, action_group, params=None, headers=None): """ - Deletes the specified action group. + Delete a specified action group. + + + :arg action_group: Action group to delete. """ if action_group in SKIP_IN_PATH: raise ValueError( - "Empty value passed for a required argument 'action-group'." + "Empty value passed for a required argument 'action_group'." ) return await self.transport.perform_request( @@ -91,6 +110,10 @@ async def delete_action_group(self, action_group, params=None, headers=None): async def create_action_group(self, action_group, body, params=None, headers=None): """ Creates or replaces the specified action group. + + + :arg action_group: The name of the action group to create or + replace """ for param in (action_group, body): if param in SKIP_IN_PATH: @@ -108,6 +131,8 @@ async def create_action_group(self, action_group, body, params=None, headers=Non async def patch_action_group(self, action_group, body, params=None, headers=None): """ Updates individual attributes of an action group. + + """ for param in (action_group, body): if param in SKIP_IN_PATH: @@ -125,13 +150,15 @@ async def patch_action_group(self, action_group, body, params=None, headers=None async def patch_action_groups(self, body, params=None, headers=None): """ Creates, updates, or deletes multiple action groups in a single call. 
+ + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return await self.transport.perform_request( "PATCH", - _make_path("_plugins", "_security", "api", "actiongroups"), + "/_plugins/_security/api/actiongroups", params=params, headers=headers, body=body, @@ -140,7 +167,9 @@ async def patch_action_groups(self, body, params=None, headers=None): @query_params() async def get_user(self, username, params=None, headers=None): """ - Retrieves one user. + Retrieve one internal user. + + """ if username in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'username'.") @@ -155,11 +184,12 @@ async def get_user(self, username, params=None, headers=None): @query_params() async def get_users(self, params=None, headers=None): """ - Retrieves all users. + Retrieve all internal users. + """ return await self.transport.perform_request( "GET", - _make_path("_plugins", "_security", "api", "internalusers"), + "/_plugins/_security/api/internalusers", params=params, headers=headers, ) @@ -167,7 +197,9 @@ async def get_users(self, params=None, headers=None): @query_params() async def delete_user(self, username, params=None, headers=None): """ - Deletes the specified user. + Delete the specified user. + + """ if username in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'username'.") @@ -183,6 +215,8 @@ async def delete_user(self, username, params=None, headers=None): async def create_user(self, username, body, params=None, headers=None): """ Creates or replaces the specified user. + + """ for param in (username, body): if param in SKIP_IN_PATH: @@ -200,6 +234,8 @@ async def create_user(self, username, body, params=None, headers=None): async def patch_user(self, username, body, params=None, headers=None): """ Updates individual attributes of an internal user. 
+ + """ for param in (username, body): if param in SKIP_IN_PATH: @@ -217,13 +253,15 @@ async def patch_user(self, username, body, params=None, headers=None): async def patch_users(self, body, params=None, headers=None): """ Creates, updates, or deletes multiple internal users in a single call. + + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return await self.transport.perform_request( "PATCH", - _make_path("_plugins", "_security", "api", "internalusers"), + "/_plugins/_security/api/internalusers", params=params, headers=headers, body=body, @@ -233,6 +271,8 @@ async def patch_users(self, body, params=None, headers=None): async def get_role(self, role, params=None, headers=None): """ Retrieves one role. + + """ if role in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'role'.") @@ -248,18 +288,18 @@ async def get_role(self, role, params=None, headers=None): async def get_roles(self, params=None, headers=None): """ Retrieves all roles. + """ return await self.transport.perform_request( - "GET", - _make_path("_plugins", "_security", "api", "roles"), - params=params, - headers=headers, + "GET", "/_plugins/_security/api/roles/", params=params, headers=headers ) @query_params() async def delete_role(self, role, params=None, headers=None): """ - Deletes the specified role. + Delete the specified role. + + """ if role in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'role'.") @@ -275,6 +315,8 @@ async def delete_role(self, role, params=None, headers=None): async def create_role(self, role, body, params=None, headers=None): """ Creates or replaces the specified role. + + """ for param in (role, body): if param in SKIP_IN_PATH: @@ -292,6 +334,8 @@ async def create_role(self, role, body, params=None, headers=None): async def patch_role(self, role, body, params=None, headers=None): """ Updates individual attributes of a role. 
+ + """ for param in (role, body): if param in SKIP_IN_PATH: @@ -309,13 +353,15 @@ async def patch_role(self, role, body, params=None, headers=None): async def patch_roles(self, body, params=None, headers=None): """ Creates, updates, or deletes multiple roles in a single call. + + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return await self.transport.perform_request( "PATCH", - _make_path("_plugins", "_security", "api", "roles"), + "/_plugins/_security/api/roles", params=params, headers=headers, body=body, @@ -325,6 +371,8 @@ async def patch_roles(self, body, params=None, headers=None): async def get_role_mapping(self, role, params=None, headers=None): """ Retrieves one role mapping. + + """ if role in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'role'.") @@ -340,10 +388,11 @@ async def get_role_mapping(self, role, params=None, headers=None): async def get_role_mappings(self, params=None, headers=None): """ Retrieves all role mappings. + """ return await self.transport.perform_request( "GET", - _make_path("_plugins", "_security", "api", "rolesmapping"), + "/_plugins/_security/api/rolesmapping", params=params, headers=headers, ) @@ -352,6 +401,8 @@ async def get_role_mappings(self, params=None, headers=None): async def delete_role_mapping(self, role, params=None, headers=None): """ Deletes the specified role mapping. + + """ if role in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'role'.") @@ -367,6 +418,8 @@ async def delete_role_mapping(self, role, params=None, headers=None): async def create_role_mapping(self, role, body, params=None, headers=None): """ Creates or replaces the specified role mapping. 
+ + """ for param in (role, body): if param in SKIP_IN_PATH: @@ -384,6 +437,8 @@ async def create_role_mapping(self, role, body, params=None, headers=None): async def patch_role_mapping(self, role, body, params=None, headers=None): """ Updates individual attributes of a role mapping. + + """ for param in (role, body): if param in SKIP_IN_PATH: @@ -401,13 +456,15 @@ async def patch_role_mapping(self, role, body, params=None, headers=None): async def patch_role_mappings(self, body, params=None, headers=None): """ Creates or updates multiple role mappings in a single call. + + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return await self.transport.perform_request( "PATCH", - _make_path("_plugins", "_security", "api", "rolesmapping"), + "/_plugins/_security/api/rolesmapping", params=params, headers=headers, body=body, @@ -417,6 +474,8 @@ async def patch_role_mappings(self, body, params=None, headers=None): async def get_tenant(self, tenant, params=None, headers=None): """ Retrieves one tenant. + + """ if tenant in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'tenant'.") @@ -432,18 +491,18 @@ async def get_tenant(self, tenant, params=None, headers=None): async def get_tenants(self, params=None, headers=None): """ Retrieves all tenants. + """ return await self.transport.perform_request( - "GET", - _make_path("_plugins", "_security", "api", "tenants"), - params=params, - headers=headers, + "GET", "/_plugins/_security/api/tenants/", params=params, headers=headers ) @query_params() async def delete_tenant(self, tenant, params=None, headers=None): """ - Deletes the specified tenant. + Delete the specified tenant. 
+ + """ if tenant in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'tenant'.") @@ -459,6 +518,8 @@ async def delete_tenant(self, tenant, params=None, headers=None): async def create_tenant(self, tenant, body, params=None, headers=None): """ Creates or replaces the specified tenant. + + """ for param in (tenant, body): if param in SKIP_IN_PATH: @@ -476,6 +537,8 @@ async def create_tenant(self, tenant, body, params=None, headers=None): async def patch_tenant(self, tenant, body, params=None, headers=None): """ Add, delete, or modify a single tenant. + + """ for param in (tenant, body): if param in SKIP_IN_PATH: @@ -493,13 +556,15 @@ async def patch_tenant(self, tenant, body, params=None, headers=None): async def patch_tenants(self, body, params=None, headers=None): """ Add, delete, or modify multiple tenants in a single call. + + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return await self.transport.perform_request( "PATCH", - _make_path("_plugins", "_security", "api", "tenants"), + "/_plugins/_security/api/tenants/", params=params, headers=headers, body=body, @@ -508,11 +573,12 @@ async def patch_tenants(self, body, params=None, headers=None): @query_params() async def get_configuration(self, params=None, headers=None): """ - Retrieves the current Security plugin configuration in JSON format. + Returns the current Security plugin configuration in JSON format. + """ return await self.transport.perform_request( "GET", - _make_path("_plugins", "_security", "api", "securityconfig"), + "/_plugins/_security/api/securityconfig", params=params, headers=headers, ) @@ -520,14 +586,16 @@ async def get_configuration(self, params=None, headers=None): @query_params() async def update_configuration(self, body, params=None, headers=None): """ - Retrieves the current Security plugin configuration in JSON format. + Adds or updates the existing configuration using the REST API. 
+ + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return await self.transport.perform_request( "PUT", - _make_path("_plugins", "_security", "api", "securityconfig", "config"), + "/_plugins/_security/api/securityconfig/config", params=params, headers=headers, body=body, @@ -536,14 +604,16 @@ async def update_configuration(self, body, params=None, headers=None): @query_params() async def patch_configuration(self, body, params=None, headers=None): """ - Updates the existing configuration using the REST API. + A PATCH call is used to update the existing configuration using the REST API. + + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return await self.transport.perform_request( "PATCH", - _make_path("_plugins", "_security", "api", "securityconfig"), + "/_plugins/_security/api/securityconfig", params=params, headers=headers, body=body, @@ -555,6 +625,8 @@ async def get_distinguished_names( ): """ Retrieves all distinguished names in the allow list. + + """ return await self.transport.perform_request( "GET", @@ -565,14 +637,18 @@ async def get_distinguished_names( @query_params() async def update_distinguished_names( - self, cluster_name, body, params=None, headers=None + self, cluster_name, body=None, params=None, headers=None ): """ - Adds or updates the specified distinguished names in the cluster's or node's allow list. + Adds or updates the specified distinguished names in the cluster’s or node’s + allow list. + + """ - for param in (cluster_name, body): - if param in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument.") + if cluster_name in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'cluster_name'." 
+ ) return await self.transport.perform_request( "PUT", @@ -585,11 +661,14 @@ async def update_distinguished_names( @query_params() async def delete_distinguished_names(self, cluster_name, params=None, headers=None): """ - Deletes all distinguished names in the specified cluster's or node's allow list. + Deletes all distinguished names in the specified cluster’s or node’s allow + list. + + """ if cluster_name in SKIP_IN_PATH: raise ValueError( - "Empty value passed for a required argument 'cluster-name'." + "Empty value passed for a required argument 'cluster_name'." ) return await self.transport.perform_request( @@ -602,25 +681,22 @@ async def delete_distinguished_names(self, cluster_name, params=None, headers=No @query_params() async def get_certificates(self, params=None, headers=None): """ - Retrieves the cluster's security certificates. + Retrieves the cluster’s security certificates. + """ return await self.transport.perform_request( - "GET", - _make_path("_plugins", "_security", "api", "ssl", "certs"), - params=params, - headers=headers, + "GET", "/_plugins/_security/api/ssl/certs", params=params, headers=headers ) @query_params() async def reload_transport_certificates(self, params=None, headers=None): """ - Reloads SSL certificates that are about to expire without restarting the OpenSearch node. + Reload transport layer communication certificates. + """ return await self.transport.perform_request( "PUT", - _make_path( - "_opendistro", "_security", "api", "ssl", "transport", "reloadcerts" - ), + "/_plugins/_security/api/ssl/transport/reloadcerts", params=params, headers=headers, ) @@ -628,11 +704,12 @@ async def reload_transport_certificates(self, params=None, headers=None): @query_params() async def reload_http_certificates(self, params=None, headers=None): """ - Reloads SSL certificates that are about to expire without restarting the OpenSearch node. + Reload HTTP layer communication certificates. 
+ """ return await self.transport.perform_request( "PUT", - _make_path("_opendistro", "_security", "api", "ssl", "http", "reloadcerts"), + "/_plugins/_security/api/ssl/http/reloadcerts", params=params, headers=headers, ) @@ -641,12 +718,10 @@ async def reload_http_certificates(self, params=None, headers=None): async def flush_cache(self, params=None, headers=None): """ Flushes the Security plugin user, authentication, and authorization cache. + """ return await self.transport.perform_request( - "DELETE", - _make_path("_plugins", "_security", "api", "cache"), - params=params, - headers=headers, + "DELETE", "/_plugins/_security/api/cache", params=params, headers=headers ) @query_params() @@ -662,13 +737,11 @@ async def health(self, params=None, headers=None): @query_params() async def get_audit_configuration(self, params=None, headers=None): """ - A GET call retrieves the audit configuration. + Retrieves the audit configuration. + """ return await self.transport.perform_request( - "GET", - _make_path("_opendistro", "_security", "api", "audit"), - params=params, - headers=headers, + "GET", "/_plugins/_security/api/audit", params=params, headers=headers ) @query_params() @@ -676,6 +749,7 @@ async def update_audit_configuration(self, body, params=None, headers=None): """ Updates the audit configuration. + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -692,13 +766,33 @@ async def update_audit_configuration(self, body, params=None, headers=None): async def patch_audit_configuration(self, body, params=None, headers=None): """ A PATCH call is used to update specified fields in the audit configuration. 
+ + + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return await self.transport.perform_request( + "PATCH", + "/_plugins/_security/api/audit", + params=params, + headers=headers, + body=body, + ) + + @query_params() + async def patch_distinguished_names(self, body, params=None, headers=None): + """ + Bulk update of distinguished names. + + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return await self.transport.perform_request( "PATCH", - _make_path("_opendistro", "_security", "api", "audit"), + "/_plugins/_security/api/nodesdn", params=params, headers=headers, body=body, diff --git a/opensearchpy/_async/client/security.pyi b/opensearchpy/_async/client/security.pyi index 7840445a..b3010b3b 100644 --- a/opensearchpy/_async/client/security.pyi +++ b/opensearchpy/_async/client/security.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -6,191 +7,734 @@ # # Modifications Copyright OpenSearch Contributors. See # GitHub history for details. + +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union -from .utils import NamespacedClient as NamespacedClient +from .utils import NamespacedClient class SecurityClient(NamespacedClient): async def get_account_details( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def change_password( self, + *, body: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
async def get_action_group( self, action_group: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def get_action_groups( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
async def delete_action_group( self, action_group: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def create_action_group( self, action_group: Any, + *, body: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
async def patch_action_group( self, action_group: Any, + *, body: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def patch_action_groups( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
async def get_user( self, username: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def get_users( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
async def delete_user( self, username: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def create_user( self, username: Any, + *, body: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
async def patch_user( self, username: Any, + *, body: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def patch_users( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def get_role( - self, role: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... 
+ self, + role: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def get_roles( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def delete_role( - self, role: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + role: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
async def create_role( - self, role: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + role: Any, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def patch_role( - self, role: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + role: Any, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def patch_roles( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... 
+ self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def get_role_mapping( - self, role: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + role: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def get_role_mappings( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
async def delete_role_mapping( - self, role: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + role: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def create_role_mapping( - self, role: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + role: Any, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def patch_role_mapping( - self, role: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... 
+ self, + role: Any, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def patch_role_mappings( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def get_tenant( self, tenant: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
async def get_tenants( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def delete_tenant( self, tenant: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
async def create_tenant( self, tenant: Any, + *, body: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def patch_tenant( self, tenant: Any, + *, body: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def patch_tenants( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... 
+ self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def get_configuration( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def update_configuration( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
async def patch_configuration( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def get_distinguished_names( self, - cluster_name: Union[Any, None] = ..., - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., + *, + cluster_name: Optional[Any] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
async def update_distinguished_names( self, cluster_name: Any, - body: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., + *, + body: Optional[Any] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def delete_distinguished_names( self, cluster_name: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def get_certificates( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... 
+ self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def reload_transport_certificates( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def reload_http_certificates( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
async def flush_cache( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def health( self, @@ -209,7 +753,20 @@ class SecurityClient(NamespacedClient): headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def get_audit_configuration( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def update_audit_configuration( self, @@ -229,5 +786,36 @@ class SecurityClient(NamespacedClient): headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... async def patch_audit_configuration( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... 
+ self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + async def patch_distinguished_names( + self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... diff --git a/opensearchpy/_async/client/snapshot.py b/opensearchpy/_async/client/snapshot.py index 4f2acd6a..f9960b64 100644 --- a/opensearchpy/_async/client/snapshot.py +++ b/opensearchpy/_async/client/snapshot.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -25,25 +26,36 @@ # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params class SnapshotClient(NamespacedClient): - @query_params("master_timeout", "cluster_manager_timeout", "wait_for_completion") + @query_params("cluster_manager_timeout", "master_timeout", "wait_for_completion") async def create(self, repository, snapshot, body=None, params=None, headers=None): """ Creates a snapshot in a repository. - :arg repository: A repository name - :arg snapshot: A snapshot name + :arg repository: Repository name. + :arg snapshot: Snapshot name. :arg body: The snapshot definition - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg wait_for_completion: Should this request wait until the - operation has completed before returning + operation has completed before returning. Default is false. 
""" for param in (repository, snapshot): if param in SKIP_IN_PATH: @@ -57,18 +69,19 @@ async def create(self, repository, snapshot, body=None, params=None, headers=Non body=body, ) - @query_params("master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "master_timeout") async def delete(self, repository, snapshot, params=None, headers=None): """ Deletes a snapshot. - :arg repository: A repository name - :arg snapshot: A snapshot name - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + :arg repository: Repository name. + :arg snapshot: Snapshot name. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ for param in (repository, snapshot): if param in SKIP_IN_PATH: @@ -82,33 +95,25 @@ async def delete(self, repository, snapshot, params=None, headers=None): ) @query_params( - "ignore_unavailable", - "include_repository", - "index_details", - "master_timeout", - "cluster_manager_timeout", - "verbose", + "cluster_manager_timeout", "ignore_unavailable", "master_timeout", "verbose" ) async def get(self, repository, snapshot, params=None, headers=None): """ Returns information about a snapshot. - :arg repository: A repository name - :arg snapshot: A comma-separated list of snapshot names + :arg repository: Repository name. + :arg snapshot: Comma-separated list of snapshot names. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. 
:arg ignore_unavailable: Whether to ignore unavailable snapshots, defaults to false which means a SnapshotMissingException is - thrown - :arg include_repository: Whether to include the repository name - in the snapshot info. Defaults to true. - :arg index_details: Whether to include details of each index in - the snapshot, if those details are available. Defaults to false. - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + thrown. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg verbose: Whether to show verbose snapshot info or only show - the basic info found in the repository index blob + the basic info found in the repository index blob. """ for param in (repository, snapshot): if param in SKIP_IN_PATH: @@ -121,7 +126,7 @@ async def get(self, repository, snapshot, params=None, headers=None): headers=headers, ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") async def delete_repository(self, repository, params=None, headers=None): """ Deletes a repository. @@ -129,11 +134,12 @@ async def delete_repository(self, repository, params=None, headers=None): :arg repository: Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported. - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. 
+ :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'repository'.") @@ -145,38 +151,40 @@ async def delete_repository(self, repository, params=None, headers=None): headers=headers, ) - @query_params("local", "master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "local", "master_timeout") async def get_repository(self, repository=None, params=None, headers=None): """ Returns information about a repository. - :arg repository: A comma-separated list of repository names + :arg repository: Comma-separated list of repository names. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ return await self.transport.perform_request( "GET", _make_path("_snapshot", repository), params=params, headers=headers ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout", "verify") + @query_params("cluster_manager_timeout", "master_timeout", "timeout", "verify") async def create_repository(self, repository, body, params=None, headers=None): """ Creates a repository. - :arg repository: A repository name + :arg repository: Repository name. 
:arg body: The repository definition - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout - :arg verify: Whether to verify the repository after creation + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. + :arg verify: Whether to verify the repository after creation. """ for param in (repository, body): if param in SKIP_IN_PATH: @@ -190,21 +198,22 @@ async def create_repository(self, repository, body, params=None, headers=None): body=body, ) - @query_params("master_timeout", "cluster_manager_timeout", "wait_for_completion") + @query_params("cluster_manager_timeout", "master_timeout", "wait_for_completion") async def restore(self, repository, snapshot, body=None, params=None, headers=None): """ Restores a snapshot. - :arg repository: A repository name - :arg snapshot: A snapshot name + :arg repository: Repository name. + :arg snapshot: Snapshot name. :arg body: Details of what to restore - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg wait_for_completion: Should this request wait until the - operation has completed before returning + operation has completed before returning. Default is false. 
""" for param in (repository, snapshot): if param in SKIP_IN_PATH: @@ -218,21 +227,22 @@ async def restore(self, repository, snapshot, body=None, params=None, headers=No body=body, ) - @query_params("ignore_unavailable", "master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "ignore_unavailable", "master_timeout") async def status(self, repository=None, snapshot=None, params=None, headers=None): """ Returns information about the status of a snapshot. - :arg repository: A repository name - :arg snapshot: A comma-separated list of snapshot names + :arg repository: Repository name. + :arg snapshot: Comma-separated list of snapshot names. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg ignore_unavailable: Whether to ignore unavailable snapshots, defaults to false which means a SnapshotMissingException is - thrown - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + thrown. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ return await self.transport.perform_request( "GET", @@ -241,18 +251,19 @@ async def status(self, repository=None, snapshot=None, params=None, headers=None headers=headers, ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") async def verify_repository(self, repository, params=None, headers=None): """ Verifies a repository. 
- :arg repository: A repository name - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout + :arg repository: Repository name. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'repository'.") @@ -264,18 +275,19 @@ async def verify_repository(self, repository, params=None, headers=None): headers=headers, ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") async def cleanup_repository(self, repository, params=None, headers=None): """ Removes stale data from repository. - :arg repository: A repository name - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout + :arg repository: Repository name. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. 
""" if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'repository'.") @@ -287,7 +299,7 @@ async def cleanup_repository(self, repository, params=None, headers=None): headers=headers, ) - @query_params("master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "master_timeout") async def clone( self, repository, snapshot, target_snapshot, body, params=None, headers=None ): @@ -295,14 +307,15 @@ async def clone( Clones indices from one snapshot into another snapshot in the same repository. - :arg repository: A repository name - :arg snapshot: The name of the snapshot to clone from - :arg target_snapshot: The name of the cloned snapshot to create + :arg repository: Repository name. + :arg snapshot: Snapshot name. + :arg target_snapshot: The name of the cloned snapshot to create. :arg body: The snapshot clone definition - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. 
""" for param in (repository, snapshot, target_snapshot, body): if param in SKIP_IN_PATH: @@ -315,56 +328,3 @@ async def clone( headers=headers, body=body, ) - - @query_params( - "blob_count", - "concurrency", - "detailed", - "early_read_node_count", - "max_blob_size", - "max_total_data_size", - "rare_action_probability", - "rarely_abort_writes", - "read_node_count", - "seed", - "timeout", - ) - async def repository_analyze(self, repository, params=None, headers=None): - """ - Analyzes a repository for correctness and performance - - - :arg repository: A repository name - :arg blob_count: Number of blobs to create during the test. - Defaults to 100. - :arg concurrency: Number of operations to run concurrently - during the test. Defaults to 10. - :arg detailed: Whether to return detailed results or a summary. - Defaults to 'false' so that only the summary is returned. - :arg early_read_node_count: Number of nodes on which to perform - an early read on a blob, i.e. before writing has completed. Early reads - are rare actions so the 'rare_action_probability' parameter is also - relevant. Defaults to 2. - :arg max_blob_size: Maximum size of a blob to create during the - test, e.g '1gb' or '100mb'. Defaults to '10mb'. - :arg max_total_data_size: Maximum total size of all blobs to - create during the test, e.g '1tb' or '100gb'. Defaults to '1gb'. - :arg rare_action_probability: Probability of taking a rare - action such as an early read or an overwrite. Defaults to 0.02. - :arg rarely_abort_writes: Whether to rarely abort writes before - they complete. Defaults to 'true'. - :arg read_node_count: Number of nodes on which to read a blob - after writing. Defaults to 10. - :arg seed: Seed for the random number generator used to create - the test workload. Defaults to a random value. - :arg timeout: Explicit operation timeout. Defaults to '30s'. 
- """ - if repository in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'repository'.") - - return await self.transport.perform_request( - "POST", - _make_path("_snapshot", repository, "_analyze"), - params=params, - headers=headers, - ) diff --git a/opensearchpy/_async/client/snapshot.pyi b/opensearchpy/_async/client/snapshot.pyi index 2167c97f..b219a323 100644 --- a/opensearchpy/_async/client/snapshot.pyi +++ b/opensearchpy/_async/client/snapshot.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -24,6 +25,15 @@ # specific language governing permissions and limitations # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union from .utils import NamespacedClient @@ -35,8 +45,8 @@ class SnapshotClient(NamespacedClient): snapshot: Any, *, body: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., wait_for_completion: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -56,8 +66,8 @@ class SnapshotClient(NamespacedClient): repository: Any, snapshot: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = 
..., @@ -76,11 +86,9 @@ class SnapshotClient(NamespacedClient): repository: Any, snapshot: Any, *, + cluster_manager_timeout: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., - include_repository: Optional[Any] = ..., - index_details: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., verbose: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -99,8 +107,8 @@ class SnapshotClient(NamespacedClient): self, repository: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -119,9 +127,9 @@ class SnapshotClient(NamespacedClient): self, *, repository: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -140,8 +148,8 @@ class SnapshotClient(NamespacedClient): repository: Any, *, body: Any, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., verify: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -163,8 +171,8 @@ class SnapshotClient(NamespacedClient): snapshot: Any, *, body: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., wait_for_completion: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -184,9 +192,9 @@ class SnapshotClient(NamespacedClient): *, repository: Optional[Any] = ..., snapshot: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - 
cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -204,8 +212,8 @@ class SnapshotClient(NamespacedClient): self, repository: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -224,8 +232,8 @@ class SnapshotClient(NamespacedClient): self, repository: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -247,36 +255,8 @@ class SnapshotClient(NamespacedClient): target_snapshot: Any, *, body: Any, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... 
- async def repository_analyze( - self, - repository: Any, - *, - blob_count: Optional[Any] = ..., - concurrency: Optional[Any] = ..., - detailed: Optional[Any] = ..., - early_read_node_count: Optional[Any] = ..., - max_blob_size: Optional[Any] = ..., - max_total_data_size: Optional[Any] = ..., - rare_action_probability: Optional[Any] = ..., - rarely_abort_writes: Optional[Any] = ..., - read_node_count: Optional[Any] = ..., - seed: Optional[Any] = ..., - timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., diff --git a/opensearchpy/_async/client/tasks.py b/opensearchpy/_async/client/tasks.py index 212b9e56..7efce482 100644 --- a/opensearchpy/_async/client/tasks.py +++ b/opensearchpy/_async/client/tasks.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -57,9 +58,10 @@ async def list(self, params=None, headers=None): :arg actions: Comma-separated list of actions that should be returned. Leave empty to return all. - :arg detailed: Return detailed task information. + :arg detailed: Return detailed task information. Default is + false. :arg group_by: Group tasks by nodes or parent/child - relationships. Valid choices: nodes, parents, none + relationships. Valid choices are nodes, parents, none. :arg nodes: Comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all @@ -68,7 +70,7 @@ async def list(self, params=None, headers=None): (node_id:task_number). Set to -1 to return all. :arg timeout: Operation timeout. :arg wait_for_completion: Should this request wait until the - operation has completed before returning. + operation has completed before returning. Default is false. 
""" return await self.transport.perform_request( "GET", "/_tasks", params=params, headers=headers @@ -91,7 +93,7 @@ async def cancel(self, task_id=None, params=None, headers=None): :arg parent_task_id: Cancel tasks with specified parent task id (node_id:task_number). Set to -1 to cancel all. :arg wait_for_completion: Should this request wait until the - operation has completed before returning. + operation has completed before returning. Default is false. """ return await self.transport.perform_request( "POST", @@ -110,7 +112,7 @@ async def get(self, task_id=None, params=None, headers=None): (node_id:task_number). :arg timeout: Operation timeout. :arg wait_for_completion: Should this request wait until the - operation has completed before returning. + operation has completed before returning. Default is false. """ if task_id in SKIP_IN_PATH: warnings.warn( diff --git a/opensearchpy/_async/client/tasks.pyi b/opensearchpy/_async/client/tasks.pyi index 14081a2d..f3cf05d0 100644 --- a/opensearchpy/_async/client/tasks.pyi +++ b/opensearchpy/_async/client/tasks.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/client/utils.py b/opensearchpy/_async/client/utils.py index b9ea1894..59bedb8e 100644 --- a/opensearchpy/_async/client/utils.py +++ b/opensearchpy/_async/client/utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/client/utils.pyi b/opensearchpy/_async/client/utils.pyi index bf88f587..e175d5e2 100644 --- a/opensearchpy/_async/client/utils.pyi +++ b/opensearchpy/_async/client/utils.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/compat.py b/opensearchpy/_async/compat.py index 
d9c411d4..66c2eca8 100644 --- a/opensearchpy/_async/compat.py +++ b/opensearchpy/_async/compat.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/compat.pyi b/opensearchpy/_async/compat.pyi index 60b54b86..290396de 100644 --- a/opensearchpy/_async/compat.pyi +++ b/opensearchpy/_async/compat.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/helpers/__init__.py b/opensearchpy/_async/helpers/__init__.py index 6c0097cd..22c54ac8 100644 --- a/opensearchpy/_async/helpers/__init__.py +++ b/opensearchpy/_async/helpers/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/helpers/actions.py b/opensearchpy/_async/helpers/actions.py index 323a6668..1f49220f 100644 --- a/opensearchpy/_async/helpers/actions.py +++ b/opensearchpy/_async/helpers/actions.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/helpers/actions.pyi b/opensearchpy/_async/helpers/actions.pyi index cd6b6974..20cc0661 100644 --- a/opensearchpy/_async/helpers/actions.pyi +++ b/opensearchpy/_async/helpers/actions.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/helpers/document.py b/opensearchpy/_async/helpers/document.py index 7f796a86..e71bef46 100644 --- a/opensearchpy/_async/helpers/document.py +++ b/opensearchpy/_async/helpers/document.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to 
diff --git a/opensearchpy/_async/helpers/document.pyi b/opensearchpy/_async/helpers/document.pyi index 71eb4ef4..f39d5471 100644 --- a/opensearchpy/_async/helpers/document.pyi +++ b/opensearchpy/_async/helpers/document.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/helpers/faceted_search.py b/opensearchpy/_async/helpers/faceted_search.py index c6ca4385..86f22e00 100644 --- a/opensearchpy/_async/helpers/faceted_search.py +++ b/opensearchpy/_async/helpers/faceted_search.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/helpers/faceted_search.pyi b/opensearchpy/_async/helpers/faceted_search.pyi index 443e87c5..0e79f1f6 100644 --- a/opensearchpy/_async/helpers/faceted_search.pyi +++ b/opensearchpy/_async/helpers/faceted_search.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/helpers/index.py b/opensearchpy/_async/helpers/index.py index c3e5a371..51082dc6 100644 --- a/opensearchpy/_async/helpers/index.py +++ b/opensearchpy/_async/helpers/index.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/helpers/index.pyi b/opensearchpy/_async/helpers/index.pyi index 5b9d8720..6a89f0d1 100644 --- a/opensearchpy/_async/helpers/index.pyi +++ b/opensearchpy/_async/helpers/index.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/helpers/mapping.py b/opensearchpy/_async/helpers/mapping.py index 1ccec472..967c74c8 100644 --- 
a/opensearchpy/_async/helpers/mapping.py +++ b/opensearchpy/_async/helpers/mapping.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/helpers/mapping.pyi b/opensearchpy/_async/helpers/mapping.pyi index 61505f42..91b8d64b 100644 --- a/opensearchpy/_async/helpers/mapping.pyi +++ b/opensearchpy/_async/helpers/mapping.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/helpers/search.py b/opensearchpy/_async/helpers/search.py index bd6884cf..73c52971 100644 --- a/opensearchpy/_async/helpers/search.py +++ b/opensearchpy/_async/helpers/search.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/helpers/search.pyi b/opensearchpy/_async/helpers/search.pyi index 4fb1cd3c..3413c889 100644 --- a/opensearchpy/_async/helpers/search.pyi +++ b/opensearchpy/_async/helpers/search.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/helpers/test.py b/opensearchpy/_async/helpers/test.py index c8e43273..895ae991 100644 --- a/opensearchpy/_async/helpers/test.py +++ b/opensearchpy/_async/helpers/test.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/helpers/test.pyi b/opensearchpy/_async/helpers/test.pyi index 451bfc14..497d8caf 100644 --- a/opensearchpy/_async/helpers/test.pyi +++ b/opensearchpy/_async/helpers/test.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git 
a/opensearchpy/_async/helpers/update_by_query.py b/opensearchpy/_async/helpers/update_by_query.py index 322b1488..fc9eef54 100644 --- a/opensearchpy/_async/helpers/update_by_query.py +++ b/opensearchpy/_async/helpers/update_by_query.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/helpers/update_by_query.pyi b/opensearchpy/_async/helpers/update_by_query.pyi index 3c5a9ed7..57d692c6 100644 --- a/opensearchpy/_async/helpers/update_by_query.pyi +++ b/opensearchpy/_async/helpers/update_by_query.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/http_aiohttp.py b/opensearchpy/_async/http_aiohttp.py index cc426164..cab7782e 100644 --- a/opensearchpy/_async/http_aiohttp.py +++ b/opensearchpy/_async/http_aiohttp.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/http_aiohttp.pyi b/opensearchpy/_async/http_aiohttp.pyi index 223fdfff..d641a5eb 100644 --- a/opensearchpy/_async/http_aiohttp.pyi +++ b/opensearchpy/_async/http_aiohttp.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/plugins/__init__.py b/opensearchpy/_async/plugins/__init__.py index 6c0097cd..22c54ac8 100644 --- a/opensearchpy/_async/plugins/__init__.py +++ b/opensearchpy/_async/plugins/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/plugins/__init__.pyi b/opensearchpy/_async/plugins/__init__.pyi index 6c0097cd..22c54ac8 100644 --- a/opensearchpy/_async/plugins/__init__.pyi 
+++ b/opensearchpy/_async/plugins/__init__.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/plugins/alerting.py b/opensearchpy/_async/plugins/alerting.py index d8b27937..be79ed02 100644 --- a/opensearchpy/_async/plugins/alerting.py +++ b/opensearchpy/_async/plugins/alerting.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/plugins/alerting.pyi b/opensearchpy/_async/plugins/alerting.pyi index 50392224..7629df93 100644 --- a/opensearchpy/_async/plugins/alerting.pyi +++ b/opensearchpy/_async/plugins/alerting.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/plugins/index_management.py b/opensearchpy/_async/plugins/index_management.py index 3be06e6a..ea654bc2 100644 --- a/opensearchpy/_async/plugins/index_management.py +++ b/opensearchpy/_async/plugins/index_management.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/plugins/index_management.pyi b/opensearchpy/_async/plugins/index_management.pyi index cd08954d..98d50097 100644 --- a/opensearchpy/_async/plugins/index_management.pyi +++ b/opensearchpy/_async/plugins/index_management.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_async/transport.py b/opensearchpy/_async/transport.py index 3db4516c..bf1b77d6 100644 --- a/opensearchpy/_async/transport.py +++ b/opensearchpy/_async/transport.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch 
Contributors require contributions made to diff --git a/opensearchpy/_async/transport.pyi b/opensearchpy/_async/transport.pyi index cc9406bf..5d66514d 100644 --- a/opensearchpy/_async/transport.pyi +++ b/opensearchpy/_async/transport.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/_version.py b/opensearchpy/_version.py index 82fac929..2410b9f5 100644 --- a/opensearchpy/_version.py +++ b/opensearchpy/_version.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/client/__init__.py b/opensearchpy/client/__init__.py index 8f976879..6a8dffb6 100644 --- a/opensearchpy/client/__init__.py +++ b/opensearchpy/client/__init__.py @@ -26,6 +26,16 @@ # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from __future__ import unicode_literals import logging @@ -40,6 +50,7 @@ from .nodes import NodesClient from .plugins import PluginsClient from .remote import RemoteClient +from .remote_store import RemoteStoreClient from .security import SecurityClient from .snapshot import SnapshotClient from .tasks import TasksClient @@ -207,6 +218,7 @@ class as kwargs, or a string in the format of ``host[:port]`` which will be self.security = SecurityClient(self) self.snapshot = SnapshotClient(self) self.tasks = TasksClient(self) + self.remote_store = RemoteStoreClient(self) self.features = FeaturesClient(self) @@ -275,25 +287,25 @@ def create(self, index, id, body, params=None, headers=None): with a same ID already exists in the index. - :arg index: The name of the index - :arg id: Document ID + :arg index: Index name. + :arg id: Document ID. :arg body: The document :arg pipeline: The pipeline id to preprocess incoming documents - with + with. :arg refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then - do nothing with refreshes. Valid choices: true, false, wait_for - :arg routing: Specific routing value - :arg timeout: Explicit operation timeout - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte + do nothing with refreshes. Valid choices are true, false, wait_for. + :arg routing: Routing value. 
+ :arg timeout: Operation timeout. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. Valid choices are + internal, external, external_gte, force. :arg wait_for_active_shards: Sets the number of shard copies - that must be active before proceeding with the index operation. Defaults - to 1, meaning the primary shard only. Set to `all` for all shard copies, + that must be active before proceeding with the operation. Defaults to 1, + meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total - number of copies for the shard (number of replicas + 1) + number of copies for the shard (number of replicas + 1). Default is 1. """ for param in (index, id, body): if param in SKIP_IN_PATH: @@ -320,49 +332,45 @@ def create(self, index, id, body, params=None, headers=None): ) def index(self, index, body, id=None, params=None, headers=None): """ - Creates or overwrites a document in an index. + Creates or updates a document in an index. - :arg index: The name of the index + :arg index: Index name. :arg body: The document - :arg id: Document ID - :arg if_primary_term: only perform the index operation if the - last operation that has changed the document has the specified primary - term - :arg if_seq_no: only perform the index operation if the last - operation that has changed the document has the specified sequence - number + :arg id: Document ID. + :arg if_primary_term: only perform the operation if the last + operation that has changed the document has the specified primary term. + :arg if_seq_no: only perform the operation if the last operation + that has changed the document has the specified sequence number. :arg op_type: Explicit operation type. Defaults to `index` for requests with an explicit document ID, and to `create`for requests - without an explicit document ID Valid choices: index, create + without an explicit document ID. 
Valid choices are index, create. :arg pipeline: The pipeline id to preprocess incoming documents - with + with. :arg refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then - do nothing with refreshes. Valid choices: true, false, wait_for + do nothing with refreshes. Valid choices are true, false, wait_for. :arg require_alias: When true, requires destination to be an - alias. Default is false - :arg routing: Specific routing value - :arg timeout: Explicit operation timeout - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte + alias. Default is false. + :arg routing: Routing value. + :arg timeout: Operation timeout. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. Valid choices are + internal, external, external_gte, force. :arg wait_for_active_shards: Sets the number of shard copies - that must be active before proceeding with the index operation. Defaults - to 1, meaning the primary shard only. Set to `all` for all shard copies, + that must be active before proceeding with the operation. Defaults to 1, + meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total - number of copies for the shard (number of replicas + 1) + number of copies for the shard (number of replicas + 1). Default is 1. 
""" for param in (index, body): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - doc_type = "_doc" - return self.transport.perform_request( "POST" if id in SKIP_IN_PATH else "PUT", - _make_path(index, doc_type, id), + _make_path(index, "_doc", id), params=params, headers=headers, body=body, @@ -386,29 +394,29 @@ def bulk(self, body, index=None, params=None, headers=None): :arg body: The operation definition and data (action-data pairs), separated by newlines - :arg index: Default index for items which don't provide one + :arg index: Default index for items which don't provide one. :arg _source: True or false to return the _source field or not, or default list of fields to return, can be overridden on each sub- - request + request. :arg _source_excludes: Default list of fields to exclude from - the returned _source field, can be overridden on each sub-request + the returned _source field, can be overridden on each sub-request. :arg _source_includes: Default list of fields to extract and - return from the _source field, can be overridden on each sub-request + return from the _source field, can be overridden on each sub-request. :arg pipeline: The pipeline id to preprocess incoming documents - with + with. :arg refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then - do nothing with refreshes. Valid choices: true, false, wait_for + do nothing with refreshes. Valid choices are true, false, wait_for. :arg require_alias: Sets require_alias for all incoming - documents. Defaults to unset (false) - :arg routing: Specific routing value - :arg timeout: Explicit operation timeout + documents. Default is false. + :arg routing: Routing value. + :arg timeout: Operation timeout. 
:arg wait_for_active_shards: Sets the number of shard copies - that must be active before proceeding with the bulk operation. Defaults - to 1, meaning the primary shard only. Set to `all` for all shard copies, + that must be active before proceeding with the operation. Defaults to 1, + meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total - number of copies for the shard (number of replicas + 1) + number of copies for the shard (number of replicas + 1). Default is 1. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -428,9 +436,9 @@ def clear_scroll(self, body=None, scroll_id=None, params=None, headers=None): Explicitly clears the search context for a scroll. - :arg body: A comma-separated list of scroll IDs to clear if none + :arg body: Comma-separated list of scroll IDs to clear if none was specified via the scroll_id parameter - :arg scroll_id: A comma-separated list of scroll IDs to clear + :arg scroll_id: Comma-separated list of scroll IDs to clear. """ if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH: raise ValueError("You need to supply scroll_id or body.") @@ -464,37 +472,38 @@ def count(self, body=None, index=None, params=None, headers=None): Returns number of documents matching a query. - :arg body: A query to restrict the results specified with the + :arg body: Query to restrict the results specified with the Query DSL (optional) - :arg index: A comma-separated list of indices to restrict the - results + :arg index: Comma-separated list of indices to restrict the + results. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). 
:arg analyze_wildcard: Specify whether wildcard and prefix - queries should be analyzed (default: false) - :arg analyzer: The analyzer to use for the query string + queries should be analyzed. Default is false. + :arg analyzer: The analyzer to use for the query string. :arg default_operator: The default operator for query string - query (AND or OR) Valid choices: AND, OR Default: OR + query (AND or OR). Valid choices are AND, OR. :arg df: The field to use as default where no field prefix is - given in the query string + given in the query string. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_throttled: Whether specified concrete, expanded or - aliased indices should be ignored when throttled + aliased indices should be ignored when throttled. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg lenient: Specify whether format-based query failures (such - as providing text to a numeric field) should be ignored + as providing text to a numeric field) should be ignored. :arg min_score: Include only documents with a specific `_score` - value in the result + value in the result. :arg preference: Specify the node or shard the operation should - be performed on (default: random) - :arg q: Query in the Lucene query string syntax - :arg routing: A comma-separated list of specific routing values - :arg terminate_after: The maximum count for each shard, upon - reaching which the query execution will terminate early + be performed on. Default is random. + :arg q: Query in the Lucene query string syntax. + :arg routing: Comma-separated list of specific routing values. 
+ :arg terminate_after: The maximum number of documents to collect + for each shard, upon reaching which the query execution will terminate + early. """ return self.transport.perform_request( "POST", @@ -519,37 +528,33 @@ def delete(self, index, id, params=None, headers=None): Removes a document from the index. - :arg index: The name of the index - :arg id: The document ID - :arg if_primary_term: only perform the delete operation if the - last operation that has changed the document has the specified primary - term - :arg if_seq_no: only perform the delete operation if the last - operation that has changed the document has the specified sequence - number + :arg index: Index name. + :arg id: Document ID. + :arg if_primary_term: only perform the operation if the last + operation that has changed the document has the specified primary term. + :arg if_seq_no: only perform the operation if the last operation + that has changed the document has the specified sequence number. :arg refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then - do nothing with refreshes. Valid choices: true, false, wait_for - :arg routing: Specific routing value - :arg timeout: Explicit operation timeout - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte, force + do nothing with refreshes. Valid choices are true, false, wait_for. + :arg routing: Routing value. + :arg timeout: Operation timeout. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. Valid choices are + internal, external, external_gte, force. :arg wait_for_active_shards: Sets the number of shard copies - that must be active before proceeding with the delete operation. - Defaults to 1, meaning the primary shard only. 
Set to `all` for all - shard copies, otherwise set to any non-negative value less than or equal - to the total number of copies for the shard (number of replicas + 1) + that must be active before proceeding with the operation. Defaults to 1, + meaning the primary shard only. Set to `all` for all shard copies, + otherwise set to any non-negative value less than or equal to the total + number of copies for the shard (number of replicas + 1). Default is 1. """ for param in (index, id): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - doc_type = "_doc" - return self.transport.perform_request( - "DELETE", _make_path(index, doc_type, id), params=params, headers=headers + "DELETE", _make_path(index, "_doc", id), params=params, headers=headers ) @query_params( @@ -592,76 +597,76 @@ def delete_by_query(self, index, body, params=None, headers=None): Deletes documents matching the provided query. - :arg index: A comma-separated list of index names to search; use - `_all` or empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg body: The search definition using the Query DSL :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). 
:arg analyze_wildcard: Specify whether wildcard and prefix - queries should be analyzed (default: false) - :arg analyzer: The analyzer to use for the query string - :arg conflicts: What to do when the delete by query hits version - conflicts? Valid choices: abort, proceed Default: abort + queries should be analyzed. Default is false. + :arg analyzer: The analyzer to use for the query string. + :arg conflicts: What to do when the operation encounters version + conflicts?. Valid choices are abort, proceed. :arg default_operator: The default operator for query string - query (AND or OR) Valid choices: AND, OR Default: OR + query (AND or OR). Valid choices are AND, OR. :arg df: The field to use as default where no field prefix is - given in the query string + given in the query string. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg from_: Starting offset (default: 0) + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg from_: Starting offset. Default is 0. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg lenient: Specify whether format-based query failures (such - as providing text to a numeric field) should be ignored + as providing text to a numeric field) should be ignored. :arg max_docs: Maximum number of documents to process (default: - all documents) + all documents). :arg preference: Specify the node or shard the operation should - be performed on (default: random) - :arg q: Query in the Lucene query string syntax - :arg refresh: Should the effected indexes be refreshed? + be performed on. Default is random. + :arg q: Query in the Lucene query string syntax. 
+ :arg refresh: Refresh the shard containing the document before + performing the operation. :arg request_cache: Specify if request cache should be used for - this request or not, defaults to index level setting + this request or not, defaults to index level setting. :arg requests_per_second: The throttle for this request in sub- - requests per second. -1 means no throttle. - :arg routing: A comma-separated list of specific routing values + requests per second. -1 means no throttle. Default is 0. + :arg routing: Comma-separated list of specific routing values. :arg scroll: Specify how long a consistent view of the index - should be maintained for scrolled search - :arg scroll_size: Size on the scroll request powering the delete - by query Default: 100 + should be maintained for scrolled search. + :arg scroll_size: Size on the scroll request powering the + operation. Default is 100. :arg search_timeout: Explicit timeout for each search request. Defaults to no timeout. - :arg search_type: Search operation type Valid choices: - query_then_fetch, dfs_query_then_fetch - :arg size: Deprecated, please use `max_docs` instead + :arg search_type: Search operation type. Valid choices are + query_then_fetch, dfs_query_then_fetch. + :arg size: Deprecated, please use `max_docs` instead. :arg slices: The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be - set to `auto`. Default: 1 - :arg sort: A comma-separated list of : pairs + set to `auto`. Default is 1. + :arg sort: Comma-separated list of : pairs. :arg stats: Specific 'tag' of the request for logging and - statistical purposes + statistical purposes. :arg terminate_after: The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. :arg timeout: Time each individual bulk request should wait for - shards that are unavailable. 
Default: 1m - :arg version: Specify whether to return document version as part - of a hit + shards that are unavailable. Default is 1m. + :arg version: Whether to return document version as part of a + hit. :arg wait_for_active_shards: Sets the number of shard copies - that must be active before proceeding with the delete by query - operation. Defaults to 1, meaning the primary shard only. Set to `all` - for all shard copies, otherwise set to any non-negative value less than - or equal to the total number of copies for the shard (number of replicas - + 1) - :arg wait_for_completion: Should the request should block until - the delete by query is complete. Default: True + that must be active before proceeding with the operation. Defaults to 1, + meaning the primary shard only. Set to `all` for all shard copies, + otherwise set to any non-negative value less than or equal to the total + number of copies for the shard (number of replicas + 1). Default is 1. + :arg wait_for_completion: Should this request wait until the + operation has completed before returning. Default is True. """ # from is a reserved word so it cannot be used, use from_ instead if "from_" in params: @@ -686,9 +691,9 @@ def delete_by_query_rethrottle(self, task_id, params=None, headers=None): operation. - :arg task_id: The task id to rethrottle - :arg requests_per_second: The throttle to set on this request in - floating sub-requests per second. -1 means set no throttle. + :arg task_id: The task id to rethrottle. + :arg requests_per_second: The throttle for this request in sub- + requests per second. -1 means no throttle. 
""" if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'task_id'.") @@ -700,16 +705,19 @@ def delete_by_query_rethrottle(self, task_id, params=None, headers=None): headers=headers, ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") def delete_script(self, id, params=None, headers=None): """ Deletes a script. - :arg id: Script ID - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + :arg id: Script ID. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") @@ -735,35 +743,33 @@ def exists(self, index, id, params=None, headers=None): Returns information about whether a document exists in an index. - :arg index: The name of the index - :arg id: The document ID + :arg index: Index name. + :arg id: Document ID. :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. :arg preference: Specify the node or shard the operation should - be performed on (default: random) + be performed on. Default is random. 
:arg realtime: Specify whether to perform the operation in - realtime or search mode + realtime or search mode. :arg refresh: Refresh the shard containing the document before - performing the operation - :arg routing: Specific routing value - :arg stored_fields: A comma-separated list of stored fields to - return in the response - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte, force + performing the operation. + :arg routing: Routing value. + :arg stored_fields: Comma-separated list of stored fields to + return. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. Valid choices are + internal, external, external_gte, force. """ for param in (index, id): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - doc_type = "_doc" - return self.transport.perform_request( - "HEAD", _make_path(index, doc_type, id), params=params, headers=headers + "HEAD", _make_path(index, "_doc", id), params=params, headers=headers ) @query_params( @@ -782,24 +788,24 @@ def exists_source(self, index, id, params=None, headers=None): Returns information about whether a document source exists in an index. - :arg index: The name of the index - :arg id: The document ID + :arg index: Index name. + :arg id: Document ID. :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. 
:arg preference: Specify the node or shard the operation should - be performed on (default: random) + be performed on. Default is random. :arg realtime: Specify whether to perform the operation in - realtime or search mode + realtime or search mode. :arg refresh: Refresh the shard containing the document before - performing the operation - :arg routing: Specific routing value - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte, force + performing the operation. + :arg routing: Routing value. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. Valid choices are + internal, external, external_gte, force. """ for param in (index, id): if param in SKIP_IN_PATH: @@ -830,30 +836,30 @@ def explain(self, index, id, body=None, params=None, headers=None): Returns information about why a specific matches (or doesn't match) a query. - :arg index: The name of the index - :arg id: The document ID + :arg index: Index name. + :arg id: Document ID. :arg body: The query definition using the Query DSL :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. :arg analyze_wildcard: Specify whether wildcards and prefix - queries in the query string query should be analyzed (default: false) - :arg analyzer: The analyzer for the query string query + queries in the query string query should be analyzed. Default is false. + :arg analyzer: The analyzer to use for the query string. 
:arg default_operator: The default operator for query string - query (AND or OR) Valid choices: AND, OR Default: OR - :arg df: The default field for query string query (default: - _all) + query (AND or OR). Valid choices are AND, OR. + :arg df: The default field for query string query. Default is + _all. :arg lenient: Specify whether format-based query failures (such - as providing text to a numeric field) should be ignored + as providing text to a numeric field) should be ignored. :arg preference: Specify the node or shard the operation should - be performed on (default: random) - :arg q: Query in the Lucene query string syntax - :arg routing: Specific routing value - :arg stored_fields: A comma-separated list of stored fields to - return in the response + be performed on. Default is random. + :arg q: Query in the Lucene query string syntax. + :arg routing: Routing value. + :arg stored_fields: Comma-separated list of stored fields to + return. """ for param in (index, id): if param in SKIP_IN_PATH: @@ -879,19 +885,19 @@ def field_caps(self, body=None, index=None, params=None, headers=None): :arg body: An index filter specified with the Query DSL - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg fields: A comma-separated list of field names + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. 
+ :arg fields: Comma-separated list of field names. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg include_unmapped: Indicates whether unmapped fields should - be included in the response. + be included in the response. Default is false. """ return self.transport.perform_request( "POST", @@ -918,46 +924,47 @@ def get(self, index, id, params=None, headers=None): Returns a document. - :arg index: The name of the index - :arg id: The document ID + :arg index: Index name. + :arg id: Document ID. :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. :arg preference: Specify the node or shard the operation should - be performed on (default: random) + be performed on. Default is random. :arg realtime: Specify whether to perform the operation in - realtime or search mode + realtime or search mode. :arg refresh: Refresh the shard containing the document before - performing the operation - :arg routing: Specific routing value - :arg stored_fields: A comma-separated list of stored fields to - return in the response - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte, force + performing the operation. + :arg routing: Routing value. + :arg stored_fields: Comma-separated list of stored fields to + return. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. 
Valid choices are + internal, external, external_gte, force. """ for param in (index, id): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - doc_type = "_doc" - return self.transport.perform_request( - "GET", _make_path(index, doc_type, id), params=params, headers=headers + "GET", _make_path(index, "_doc", id), params=params, headers=headers ) - @query_params("master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "master_timeout") def get_script(self, id, params=None, headers=None): """ Returns a script. - :arg id: Script ID - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + :arg id: Script ID. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") @@ -982,24 +989,24 @@ def get_source(self, index, id, params=None, headers=None): Returns the source of a document. - :arg index: The name of the index - :arg id: The document ID + :arg index: Index name. + :arg id: Document ID. :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. :arg preference: Specify the node or shard the operation should - be performed on (default: random) + be performed on. 
Default is random. :arg realtime: Specify whether to perform the operation in - realtime or search mode + realtime or search mode. :arg refresh: Refresh the shard containing the document before - performing the operation - :arg routing: Specific routing value - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte, force + performing the operation. + :arg routing: Routing value. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. Valid choices are + internal, external, external_gte, force. """ for param in (index, id): if param in SKIP_IN_PATH: @@ -1027,24 +1034,24 @@ def mget(self, body, index=None, params=None, headers=None): :arg body: Document identifiers; can be either `docs` - (containing full document information) or `ids` (when index and type is - provided in the URL. - :arg index: The name of the index + (containing full document information) or `ids` (when index is provided + in the URL). + :arg index: Index name. :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. :arg preference: Specify the node or shard the operation should - be performed on (default: random) + be performed on. Default is random. :arg realtime: Specify whether to perform the operation in - realtime or search mode + realtime or search mode. 
:arg refresh: Refresh the shard containing the document before - performing the operation - :arg routing: Specific routing value - :arg stored_fields: A comma-separated list of stored fields to - return in the response + performing the operation. + :arg routing: Routing value. + :arg stored_fields: Comma-separated list of stored fields to + return. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -1073,30 +1080,31 @@ def msearch(self, body, index=None, params=None, headers=None): :arg body: The request definitions (metadata-search request definition pairs), separated by newlines - :arg index: A comma-separated list of index names to use as - default + :arg index: Comma-separated list of indices to use as default. :arg ccs_minimize_roundtrips: Indicates whether network round- trips should be minimized as part of cross-cluster search requests - execution Default: true + execution. Default is True. :arg max_concurrent_searches: Controls the maximum number of - concurrent searches the multi search api will execute + concurrent searches the multi search api will execute. :arg max_concurrent_shard_requests: The number of concurrent shard requests each sub search executes concurrently per node. This value should be used to limit the impact of the search on the cluster in - order to limit the number of concurrent shard requests Default: 5 - :arg pre_filter_shard_size: A threshold that enforces a pre- - filter roundtrip to prefilter search shards based on query rewriting if - the number of shards the search request expands to exceeds the - threshold. This filter roundtrip can limit the number of shards - significantly if for instance a shard can not match any documents based - on its rewrite method ie. if date filters are mandatory to match but the - shard bounds and the query are disjoint. + order to limit the number of concurrent shard requests. Default is 5. 
+ :arg pre_filter_shard_size: Threshold that enforces a pre-filter + round-trip to prefilter search shards based on query rewriting if the + number of shards the search request expands to exceeds the threshold. + This filter round-trip can limit the number of shards significantly if + for instance a shard can not match any documents based on its rewrite + method ie. if date filters are mandatory to match but the shard bounds + and the query are disjoint. :arg rest_total_hits_as_int: Indicates whether hits.total should - be rendered as an integer or an object in the rest search response - :arg search_type: Search operation type Valid choices: - query_then_fetch, dfs_query_then_fetch + be rendered as an integer or an object in the rest search response. + Default is false. + :arg search_type: Search operation type. Valid choices are + query_then_fetch, query_and_fetch, dfs_query_then_fetch, + dfs_query_and_fetch. :arg typed_keys: Specify whether aggregation and suggester names - should be prefixed by their respective types in the response + should be prefixed by their respective types in the response. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -1124,19 +1132,20 @@ def msearch_template(self, body, index=None, params=None, headers=None): :arg body: The request definitions (metadata-search request definition pairs), separated by newlines - :arg index: A comma-separated list of index names to use as - default + :arg index: Comma-separated list of indices to use as default. :arg ccs_minimize_roundtrips: Indicates whether network round- trips should be minimized as part of cross-cluster search requests - execution Default: true + execution. Default is True. :arg max_concurrent_searches: Controls the maximum number of - concurrent searches the multi search api will execute + concurrent searches the multi search api will execute. 
:arg rest_total_hits_as_int: Indicates whether hits.total should - be rendered as an integer or an object in the rest search response - :arg search_type: Search operation type Valid choices: - query_then_fetch, dfs_query_then_fetch + be rendered as an integer or an object in the rest search response. + Default is false. + :arg search_type: Search operation type. Valid choices are + query_then_fetch, query_and_fetch, dfs_query_then_fetch, + dfs_query_and_fetch. :arg typed_keys: Specify whether aggregation and suggester names - should be prefixed by their respective types in the response + should be prefixed by their respective types in the response. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -1176,34 +1185,34 @@ def mtermvectors(self, body=None, index=None, params=None, headers=None): :arg field_statistics: Specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. Applies to all returned documents unless otherwise specified - in body "params" or "docs". Default: True - :arg fields: A comma-separated list of fields to return. Applies - to all returned documents unless otherwise specified in body "params" or - "docs". - :arg ids: A comma-separated list of documents ids. You must - define ids as parameter or set "ids" or "docs" in the request body + in body 'params' or 'docs'. Default is True. + :arg fields: Comma-separated list of fields to return. Applies + to all returned documents unless otherwise specified in body 'params' or + 'docs'. + :arg ids: Comma-separated list of documents ids. You must define + ids as parameter or set 'ids' or 'docs' in the request body. :arg offsets: Specifies if term offsets should be returned. Applies to all returned documents unless otherwise specified in body - "params" or "docs". Default: True + 'params' or 'docs'. Default is True. :arg payloads: Specifies if term payloads should be returned. 
Applies to all returned documents unless otherwise specified in body - "params" or "docs". Default: True + 'params' or 'docs'. Default is True. :arg positions: Specifies if term positions should be returned. Applies to all returned documents unless otherwise specified in body - "params" or "docs". Default: True + 'params' or 'docs'. Default is True. :arg preference: Specify the node or shard the operation should - be performed on (default: random) .Applies to all returned documents - unless otherwise specified in body "params" or "docs". + be performed on. Applies to all returned documents unless otherwise + specified in body 'params' or 'docs'. Default is random. :arg realtime: Specifies if requests are real-time as opposed to - near-real-time (default: true). - :arg routing: Specific routing value. Applies to all returned - documents unless otherwise specified in body "params" or "docs". + near-real-time. Default is True. + :arg routing: Routing value. Applies to all returned documents + unless otherwise specified in body 'params' or 'docs'. :arg term_statistics: Specifies if total term frequency and document frequency should be returned. Applies to all returned documents - unless otherwise specified in body "params" or "docs". - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte, force + unless otherwise specified in body 'params' or 'docs'. Default is false. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. Valid choices are + internal, external, external_gte, force. 
""" path = _make_path(index, "_mtermvectors") @@ -1211,18 +1220,21 @@ def mtermvectors(self, body=None, index=None, params=None, headers=None): "POST", path, params=params, headers=headers, body=body ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") def put_script(self, id, body, context=None, params=None, headers=None): """ Creates or updates a script. - :arg id: Script ID + :arg id: Script ID. :arg body: The document - :arg context: Context name to compile script against - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + :arg context: Script context. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ for param in (id, body): if param in SKIP_IN_PATH: @@ -1242,28 +1254,23 @@ def put_script(self, id, body, context=None, params=None, headers=None): def rank_eval(self, body, index=None, params=None, headers=None): """ Allows to evaluate the quality of ranked search results over a set of typical - search queries - - - .. warning:: + search queries. - This API is **experimental** so may include breaking changes - or be removed in a future version :arg body: The ranking evaluation search definition, including search requests, document ratings and ranking metric definition. - :arg index: A comma-separated list of index names to search; use - `_all` or empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. 
:arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg search_type: Search operation type Valid choices: - query_then_fetch, dfs_query_then_fetch + should be ignored when unavailable (missing or closed). + :arg search_type: Search operation type. Valid choices are + query_then_fetch, dfs_query_then_fetch. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -1296,24 +1303,24 @@ def reindex(self, body, params=None, headers=None): :arg body: The search definition using the Query DSL and the prototype for the index request. :arg max_docs: Maximum number of documents to process (default: - all documents) - :arg refresh: Should the affected indexes be refreshed? - :arg requests_per_second: The throttle to set on this request in - sub-requests per second. -1 means no throttle. - :arg scroll: Control how long to keep the search context alive - Default: 5m + all documents). + :arg refresh: Should the affected indexes be refreshed? + :arg requests_per_second: The throttle for this request in sub- + requests per second. -1 means no throttle. Default is 0. + :arg scroll: Specify how long a consistent view of the index + should be maintained for scrolled search. :arg slices: The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be - set to `auto`. Default: 1 + set to `auto`. Default is 1. 
:arg timeout: Time each individual bulk request should wait for - shards that are unavailable. Default: 1m + shards that are unavailable. Default is 1m. :arg wait_for_active_shards: Sets the number of shard copies - that must be active before proceeding with the reindex operation. - Defaults to 1, meaning the primary shard only. Set to `all` for all - shard copies, otherwise set to any non-negative value less than or equal - to the total number of copies for the shard (number of replicas + 1) - :arg wait_for_completion: Should the request should block until - the reindex is complete. Default: True + that must be active before proceeding with the operation. Defaults to 1, + meaning the primary shard only. Set to `all` for all shard copies, + otherwise set to any non-negative value less than or equal to the total + number of copies for the shard (number of replicas + 1). Default is 1. + :arg wait_for_completion: Should this request wait until the + operation has completed before returning. Default is True. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -1328,9 +1335,9 @@ def reindex_rethrottle(self, task_id, params=None, headers=None): Changes the number of requests per second for a particular Reindex operation. - :arg task_id: The task id to rethrottle - :arg requests_per_second: The throttle to set on this request in - floating sub-requests per second. -1 means set no throttle. + :arg task_id: The task id to rethrottle. + :arg requests_per_second: The throttle for this request in sub- + requests per second. -1 means no throttle. """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'task_id'.") @@ -1349,7 +1356,7 @@ def render_search_template(self, body=None, id=None, params=None, headers=None): :arg body: The search definition template and its params - :arg id: The id of the stored search template + :arg id: The id of the stored search template. 
""" return self.transport.perform_request( "POST", @@ -1362,13 +1369,8 @@ def render_search_template(self, body=None, id=None, params=None, headers=None): @query_params() def scripts_painless_execute(self, body=None, params=None, headers=None): """ - Allows an arbitrary script to be executed and a result to be returned - - - .. warning:: + Allows an arbitrary script to be executed and a result to be returned. - This API is **experimental** so may include breaking changes - or be removed in a future version :arg body: The script to execute """ @@ -1388,11 +1390,12 @@ def scroll(self, body=None, scroll_id=None, params=None, headers=None): :arg body: The scroll ID if not passed by URL or query parameter. - :arg scroll_id: The scroll ID for scrolled search + :arg scroll_id: Scroll ID. :arg rest_total_hits_as_int: Indicates whether hits.total should - be rendered as an integer or an object in the rest search response + be rendered as an integer or an object in the rest search response. + Default is false. :arg scroll: Specify how long a consistent view of the index - should be maintained for scrolled search + should be maintained for scrolled search. """ if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH: raise ValueError("You need to supply scroll_id or body.") @@ -1425,7 +1428,6 @@ def scroll(self, body=None, scroll_id=None, params=None, headers=None): "ignore_unavailable", "lenient", "max_concurrent_shard_requests", - "min_compatible_shard_node", "pre_filter_shard_size", "preference", "q", @@ -1456,101 +1458,99 @@ def search(self, body=None, index=None, params=None, headers=None): :arg body: The search definition using the Query DSL - :arg index: A comma-separated list of index names to search; use - `_all` or empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. 
:arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg allow_partial_search_results: Indicate if an error should - be returned if there is a partial search failure or timeout Default: - True + be returned if there is a partial search failure or timeout. Default is + True. :arg analyze_wildcard: Specify whether wildcard and prefix - queries should be analyzed (default: false) - :arg analyzer: The analyzer to use for the query string + queries should be analyzed. Default is false. + :arg analyzer: The analyzer to use for the query string. :arg batched_reduce_size: The number of shard results that should be reduced at once on the coordinating node. This value should be used as a protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large. - Default: 512 + Default is 512. :arg ccs_minimize_roundtrips: Indicates whether network round- trips should be minimized as part of cross-cluster search requests - execution Default: true + execution. Default is True. :arg default_operator: The default operator for query string - query (AND or OR) Valid choices: AND, OR Default: OR + query (AND or OR). Valid choices are AND, OR. 
:arg df: The field to use as default where no field prefix is - given in the query string - :arg docvalue_fields: A comma-separated list of fields to return - as the docvalue representation of a field for each hit + given in the query string. + :arg docvalue_fields: Comma-separated list of fields to return + as the docvalue representation of a field for each hit. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg explain: Specify whether to return detailed information - about score computation as part of a hit - :arg from_: Starting offset (default: 0) + about score computation as part of a hit. + :arg from_: Starting offset. Default is 0. :arg ignore_throttled: Whether specified concrete, expanded or - aliased indices should be ignored when throttled + aliased indices should be ignored when throttled. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg lenient: Specify whether format-based query failures (such - as providing text to a numeric field) should be ignored + as providing text to a numeric field) should be ignored. :arg max_concurrent_shard_requests: The number of concurrent shard requests per node this search executes concurrently. 
This value should be used to limit the impact of the search on the cluster in order - to limit the number of concurrent shard requests Default: 5 - :arg min_compatible_shard_node: The minimum compatible version - that all shards involved in search should have for this request to be - successful - :arg pre_filter_shard_size: A threshold that enforces a pre- - filter roundtrip to prefilter search shards based on query rewriting if - the number of shards the search request expands to exceeds the - threshold. This filter roundtrip can limit the number of shards - significantly if for instance a shard can not match any documents based - on its rewrite method ie. if date filters are mandatory to match but the - shard bounds and the query are disjoint. + to limit the number of concurrent shard requests. Default is 5. + :arg pre_filter_shard_size: Threshold that enforces a pre-filter + round-trip to prefilter search shards based on query rewriting if the + number of shards the search request expands to exceeds the threshold. + This filter round-trip can limit the number of shards significantly if + for instance a shard can not match any documents based on its rewrite + method ie. if date filters are mandatory to match but the shard bounds + and the query are disjoint. :arg preference: Specify the node or shard the operation should - be performed on (default: random) - :arg q: Query in the Lucene query string syntax + be performed on. Default is random. + :arg q: Query in the Lucene query string syntax. :arg request_cache: Specify if request cache should be used for - this request or not, defaults to index level setting + this request or not, defaults to index level setting. :arg rest_total_hits_as_int: Indicates whether hits.total should - be rendered as an integer or an object in the rest search response - :arg routing: A comma-separated list of specific routing values + be rendered as an integer or an object in the rest search response. + Default is false. 
+ :arg routing: Comma-separated list of specific routing values. :arg scroll: Specify how long a consistent view of the index - should be maintained for scrolled search - :arg search_type: Search operation type Valid choices: - query_then_fetch, dfs_query_then_fetch + should be maintained for scrolled search. + :arg search_type: Search operation type. Valid choices are + query_then_fetch, dfs_query_then_fetch. :arg seq_no_primary_term: Specify whether to return sequence - number and primary term of the last modification of each hit - :arg size: Number of hits to return (default: 10) - :arg sort: A comma-separated list of : pairs + number and primary term of the last modification of each hit. + :arg size: Number of hits to return. Default is 10. + :arg sort: Comma-separated list of : pairs. :arg stats: Specific 'tag' of the request for logging and - statistical purposes - :arg stored_fields: A comma-separated list of stored fields to - return as part of a hit - :arg suggest_field: Specify which field to use for suggestions - :arg suggest_mode: Specify suggest mode Valid choices: missing, - popular, always Default: missing - :arg suggest_size: How many suggestions to return in response + statistical purposes. + :arg stored_fields: Comma-separated list of stored fields to + return. + :arg suggest_field: Specify which field to use for suggestions. + :arg suggest_mode: Specify suggest mode. Valid choices are + missing, popular, always. + :arg suggest_size: How many suggestions to return in response. :arg suggest_text: The source text for which the suggestions - should be returned + should be returned. :arg terminate_after: The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. - :arg timeout: Explicit operation timeout + :arg timeout: Operation timeout. :arg track_scores: Whether to calculate and return scores even - if they are not used for sorting + if they are not used for sorting. 
:arg track_total_hits: Indicate if the number of documents that - match the query should be tracked + match the query should be tracked. :arg typed_keys: Specify whether aggregation and suggester names - should be prefixed by their respective types in the response - :arg version: Specify whether to return document version as part - of a hit + should be prefixed by their respective types in the response. + :arg version: Whether to return document version as part of a + hit. """ # from is a reserved word so it cannot be used, use from_ instead if "from_" in params: @@ -1578,21 +1578,21 @@ def search_shards(self, index=None, params=None, headers=None): executed against. - :arg index: A comma-separated list of index names to search; use - `_all` or empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) + from cluster-manager node. Default is false. :arg preference: Specify the node or shard the operation should - be performed on (default: random) - :arg routing: Specific routing value + be performed on. Default is random. + :arg routing: Routing value. 
""" return self.transport.perform_request( "GET", _make_path(index, "_search_shards"), params=params, headers=headers @@ -1619,35 +1619,37 @@ def search_template(self, body, index=None, params=None, headers=None): :arg body: The search definition template and its params - :arg index: A comma-separated list of index names to search; use - `_all` or empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg ccs_minimize_roundtrips: Indicates whether network round- trips should be minimized as part of cross-cluster search requests - execution Default: true + execution. Default is True. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg explain: Specify whether to return detailed information - about score computation as part of a hit + about score computation as part of a hit. :arg ignore_throttled: Whether specified concrete, expanded or - aliased indices should be ignored when throttled + aliased indices should be ignored when throttled. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg preference: Specify the node or shard the operation should - be performed on (default: random) - :arg profile: Specify whether to profile the query execution + be performed on. Default is random. + :arg profile: Specify whether to profile the query execution. 
:arg rest_total_hits_as_int: Indicates whether hits.total should - be rendered as an integer or an object in the rest search response - :arg routing: A comma-separated list of specific routing values + be rendered as an integer or an object in the rest search response. + Default is false. + :arg routing: Comma-separated list of specific routing values. :arg scroll: Specify how long a consistent view of the index - should be maintained for scrolled search - :arg search_type: Search operation type Valid choices: - query_then_fetch, dfs_query_then_fetch + should be maintained for scrolled search. + :arg search_type: Search operation type. Valid choices are + query_then_fetch, query_and_fetch, dfs_query_then_fetch, + dfs_query_and_fetch. :arg typed_keys: Specify whether aggregation and suggester names - should be prefixed by their respective types in the response + should be prefixed by their respective types in the response. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -1682,28 +1684,28 @@ def termvectors(self, index, body=None, id=None, params=None, headers=None): :arg index: The index in which the document resides. :arg body: Define parameters and or supply a document to get termvectors for. See documentation. - :arg id: The id of the document, when not specified a doc param - should be supplied. + :arg id: Document ID. When not specified a doc param should be + supplied. :arg field_statistics: Specifies if document count, sum of document frequencies and sum of total term frequencies should be - returned. Default: True - :arg fields: A comma-separated list of fields to return. + returned. Default is True. + :arg fields: Comma-separated list of fields to return. :arg offsets: Specifies if term offsets should be returned. - Default: True + Default is True. :arg payloads: Specifies if term payloads should be returned. - Default: True + Default is True. 
:arg positions: Specifies if term positions should be returned. - Default: True + Default is True. :arg preference: Specify the node or shard the operation should - be performed on (default: random). + be performed on. Default is random. :arg realtime: Specifies if request is real-time as opposed to - near-real-time (default: true). - :arg routing: Specific routing value. + near-real-time. Default is True. + :arg routing: Routing value. :arg term_statistics: Specifies if total term frequency and - document frequency should be returned. - :arg version: Explicit version number for concurrency control - :arg version_type: Specific version type Valid choices: - internal, external, external_gte, force + document frequency should be returned. Default is false. + :arg version: Explicit version number for concurrency control. + :arg version_type: Specific version type. Valid choices are + internal, external, external_gte, force. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") @@ -1733,38 +1735,36 @@ def update(self, index, id, body, params=None, headers=None): Updates a document with a script or partial document. - :arg index: The name of the index - :arg id: Document ID + :arg index: Index name. + :arg id: Document ID. 
:arg body: The request definition requires either `script` or partial `doc` :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field - :arg if_primary_term: only perform the update operation if the - last operation that has changed the document has the specified primary - term - :arg if_seq_no: only perform the update operation if the last - operation that has changed the document has the specified sequence - number - :arg lang: The script language (default: painless) + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. + :arg if_primary_term: only perform the operation if the last + operation that has changed the document has the specified primary term. + :arg if_seq_no: only perform the operation if the last operation + that has changed the document has the specified sequence number. + :arg lang: The script language. Default is painless. :arg refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then - do nothing with refreshes. Valid choices: true, false, wait_for - :arg require_alias: When true, requires destination is an alias. - Default is false + do nothing with refreshes. Valid choices are true, false, wait_for. + :arg require_alias: When true, requires destination to be an + alias. Default is false. :arg retry_on_conflict: Specify how many times should the - operation be retried when a conflict occurs (default: 0) - :arg routing: Specific routing value - :arg timeout: Explicit operation timeout + operation be retried when a conflict occurs. 
Default is 0. + :arg routing: Routing value. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Sets the number of shard copies - that must be active before proceeding with the update operation. - Defaults to 1, meaning the primary shard only. Set to `all` for all - shard copies, otherwise set to any non-negative value less than or equal - to the total number of copies for the shard (number of replicas + 1) + that must be active before proceeding with the operation. Defaults to 1, + meaning the primary shard only. Set to `all` for all shard copies, + otherwise set to any non-negative value less than or equal to the total + number of copies for the shard (number of replicas + 1). Default is 1. """ for param in (index, id, body): if param in SKIP_IN_PATH: @@ -1809,7 +1809,6 @@ def update(self, index, id, body, params=None, headers=None): "terminate_after", "timeout", "version", - "version_type", "wait_for_active_shards", "wait_for_completion", ) @@ -1819,81 +1818,77 @@ def update_by_query(self, index, body=None, params=None, headers=None): for example to pick up a mapping change. - :arg index: A comma-separated list of index names to search; use - `_all` or empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg body: The search definition using the Query DSL - search; leave empty to perform the operation on all types :arg _source: True or false to return the _source field or not, - or a list of fields to return - :arg _source_excludes: A list of fields to exclude from the - returned _source field - :arg _source_includes: A list of fields to extract and return - from the _source field + or a list of fields to return. + :arg _source_excludes: List of fields to exclude from the + returned _source field. + :arg _source_includes: List of fields to extract and return from + the _source field. 
:arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg analyze_wildcard: Specify whether wildcard and prefix - queries should be analyzed (default: false) - :arg analyzer: The analyzer to use for the query string - :arg conflicts: What to do when the update by query hits version - conflicts? Valid choices: abort, proceed Default: abort + queries should be analyzed. Default is false. + :arg analyzer: The analyzer to use for the query string. + :arg conflicts: What to do when the operation encounters version + conflicts?. Valid choices are abort, proceed. :arg default_operator: The default operator for query string - query (AND or OR) Valid choices: AND, OR Default: OR + query (AND or OR). Valid choices are AND, OR. :arg df: The field to use as default where no field prefix is - given in the query string + given in the query string. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg from_: Starting offset (default: 0) + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg from_: Starting offset. Default is 0. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg lenient: Specify whether format-based query failures (such - as providing text to a numeric field) should be ignored + as providing text to a numeric field) should be ignored. :arg max_docs: Maximum number of documents to process (default: - all documents) - :arg pipeline: Ingest pipeline to set on index requests made by - this action. (default: none) + all documents). 
+ :arg pipeline: The pipeline id to preprocess incoming documents + with. :arg preference: Specify the node or shard the operation should - be performed on (default: random) - :arg q: Query in the Lucene query string syntax - :arg refresh: Should the affected indexes be refreshed? + be performed on. Default is random. + :arg q: Query in the Lucene query string syntax. + :arg refresh: Should the affected indexes be refreshed?. :arg request_cache: Specify if request cache should be used for - this request or not, defaults to index level setting - :arg requests_per_second: The throttle to set on this request in - sub-requests per second. -1 means no throttle. - :arg routing: A comma-separated list of specific routing values + this request or not, defaults to index level setting. + :arg requests_per_second: The throttle for this request in sub- + requests per second. -1 means no throttle. Default is 0. + :arg routing: Comma-separated list of specific routing values. :arg scroll: Specify how long a consistent view of the index - should be maintained for scrolled search - :arg scroll_size: Size on the scroll request powering the update - by query Default: 100 + should be maintained for scrolled search. + :arg scroll_size: Size on the scroll request powering the + operation. Default is 100. :arg search_timeout: Explicit timeout for each search request. Defaults to no timeout. - :arg search_type: Search operation type Valid choices: - query_then_fetch, dfs_query_then_fetch - :arg size: Deprecated, please use `max_docs` instead + :arg search_type: Search operation type. Valid choices are + query_then_fetch, dfs_query_then_fetch. + :arg size: Deprecated, please use `max_docs` instead. :arg slices: The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be - set to `auto`. Default: 1 - :arg sort: A comma-separated list of : pairs + set to `auto`. Default is 1. + :arg sort: Comma-separated list of : pairs. 
:arg stats: Specific 'tag' of the request for logging and - statistical purposes + statistical purposes. :arg terminate_after: The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. :arg timeout: Time each individual bulk request should wait for - shards that are unavailable. Default: 1m - :arg version: Specify whether to return document version as part - of a hit - :arg version_type: Should the document increment the version - number (internal) on hit or not (reindex) + shards that are unavailable. Default is 1m. + :arg version: Whether to return document version as part of a + hit. :arg wait_for_active_shards: Sets the number of shard copies - that must be active before proceeding with the update by query - operation. Defaults to 1, meaning the primary shard only. Set to `all` - for all shard copies, otherwise set to any non-negative value less than - or equal to the total number of copies for the shard (number of replicas - + 1) - :arg wait_for_completion: Should the request should block until - the update by query operation is complete. Default: True + that must be active before proceeding with the operation. Defaults to 1, + meaning the primary shard only. Set to `all` for all shard copies, + otherwise set to any non-negative value less than or equal to the total + number of copies for the shard (number of replicas + 1). Default is 1. + :arg wait_for_completion: Should this request wait until the + operation has completed before returning. Default is True. """ # from is a reserved word so it cannot be used, use from_ instead if "from_" in params: @@ -1917,9 +1912,9 @@ def update_by_query_rethrottle(self, task_id, params=None, headers=None): operation. - :arg task_id: The task id to rethrottle - :arg requests_per_second: The throttle to set on this request in - floating sub-requests per second. -1 means set no throttle. + :arg task_id: The task id to rethrottle. 
+ :arg requests_per_second: The throttle for this request in sub- + requests per second. -1 means no throttle. """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'task_id'.") @@ -1936,11 +1931,6 @@ def get_script_context(self, params=None, headers=None): """ Returns all script contexts. - - .. warning:: - - This API is **experimental** so may include breaking changes - or be removed in a future version """ return self.transport.perform_request( "GET", "/_script_context", params=params, headers=headers @@ -1949,13 +1939,8 @@ def get_script_context(self, params=None, headers=None): @query_params() def get_script_languages(self, params=None, headers=None): """ - Returns available script types, languages and contexts - - - .. warning:: + Returns available script types, languages and contexts. - This API is **experimental** so may include breaking changes - or be removed in a future version """ return self.transport.perform_request( "GET", "/_script_language", params=params, headers=headers @@ -1978,11 +1963,11 @@ def create_pit(self, index, params=None, headers=None): :arg allow_partial_pit_creation: Allow if point in time can be created with partial failures. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: all, - open, closed, hidden, none + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg keep_alive: Specify the keep alive for point in time. :arg preference: Specify the node or shard the operation should - be performed on. + be performed on. Default is random. :arg routing: Comma-separated list of specific routing values. """ if index in SKIP_IN_PATH: @@ -2011,7 +1996,7 @@ def delete_pit(self, body=None, params=None, headers=None): Deletes one or more point in time searches based on the IDs passed. 
- :arg body: a point-in-time id to delete + :arg body: The point-in-time ids to be deleted """ return self.transport.perform_request( "DELETE", @@ -2025,36 +2010,8 @@ def delete_pit(self, body=None, params=None, headers=None): def get_all_pits(self, params=None, headers=None): """ Lists all active point in time searches. - """ - return self.transport.perform_request( - "GET", "/_search/point_in_time/_all", params=params, headers=headers - ) - - @query_params() - def terms_enum(self, index, body=None, params=None, headers=None): - """ - The terms enum API can be used to discover terms in the index that begin with - the provided string. It is designed for low-latency look-ups used in auto- - complete scenarios. - - - .. warning:: - This API is **beta** so may include breaking changes - or be removed in a future version - - :arg index: A comma-separated list of index names to search; use - `_all` or empty string to perform the operation on all indices - :arg body: field name, string which is the prefix expected in - matching terms, timeout and size for max number of results """ - if index in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( - "POST", - _make_path(index, "_terms_enum"), - params=params, - headers=headers, - body=body, + "GET", "/_search/point_in_time/_all", params=params, headers=headers ) diff --git a/opensearchpy/client/__init__.pyi b/opensearchpy/client/__init__.pyi index e1d1e359..9ad72a83 100644 --- a/opensearchpy/client/__init__.pyi +++ b/opensearchpy/client/__init__.pyi @@ -25,6 +25,15 @@ # specific language governing permissions and limitations # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from __future__ import unicode_literals import logging @@ -39,6 +48,7 @@ from .indices import IndicesClient from .ingest import IngestClient from .nodes import NodesClient from .remote import RemoteClient +from .remote_store import RemoteStoreClient from .security import SecurityClient from .snapshot import SnapshotClient from .tasks import TasksClient @@ -58,8 +68,12 @@ class OpenSearch(object): security: SecurityClient snapshot: SnapshotClient tasks: TasksClient + remote_store: RemoteStoreClient def __init__( - self, hosts: Any = ..., transport_class: Type[Transport] = ..., **kwargs: Any + self, + hosts: Any = ..., + transport_class: Type[Transport] = ..., + **kwargs: Any, ) -> None: ... def __repr__(self) -> str: ... def __enter__(self) -> "OpenSearch": ... 
@@ -330,8 +344,8 @@ class OpenSearch(object): self, id: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -487,8 +501,8 @@ class OpenSearch(object): self, id: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -639,8 +653,8 @@ class OpenSearch(object): *, body: Any, context: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -799,7 +813,6 @@ class OpenSearch(object): ignore_unavailable: Optional[Any] = ..., lenient: Optional[Any] = ..., max_concurrent_shard_requests: Optional[Any] = ..., - min_compatible_shard_node: Optional[Any] = ..., pre_filter_shard_size: Optional[Any] = ..., preference: Optional[Any] = ..., q: Optional[Any] = ..., @@ -988,7 +1001,6 @@ class OpenSearch(object): terminate_after: Optional[Any] = ..., timeout: Optional[Any] = ..., version: Optional[Any] = ..., - version_type: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., wait_for_completion: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -1125,21 +1137,3 @@ class OpenSearch(object): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
- def terms_enum( - self, - index: Any, - *, - body: Optional[Any] = ..., - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... diff --git a/opensearchpy/client/_patch.py b/opensearchpy/client/_patch.py index d92eae5a..bbb69d52 100644 --- a/opensearchpy/client/_patch.py +++ b/opensearchpy/client/_patch.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/client/_patch.pyi b/opensearchpy/client/_patch.pyi index be6e12a0..b1819682 100644 --- a/opensearchpy/client/_patch.pyi +++ b/opensearchpy/client/_patch.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -6,6 +7,7 @@ # # Modifications Copyright OpenSearch Contributors. See # GitHub history for details. + from typing import Any, Collection, MutableMapping, Optional, Tuple, Type, Union def list_all_point_in_time( diff --git a/opensearchpy/client/cat.py b/opensearchpy/client/cat.py index 8dac68cd..49d797ca 100644 --- a/opensearchpy/client/cat.py +++ b/opensearchpy/client/cat.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -48,17 +49,17 @@ def aliases(self, name=None, params=None, headers=None): :arg name: Comma-separated list of alias names. 
:arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: all, - open, closed, hidden, none + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return self.transport.perform_request( "GET", _make_path("_cat", "aliases", name), params=params, headers=headers @@ -83,22 +84,22 @@ def allocation(self, node_id=None, params=None, headers=None): :arg node_id: Comma-separated list of node IDs or names to limit the returned information. - :arg bytes: The unit in which to display byte values. Valid - choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg bytes: The unit in which to display byte values. Valid + choices are b, k, kb, m, mb, g, gb, t, tb, p, pb. :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. 
:arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return self.transport.perform_request( "GET", @@ -119,10 +120,10 @@ def count(self, index=None, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return self.transport.perform_request( "GET", _make_path("_cat", "count", index), params=params, headers=headers @@ -137,13 +138,13 @@ def health(self, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg ts: Set to false to disable timestamping. (default: True) - :arg v: Verbose mode. Display column headers. (default: false) + :arg time: The unit in which to display time values. Valid + choices are d, h, m, s, ms, micros, nanos. + :arg ts: Set to false to disable timestamping. Default is True. + :arg v: Verbose mode. Display column headers. Default is false. 
""" return self.transport.perform_request( "GET", "/_cat/health", params=params, headers=headers @@ -155,7 +156,7 @@ def help(self, params=None, headers=None): Returns help for the Cat APIs. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg s: Comma-separated list of column names or column aliases to sort by. """ @@ -187,35 +188,35 @@ def indices(self, index=None, params=None, headers=None): :arg index: Comma-separated list of indices to limit the returned information. - :arg bytes: The unit in which to display byte values. Valid - choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg bytes: The unit in which to display byte values. Valid + choices are b, k, kb, m, mb, g, gb, t, tb, p, pb. :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: all, - open, closed, hidden, none + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. :arg health: Health status ('green', 'yellow', or 'red') to - filter only indices matching the specified health status. Valid - choices: green, yellow, red - :arg help: Return help information. (default: false) + filter only indices matching the specified health status. Valid choices + are green, yellow, red. + :arg help: Return help information. Default is false. :arg include_unloaded_segments: If set to true segment stats will include stats for segments that are not currently loaded into - memory. (default: false) + memory. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. 
:arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg pri: Set to true to return stats only for primary shards. - (default: false) + Default is false. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers. (default: false) + :arg time: The unit in which to display time values. Valid + choices are d, h, m, s, ms, micros, nanos. + :arg v: Verbose mode. Display column headers. Default is false. """ return self.transport.perform_request( "GET", _make_path("_cat", "indices", index), params=params, headers=headers @@ -241,15 +242,15 @@ def master(self, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ from warnings import warn @@ -280,15 +281,15 @@ def cluster_manager(self, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. 
:arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return self.transport.perform_request( "GET", "/_cat/cluster_manager", params=params, headers=headers @@ -312,27 +313,27 @@ def nodes(self, params=None, headers=None): Returns basic statistics about performance of cluster nodes. - :arg bytes: The unit in which to display byte values. Valid - choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg bytes: The unit in which to display byte values. Valid + choices are b, k, kb, m, mb, g, gb, t, tb, p, pb. :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg format: A short version of the Accept header, e.g. json, yaml. :arg full_id: Return the full node ID instead of the shortened - version. (default: false) + version. Default is false. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local (Deprecated: This parameter does not cause this API - to act locally): Return local information, do not retrieve the state - from cluster-manager node. (default: false) + to act locally.): Return local information, do not retrieve the state + from cluster-manager node. Default is false. 
:arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers. (default: false) + :arg time: The unit in which to display time values. Valid + choices are d, h, m, s, ms, micros, nanos. + :arg v: Verbose mode. Display column headers. Default is false. """ return self.transport.perform_request( "GET", "/_cat/nodes", params=params, headers=headers @@ -349,20 +350,20 @@ def recovery(self, index=None, params=None, headers=None): :arg index: Comma-separated list or wildcard expression of index names to limit the returned information. :arg active_only: If `true`, the response only includes ongoing - shard recoveries. (default: false) - :arg bytes: The unit in which to display byte values. Valid - choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + shard recoveries. Default is false. + :arg bytes: The unit in which to display byte values. Valid + choices are b, k, kb, m, mb, g, gb, t, tb, p, pb. :arg detailed: If `true`, the response includes detailed - information about shard recoveries. (default: false) + information about shard recoveries. Default is false. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers. 
(default: false) + :arg time: The unit in which to display time values. Valid + choices are d, h, m, s, ms, micros, nanos. + :arg v: Verbose mode. Display column headers. Default is false. """ return self.transport.perform_request( "GET", _make_path("_cat", "recovery", index), params=params, headers=headers @@ -387,24 +388,24 @@ def shards(self, index=None, params=None, headers=None): :arg index: Comma-separated list of indices to limit the returned information. - :arg bytes: The unit in which to display byte values. Valid - choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg bytes: The unit in which to display byte values. Valid + choices are b, k, kb, m, mb, g, gb, t, tb, p, pb. :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers. (default: false) + :arg time: The unit in which to display time values. Valid + choices are d, h, m, s, ms, micros, nanos. + :arg v: Verbose mode. Display column headers. Default is false. 
""" return self.transport.perform_request( "GET", _make_path("_cat", "shards", index), params=params, headers=headers @@ -427,20 +428,20 @@ def segments(self, index=None, params=None, headers=None): :arg index: Comma-separated list of indices to limit the returned information. - :arg bytes: The unit in which to display byte values. Valid - choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg bytes: The unit in which to display byte values. Valid + choices are b, k, kb, m, mb, g, gb, t, tb, p, pb. :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return self.transport.perform_request( "GET", _make_path("_cat", "segments", index), params=params, headers=headers @@ -467,17 +468,17 @@ def pending_tasks(self, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. 
:arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers. (default: false) + :arg time: The unit in which to display time values. Valid + choices are d, h, m, s, ms, micros, nanos. + :arg v: Verbose mode. Display column headers. Default is false. """ return self.transport.perform_request( "GET", "/_cat/pending_tasks", params=params, headers=headers @@ -507,16 +508,16 @@ def thread_pool(self, thread_pool_patterns=None, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. :arg size: The multiplier in which to display values. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return self.transport.perform_request( "GET", @@ -534,15 +535,15 @@ def fielddata(self, fields=None, params=None, headers=None): :arg fields: Comma-separated list of fields to return in the output. 
- :arg bytes: The unit in which to display byte values. Valid - choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg bytes: The unit in which to display byte values. Valid + choices are b, k, kb, m, mb, g, gb, t, tb, p, pb. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return self.transport.perform_request( "GET", @@ -571,15 +572,15 @@ def plugins(self, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return self.transport.perform_request( "GET", "/_cat/plugins", params=params, headers=headers @@ -605,15 +606,15 @@ def nodeattrs(self, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. 
(default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return self.transport.perform_request( "GET", "/_cat/nodeattrs", params=params, headers=headers @@ -639,15 +640,15 @@ def repositories(self, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return self.transport.perform_request( "GET", "/_cat/repositories", params=params, headers=headers @@ -675,17 +676,18 @@ def snapshots(self, repository=None, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. 
:arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed). (default: false) + should be ignored when unavailable (missing or closed). Default is + false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers. (default: false) + :arg time: The unit in which to display time values. Valid + choices are d, h, m, s, ms, micros, nanos. + :arg v: Verbose mode. Display column headers. Default is false. """ return self.transport.perform_request( "GET", @@ -714,12 +716,12 @@ def tasks(self, params=None, headers=None): :arg actions: Comma-separated list of actions that should be returned. Leave empty to return all. - :arg detailed: Return detailed task information. (default: - false) + :arg detailed: Return detailed task information. Default is + false. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg nodes: Comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all @@ -728,9 +730,9 @@ def tasks(self, params=None, headers=None): (node_id:task_number). Set to -1 to return all. 
:arg s: Comma-separated list of column names or column aliases to sort by. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers. (default: false) + :arg time: The unit in which to display time values. Valid + choices are d, h, m, s, ms, micros, nanos. + :arg v: Verbose mode. Display column headers. Default is false. """ return self.transport.perform_request( "GET", "/_cat/tasks", params=params, headers=headers @@ -757,15 +759,15 @@ def templates(self, name=None, params=None, headers=None): :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg s: Comma-separated list of column names or column aliases to sort by. - :arg v: Verbose mode. Display column headers. (default: false) + :arg v: Verbose mode. Display column headers. Default is false. """ return self.transport.perform_request( "GET", _make_path("_cat", "templates", name), params=params, headers=headers @@ -787,7 +789,6 @@ def pit_segments(self, body=None, params=None, headers=None): List segments for one or several PITs. 
- :arg body: """ return self.transport.perform_request( "GET", "/_cat/pit_segments", params=params, headers=headers, body=body @@ -815,23 +816,23 @@ def segment_replication(self, index=None, params=None, headers=None): :arg index: Comma-separated list or wildcard expression of index names to limit the returned information. :arg active_only: If `true`, the response only includes ongoing - segment replication events. (default: false) - :arg bytes: The unit in which to display byte values. Valid - choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + segment replication events. Default is false. + :arg bytes: The unit in which to display byte values. Valid + choices are b, k, kb, m, mb, g, gb, t, tb, p, pb. :arg completed_only: If `true`, the response only includes - latest completed segment replication events. (default: false) + latest completed segment replication events. Default is false. :arg detailed: If `true`, the response includes detailed - information about segment replications. (default: false) + information about segment replications. Default is false. :arg format: A short version of the Accept header, e.g. json, yaml. :arg h: Comma-separated list of column names to display. - :arg help: Return help information. (default: false) + :arg help: Return help information. Default is false. :arg s: Comma-separated list of column names or column aliases to sort by. :arg shards: Comma-separated list of shards to display. - :arg time: The unit in which to display time values. Valid - choices: d, h, m, s, ms, micros, nanos - :arg v: Verbose mode. Display column headers. (default: false) + :arg time: The unit in which to display time values. Valid + choices are d, h, m, s, ms, micros, nanos. + :arg v: Verbose mode. Display column headers. Default is false. 
""" return self.transport.perform_request( "GET", diff --git a/opensearchpy/client/cat.pyi b/opensearchpy/client/cat.pyi index fc076ef8..0d690dda 100644 --- a/opensearchpy/client/cat.pyi +++ b/opensearchpy/client/cat.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/client/cluster.py b/opensearchpy/client/cluster.py index 28f1f0e8..248c7ce3 100644 --- a/opensearchpy/client/cluster.py +++ b/opensearchpy/client/cluster.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -65,22 +66,22 @@ def health(self, index=None, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: all, - open, closed, hidden, none + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg level: Specify the level of detail for returned - information. Valid choices: cluster, indices, shards, - awareness_attributes + information. Valid choices are cluster, indices, shards, + awareness_attributes. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. :arg wait_for_active_shards: Wait until the specified number of shards is active. :arg wait_for_events: Wait until all currently queued events - with the given priority are processed. 
Valid choices: immediate, - urgent, high, normal, low, languid + with the given priority are processed. Valid choices are immediate, + urgent, high, normal, low, languid. :arg wait_for_no_initializing_shards: Whether to wait until there are no initializing shards in the cluster. :arg wait_for_no_relocating_shards: Whether to wait until there @@ -88,7 +89,7 @@ def health(self, index=None, params=None, headers=None): :arg wait_for_nodes: Wait until the specified number of nodes is available. :arg wait_for_status: Wait until cluster is in a specific state. - Valid choices: green, yellow, red + Valid choices are green, yellow, red. """ return self.transport.perform_request( "GET", @@ -107,10 +108,10 @@ def pending_tasks(self, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ return self.transport.perform_request( "GET", "/_cluster/pending_tasks", params=params, headers=headers @@ -133,8 +134,8 @@ def state(self, metric=None, index=None, params=None, headers=None): :arg metric: Limit the information returned to the specified - metrics. Valid choices: _all, blocks, metadata, nodes, routing_table, - routing_nodes, master_node, cluster_manager_node, version + metrics. Valid choices are _all, blocks, metadata, nodes, routing_table, + routing_nodes, master_node, cluster_manager_node, version. :arg index: Comma-separated list of indices; use `_all` or empty string to perform the operation on all indices. 
:arg allow_no_indices: Whether to ignore if a wildcard indices @@ -143,17 +144,17 @@ def state(self, metric=None, index=None, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: all, - open, closed, hidden, none - :arg flat_settings: Return settings in flat format. (default: - false) + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg flat_settings: Return settings in flat format. Default is + false. :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed). :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg wait_for_metadata_version: Wait for the metadata version to be equal or greater than the specified metadata version. :arg wait_for_timeout: The maximum time to wait for @@ -179,8 +180,8 @@ def stats(self, node_id=None, params=None, headers=None): the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes. - :arg flat_settings: Return settings in flat format. (default: - false) + :arg flat_settings: Return settings in flat format. Default is + false. :arg timeout: Operation timeout. """ return self.transport.perform_request( @@ -215,8 +216,8 @@ def reroute(self, body=None, params=None, headers=None): :arg explain: Return an explanation of why the commands can or cannot be executed. 
:arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg metric: Limit the information returned to the specified metrics. Defaults to all but metadata. :arg retry_failed: Retries allocation of shards that are blocked @@ -241,13 +242,13 @@ def get_settings(self, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. - :arg flat_settings: Return settings in flat format. (default: - false) + :arg flat_settings: Return settings in flat format. Default is + false. :arg include_defaults: Whether to return all default clusters - setting. (default: false) + setting. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. """ return self.transport.perform_request( @@ -266,11 +267,11 @@ def put_settings(self, body, params=None, headers=None): or `persistent` (survives cluster restart). :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. - :arg flat_settings: Return settings in flat format. (default: - false) + :arg flat_settings: Return settings in flat format. Default is + false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. """ if body in SKIP_IN_PATH: @@ -299,9 +300,9 @@ def allocation_explain(self, body=None, params=None, headers=None): :arg body: The index, shard, and primary flag to explain. 
Empty means 'explain the first unassigned shard' :arg include_disk_info: Return information about disk usage and - shard sizes. (default: false) + shard sizes. Default is false. :arg include_yes_decisions: Return 'YES' decisions in - explanation. (default: false) + explanation. Default is false. """ return self.transport.perform_request( "POST", @@ -321,8 +322,8 @@ def delete_component_template(self, name, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. """ if name in SKIP_IN_PATH: @@ -345,10 +346,10 @@ def get_component_template(self, name=None, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ return self.transport.perform_request( "GET", @@ -368,10 +369,10 @@ def put_component_template(self, name, body, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg create: Whether the index template should only be added if - new or can also replace an existing one. (default: false) + new or can also replace an existing one. Default is false. 
:arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. """ for param in (name, body): @@ -386,18 +387,20 @@ def put_component_template(self, name, body, params=None, headers=None): body=body, ) - @query_params("local", "master_timeout") + @query_params("cluster_manager_timeout", "local", "master_timeout") def exists_component_template(self, name, params=None, headers=None): """ Returns information about whether a particular component template exist. :arg name: The name of the template. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg local: Return local information, do not retrieve the state - from cluster-manager node. (default: false) + from cluster-manager node. Default is false. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -417,7 +420,7 @@ def delete_voting_config_exclusions(self, params=None, headers=None): :arg wait_for_removal: Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting - configuration exclusions list. (default: True) + configuration exclusions list. Default is True. 
""" return self.transport.perform_request( "DELETE", diff --git a/opensearchpy/client/cluster.pyi b/opensearchpy/client/cluster.pyi index ccc3737a..ad2d3fac 100644 --- a/opensearchpy/client/cluster.pyi +++ b/opensearchpy/client/cluster.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -300,6 +301,7 @@ class ClusterClient(NamespacedClient): self, name: Any, *, + cluster_manager_timeout: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., diff --git a/opensearchpy/client/dangling_indices.py b/opensearchpy/client/dangling_indices.py index b04698ad..adc4aea3 100644 --- a/opensearchpy/client/dangling_indices.py +++ b/opensearchpy/client/dangling_indices.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -53,8 +54,8 @@ def delete_dangling_index(self, index_uuid, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. """ if index_uuid in SKIP_IN_PATH: @@ -81,8 +82,8 @@ def import_dangling_index(self, index_uuid, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. 
""" if index_uuid in SKIP_IN_PATH: diff --git a/opensearchpy/client/dangling_indices.pyi b/opensearchpy/client/dangling_indices.pyi index 203805a1..b48ba830 100644 --- a/opensearchpy/client/dangling_indices.pyi +++ b/opensearchpy/client/dangling_indices.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/client/features.py b/opensearchpy/client/features.py index a9e6ab95..b96ea308 100644 --- a/opensearchpy/client/features.py +++ b/opensearchpy/client/features.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/client/features.pyi b/opensearchpy/client/features.pyi index 8da34e42..6abcd79e 100644 --- a/opensearchpy/client/features.pyi +++ b/opensearchpy/client/features.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/client/indices.py b/opensearchpy/client/indices.py index 138692e1..3f8df6c6 100644 --- a/opensearchpy/client/indices.py +++ b/opensearchpy/client/indices.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -25,6 +26,16 @@ # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params @@ -38,7 +49,7 @@ def analyze(self, body=None, index=None, params=None, headers=None): :arg body: Define analyzer/tokenizer parameters and the text on which the analysis should be performed - :arg index: The name of the index to scope the operation + :arg index: The name of the index to scope the operation. """ return self.transport.perform_request( "POST", @@ -54,16 +65,16 @@ def refresh(self, index=None, params=None, headers=None): Performs the refresh operation in one or more indices. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). 
""" return self.transport.perform_request( "POST", _make_path(index, "_refresh"), params=params, headers=headers @@ -81,44 +92,47 @@ def flush(self, index=None, params=None, headers=None): Performs the flush operation on one or more indices. - :arg index: A comma-separated list of index names; use `_all` or - empty string for all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg force: Whether a flush should be forced even if it is not necessarily needed ie. if no changes will be committed to the index. This is useful if transaction log IDs should be incremented even if no uncommitted changes are present. (This setting can be considered as - internal) + internal). :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg wait_if_ongoing: If set to true the flush operation will block until the flush can be executed if another flush operation is - already executing. The default is true. If set to false the flush will - be skipped iff if another flush operation is already running. + already executing. If set to false the flush will be skipped iff if + another flush operation is already running. Default is True. 
""" return self.transport.perform_request( "POST", _make_path(index, "_flush"), params=params, headers=headers ) @query_params( - "master_timeout", "cluster_manager_timeout", "timeout", "wait_for_active_shards" + "cluster_manager_timeout", "master_timeout", "timeout", "wait_for_active_shards" ) def create(self, index, body=None, params=None, headers=None): """ Creates an index with optional settings and mappings. - :arg index: The name of the index + :arg index: Index name. :arg body: The configuration for the index (`settings` and `mappings`) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Set the number of active shards to wait for before the operation returns. """ @@ -130,20 +144,23 @@ def create(self, index, body=None, params=None, headers=None): ) @query_params( - "master_timeout", "cluster_manager_timeout", "timeout", "wait_for_active_shards" + "cluster_manager_timeout", "master_timeout", "timeout", "wait_for_active_shards" ) def clone(self, index, target, body=None, params=None, headers=None): """ - Clones an index + Clones an index. - :arg index: The name of the source index to clone - :arg target: The name of the target index to clone into + :arg index: The name of the source index to clone. + :arg target: The name of the target index. 
:arg body: The configuration for the target index (`settings` and `aliases`) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Set the number of active shards to wait for on the cloned index before the operation returns. """ @@ -161,35 +178,40 @@ def clone(self, index, target, body=None, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "flat_settings", "ignore_unavailable", "include_defaults", "local", "master_timeout", - "cluster_manager_timeout", ) def get(self, index, params=None, headers=None): """ Returns information about one or more indices. - :arg index: A comma-separated list of index names - :arg allow_no_indices: Ignore if a wildcard expression resolves - to no concrete indices (default: false) - :arg expand_wildcards: Whether wildcard expressions should get - expanded to open or closed indices (default: open) Valid choices: open, - closed, hidden, none, all Default: open - :arg flat_settings: Return settings in flat format (default: - false) - :arg ignore_unavailable: Ignore unavailable indexes (default: - false) + :arg index: Comma-separated list of indices. + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified). Default is false. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. 
+ :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg flat_settings: Return settings in flat format. Default is + false. + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed). Default is + false. :arg include_defaults: Whether to return all default setting for - each of the indices. + each of the indices. Default is false. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + from cluster-manager node. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") @@ -200,10 +222,10 @@ def get(self, index, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "ignore_unavailable", "master_timeout", - "cluster_manager_timeout", "timeout", "wait_for_active_shards", ) @@ -212,18 +234,21 @@ def open(self, index, params=None, headers=None): Opens an index. - :arg index: A comma separated list of indices to open + :arg index: Comma-separated list of indices to open. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. 
:arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: closed + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + should be ignored when unavailable (missing or closed). + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Sets the number of active shards to wait for before the operation returns. """ @@ -236,10 +261,10 @@ def open(self, index, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "ignore_unavailable", "master_timeout", - "cluster_manager_timeout", "timeout", "wait_for_active_shards", ) @@ -248,22 +273,23 @@ def close(self, index, params=None, headers=None): Closes an index. - :arg index: A comma separated list of indices to close + :arg index: Comma-separated list of indices to close. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. 
Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + should be ignored when unavailable (missing or closed). + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Sets the number of active shards to - wait for before the operation returns. Set to `index-setting` to wait - according to the index setting `index.write.wait_for_active_shards`, or - `all` to wait for all shards, or an integer. Defaults to `0`. + wait for before the operation returns. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") @@ -274,10 +300,10 @@ def close(self, index, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "ignore_unavailable", "master_timeout", - "cluster_manager_timeout", "timeout", ) def delete(self, index, params=None, headers=None): @@ -285,18 +311,23 @@ def delete(self, index, params=None, headers=None): Deletes an index. 
- :arg index: A comma-separated list of indices to delete; use - `_all` or `*` string to delete all indices - :arg allow_no_indices: Ignore if a wildcard expression resolves - to no concrete indices (default: false) - :arg expand_wildcards: Whether wildcard expressions should get - expanded to open or closed indices (default: open) Valid choices: open, - closed, hidden, none, all Default: open - :arg ignore_unavailable: Ignore unavailable indexes (default: - false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + :arg index: Comma-separated list of indices to delete; use + `_all` or `*` string to delete all indices. + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified). Default is false. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed). Default is + false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") @@ -318,20 +349,22 @@ def exists(self, index, params=None, headers=None): Returns information about whether a particular index exists. 
- :arg index: A comma-separated list of index names - :arg allow_no_indices: Ignore if a wildcard expression resolves - to no concrete indices (default: false) - :arg expand_wildcards: Whether wildcard expressions should get - expanded to open or closed indices (default: open) Valid choices: open, - closed, hidden, none, all Default: open - :arg flat_settings: Return settings in flat format (default: - false) - :arg ignore_unavailable: Ignore unavailable indexes (default: - false) + :arg index: Comma-separated list of indices. + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified). Default is false. + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg flat_settings: Return settings in flat format. Default is + false. + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed). Default is + false. :arg include_defaults: Whether to return all default setting for - each of the indices. + each of the indices. Default is false. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) + from cluster-manager node. Default is false. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") @@ -342,10 +375,10 @@ def exists(self, index, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "ignore_unavailable", "master_timeout", - "cluster_manager_timeout", "timeout", "write_index_only", ) @@ -355,22 +388,24 @@ def put_mapping(self, body, index=None, params=None, headers=None): :arg body: The mapping definition - :arg index: A comma-separated list of index names the mapping - should be added to (supports wildcards); use `_all` or omit to add the - mapping on all indices. + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + should be ignored when unavailable (missing or closed). + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. 
:arg write_index_only: When true, applies mappings only to the - write index of an alias or data stream + write index of an alias or data stream. Default is false. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -388,36 +423,37 @@ def put_mapping(self, body, index=None, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "ignore_unavailable", "local", "master_timeout", - "cluster_manager_timeout", ) def get_mapping(self, index=None, params=None, headers=None): """ Returns mappings for one or more indices. - :arg index: A comma-separated list of index names + :arg index: Comma-separated list of indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + should be ignored when unavailable (missing or closed). + :arg local (Deprecated: This parameter is a no-op and field + mappings are always retrieved locally.): Return local information, do + not retrieve the state from cluster-manager node. 
Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ return self.transport.perform_request( - "GET", - _make_path(index, "_mapping"), - params=params, - headers=headers, + "GET", _make_path(index, "_mapping"), params=params, headers=headers ) @query_params( @@ -432,20 +468,20 @@ def get_field_mapping(self, fields, index=None, params=None, headers=None): Returns mapping for one or more fields. - :arg fields: A comma-separated list of fields - :arg index: A comma-separated list of index names + :arg fields: Comma-separated list of fields. + :arg index: Comma-separated list of indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg include_defaults: Whether the default mapping values should - be returned as well + be returned as well. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) + from cluster-manager node. Default is false. 
""" if fields in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'fields'.") @@ -457,21 +493,23 @@ def get_field_mapping(self, fields, index=None, params=None, headers=None): headers=headers, ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") def put_alias(self, index, name, body=None, params=None, headers=None): """ Creates or updates an alias. - :arg index: A comma-separated list of index names the alias - should point to (supports wildcards); use `_all` to perform the - operation on all indices. - :arg name: The name of the alias to be created or updated + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. + :arg name: The name of the alias to be created or updated. :arg body: The settings for the alias, such as `routing` or `filter` - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit timestamp for the document + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ for param in (index, name): if param in SKIP_IN_PATH: @@ -491,19 +529,18 @@ def exists_alias(self, name, index=None, params=None, headers=None): Returns information about whether a particular alias exists. - :arg name: A comma-separated list of alias names to return - :arg index: A comma-separated list of index names to filter - aliases + :arg name: Comma-separated list of alias names. + :arg index: Comma-separated list of indices to filter aliases. 
:arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: all + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) + from cluster-manager node. Default is false. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -518,34 +555,36 @@ def get_alias(self, index=None, name=None, params=None, headers=None): Returns an alias. - :arg index: A comma-separated list of index names to filter - aliases - :arg name: A comma-separated list of alias names to return + :arg index: Comma-separated list of indices to filter aliases. + :arg name: Comma-separated list of alias names. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: all + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). 
:arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) + from cluster-manager node. Default is false. """ return self.transport.perform_request( "GET", _make_path(index, "_alias", name), params=params, headers=headers ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") def update_aliases(self, body, params=None, headers=None): """ Updates index aliases. :arg body: The definition of `actions` to perform - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Request timeout + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -554,19 +593,22 @@ def update_aliases(self, body, params=None, headers=None): "POST", "/_aliases", params=params, headers=headers, body=body ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") def delete_alias(self, index, name, params=None, headers=None): """ Deletes an alias. - :arg index: A comma-separated list of index names (supports - wildcards); use `_all` for all indices - :arg name: A comma-separated list of aliases to delete (supports + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. + :arg name: Comma-separated list of aliases to delete (supports wildcards); use `_all` to delete all aliases for the specified indices. 
- :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit timestamp for the document + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ for param in (index, name): if param in SKIP_IN_PATH: @@ -576,21 +618,24 @@ def delete_alias(self, index, name, params=None, headers=None): "DELETE", _make_path(index, "_alias", name), params=params, headers=headers ) - @query_params("create", "master_timeout", "cluster_manager_timeout", "order") + @query_params("cluster_manager_timeout", "create", "master_timeout", "order") def put_template(self, name, body, params=None, headers=None): """ Creates or updates an index template. - :arg name: The name of the template + :arg name: The name of the template. :arg body: The template definition + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg create: Whether the index template should only be added if - new or can also replace an existing one - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + new or can also replace an existing one. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg order: The order for this template when merging multiple matching ones (higher numbers are merged later, overriding the lower - numbers) + numbers). 
""" for param in (name, body): if param in SKIP_IN_PATH: @@ -604,21 +649,22 @@ def put_template(self, name, body, params=None, headers=None): body=body, ) - @query_params("flat_settings", "local", "master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "flat_settings", "local", "master_timeout") def exists_template(self, name, params=None, headers=None): """ Returns information about whether a particular index template exists. - :arg name: The comma separated names of the index templates - :arg flat_settings: Return settings in flat format (default: - false) + :arg name: Comma-separated names of the index templates. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg flat_settings: Return settings in flat format. Default is + false. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -627,36 +673,40 @@ def exists_template(self, name, params=None, headers=None): "HEAD", _make_path("_template", name), params=params, headers=headers ) - @query_params("flat_settings", "local", "master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "flat_settings", "local", "master_timeout") def get_template(self, name=None, params=None, headers=None): """ Returns an index template. 
- :arg name: The comma separated names of the index templates - :arg flat_settings: Return settings in flat format (default: - false) + :arg name: Comma-separated names of the index templates. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg flat_settings: Return settings in flat format. Default is + false. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ return self.transport.perform_request( "GET", _make_path("_template", name), params=params, headers=headers ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") def delete_template(self, name, params=None, headers=None): """ Deletes an index template. - :arg name: The name of the template - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + :arg name: The name of the template. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -667,38 +717,41 @@ def delete_template(self, name, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "flat_settings", "ignore_unavailable", "include_defaults", "local", "master_timeout", - "cluster_manager_timeout", ) def get_settings(self, index=None, name=None, params=None, headers=None): """ Returns settings for one or more indices. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices - :arg name: The name of the settings that should be included + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. + :arg name: Comma-separated list of settings. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: all - :arg flat_settings: Return settings in flat format (default: - false) + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg flat_settings: Return settings in flat format. Default is + false. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg include_defaults: Whether to return all default setting for - each of the indices. + each of the indices. Default is false. 
:arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + from cluster-manager node. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ return self.transport.perform_request( "GET", _make_path(index, "_settings", name), params=params, headers=headers @@ -706,11 +759,11 @@ def get_settings(self, index=None, name=None, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "flat_settings", "ignore_unavailable", "master_timeout", - "cluster_manager_timeout", "preserve_existing", "timeout", ) @@ -720,24 +773,27 @@ def put_settings(self, body, index=None, params=None, headers=None): :arg body: The index settings to be updated - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg flat_settings: Return settings in flat format (default: - false) + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. 
+ :arg flat_settings: Return settings in flat format. Default is + false. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + should be ignored when unavailable (missing or closed). + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg preserve_existing: Whether to update existing settings. If - set to `true` existing settings on an index remain unchanged, the - default is `false` - :arg timeout: Explicit operation timeout + set to `true` existing settings on an index remain unchanged. Default is + false. + :arg timeout: Operation timeout. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -760,43 +816,40 @@ def put_settings(self, body, index=None, params=None, headers=None): "include_segment_file_sizes", "include_unloaded_segments", "level", - "types", ) def stats(self, index=None, metric=None, params=None, headers=None): """ Provides statistics on operations happening in an index. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg metric: Limit the information returned the specific - metrics. Valid choices: _all, completion, docs, fielddata, query_cache, - flush, get, indexing, merge, request_cache, refresh, search, segments, - store, warmer, suggest - :arg completion_fields: A comma-separated list of fields for - `fielddata` and `suggest` index metric (supports wildcards) + metrics. 
Valid choices are _all, store, indexing, get, search, merge, + flush, refresh, query_cache, fielddata, docs, warmer, completion, + segments, translog, suggest, request_cache, recovery. + :arg completion_fields: Comma-separated list of fields for + `fielddata` and `suggest` index metric (supports wildcards). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg fielddata_fields: A comma-separated list of fields for - `fielddata` index metric (supports wildcards) - :arg fields: A comma-separated list of fields for `fielddata` - and `completion` index metric (supports wildcards) + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg fielddata_fields: Comma-separated list of fields for + `fielddata` index metric (supports wildcards). + :arg fields: Comma-separated list of fields for `fielddata` and + `completion` index metric (supports wildcards). :arg forbid_closed_indices: If set to false stats will also collected from closed indices if explicitly specified or if - expand_wildcards expands to closed indices Default: True - :arg groups: A comma-separated list of search groups for - `search` index metric + expand_wildcards expands to closed indices. Default is True. + :arg groups: Comma-separated list of search groups for `search` + index metric. :arg include_segment_file_sizes: Whether to report the aggregated disk usage of each one of the Lucene index files (only - applies if segment stats are requested) + applies if segment stats are requested). Default is false. :arg include_unloaded_segments: If set to true segment stats will include stats for segments that are not currently loaded into - memory + memory. Default is false. 
:arg level: Return stats aggregated at cluster, index or shard - level Valid choices: cluster, indices, shards Default: indices - :arg types: A comma-separated list of document types for the - `indexing` index metric + level. Valid choices are cluster, indices, shards. """ return self.transport.perform_request( "GET", _make_path(index, "_stats", metric), params=params, headers=headers @@ -810,17 +863,18 @@ def segments(self, index=None, params=None, headers=None): Provides low-level information about segments in a Lucene index. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg verbose: Includes detailed memory usage by Lucene. + should be ignored when unavailable (missing or closed). + :arg verbose: Includes detailed memory usage by Lucene. Default + is false. 
""" return self.transport.perform_request( "GET", _make_path(index, "_segments"), params=params, headers=headers @@ -846,32 +900,29 @@ def validate_query(self, body=None, index=None, params=None, headers=None): :arg body: The query definition specified with the Query DSL - :arg index: A comma-separated list of index names to restrict - the operation; use `_all` or empty string to perform the operation on - all indices - restrict the operation; leave empty to perform the operation on all - types + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg all_shards: Execute validation on all shards instead of one - random shard per index + random shard per index. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg analyze_wildcard: Specify whether wildcard and prefix - queries should be analyzed (default: false) - :arg analyzer: The analyzer to use for the query string + queries should be analyzed. Default is false. + :arg analyzer: The analyzer to use for the query string. :arg default_operator: The default operator for query string - query (AND or OR) Valid choices: AND, OR Default: OR + query (AND or OR). Valid choices are AND, OR. :arg df: The field to use as default where no field prefix is - given in the query string + given in the query string. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg explain: Return detailed information about the error + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg explain: Return detailed information about the error. 
:arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg lenient: Specify whether format-based query failures (such - as providing text to a numeric field) should be ignored - :arg q: Query in the Lucene query string syntax + as providing text to a numeric field) should be ignored. + :arg q: Query in the Lucene query string syntax. :arg rewrite: Provide a more detailed explanation showing the actual Lucene query that will be executed. """ @@ -897,21 +948,21 @@ def clear_cache(self, index=None, params=None, headers=None): Clears all or specific caches for one or more indices. - :arg index: A comma-separated list of index name to limit the - operation + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg fielddata: Clear field data - :arg fields: A comma-separated list of fields to clear when - using the `fielddata` parameter (default: all) + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. + :arg fielddata: Clear field data. + :arg fields: Comma-separated list of fields to clear when using + the `fielddata` parameter (default: all). :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg query: Clear query caches - :arg request: Clear request cache + should be ignored when unavailable (missing or closed). + :arg query: Clear query caches. 
+ :arg request: Clear request cache. """ return self.transport.perform_request( "POST", _make_path(index, "_cache", "clear"), params=params, headers=headers @@ -923,12 +974,12 @@ def recovery(self, index=None, params=None, headers=None): Returns information about ongoing index shard recoveries. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg active_only: Display only those recoveries that are - currently on-going + currently on-going. Default is false. :arg detailed: Whether to display detailed information about - shard recovery + shard recovery. Default is false. """ return self.transport.perform_request( "GET", _make_path(index, "_recovery"), params=params, headers=headers @@ -943,23 +994,23 @@ def recovery(self, index=None, params=None, headers=None): ) def upgrade(self, index=None, params=None, headers=None): """ - DEPRECATED Upgrades to the current version of Lucene. + The _upgrade API is no longer useful and will be removed. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. 
:arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). :arg only_ancient_segments: If true, only ancient (an older - Lucene major release) segments will be upgraded - :arg wait_for_completion: Specify whether the request should - block until the all segments are upgraded (default: false) + Lucene major release) segments will be upgraded. + :arg wait_for_completion: Should this request wait until the + operation has completed before returning. Default is false. """ return self.transport.perform_request( "POST", _make_path(index, "_upgrade"), params=params, headers=headers @@ -968,49 +1019,24 @@ def upgrade(self, index=None, params=None, headers=None): @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable") def get_upgrade(self, index=None, params=None, headers=None): """ - DEPRECATED Returns a progress status of current upgrade. + The _upgrade API is no longer useful and will be removed. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). 
""" return self.transport.perform_request( "GET", _make_path(index, "_upgrade"), params=params, headers=headers ) - @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable") - def flush_synced(self, index=None, params=None, headers=None): - """ - Performs a synced flush operation on one or more indices. Synced flush is - deprecated. Use flush instead - - - :arg index: A comma-separated list of index names; use `_all` or - empty string for all indices - :arg allow_no_indices: Whether to ignore if a wildcard indices - expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) - :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, none, all Default: open - :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - """ - return self.transport.perform_request( - "POST", - _make_path(index, "_flush", "synced"), - params=params, - headers=headers, - ) - @query_params( "allow_no_indices", "expand_wildcards", "ignore_unavailable", "status" ) @@ -1019,19 +1045,18 @@ def shard_stores(self, index=None, params=None, headers=None): Provides store information for shard copies of indices. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. 
Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg status: A comma-separated list of statuses used to filter - on shards to get store information for Valid choices: green, yellow, - red, all + should be ignored when unavailable (missing or closed). + :arg status: Comma-separated list of statuses used to filter on + shards to get store information for. """ return self.transport.perform_request( "GET", _make_path(index, "_shard_stores"), params=params, headers=headers @@ -1050,31 +1075,31 @@ def forcemerge(self, index=None, params=None, headers=None): Performs the force merge operation on one or more indices. - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices + :arg index: Comma-separated list of indices; use `_all` or empty + string to perform the operation on all indices. :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. :arg flush: Specify whether the index should be flushed after - performing the operation (default: true) + performing the operation. Default is True. :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) + should be ignored when unavailable (missing or closed). 
:arg max_num_segments: The number of segments the index should - be merged into (default: dynamic) + be merged into (default: dynamic). :arg only_expunge_deletes: Specify whether the operation should - only expunge deleted documents + only expunge deleted documents. """ return self.transport.perform_request( "POST", _make_path(index, "_forcemerge"), params=params, headers=headers ) @query_params( + "cluster_manager_timeout", "copy_settings", "master_timeout", - "cluster_manager_timeout", "timeout", "wait_for_active_shards", ) @@ -1083,15 +1108,18 @@ def shrink(self, index, target, body=None, params=None, headers=None): Allow to shrink an existing index into a new index with fewer primary shards. - :arg index: The name of the source index to shrink - :arg target: The name of the target index to shrink into + :arg index: The name of the source index to shrink. + :arg target: The name of the target index. :arg body: The configuration for the target index (`settings` and `aliases`) + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg copy_settings: whether or not to copy settings from the - source index (defaults to false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + source index. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Set the number of active shards to wait for on the shrunken index before the operation returns. 
""" @@ -1108,9 +1136,9 @@ def shrink(self, index, target, body=None, params=None, headers=None): ) @query_params( + "cluster_manager_timeout", "copy_settings", "master_timeout", - "cluster_manager_timeout", "timeout", "wait_for_active_shards", ) @@ -1120,15 +1148,18 @@ def split(self, index, target, body=None, params=None, headers=None): shards. - :arg index: The name of the source index to split - :arg target: The name of the target index to split into + :arg index: The name of the source index to split. + :arg target: The name of the target index. :arg body: The configuration for the target index (`settings` and `aliases`) + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg copy_settings: whether or not to copy settings from the - source index (defaults to false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + source index. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Set the number of active shards to wait for on the shrunken index before the operation returns. """ @@ -1145,9 +1176,9 @@ def split(self, index, target, body=None, params=None, headers=None): ) @query_params( + "cluster_manager_timeout", "dry_run", "master_timeout", - "cluster_manager_timeout", "timeout", "wait_for_active_shards", ) @@ -1157,16 +1188,19 @@ def rollover(self, alias, body=None, new_index=None, params=None, headers=None): to be too large or too old. - :arg alias: The name of the alias to rollover + :arg alias: The name of the alias to rollover. 
:arg body: The conditions that needs to be met for executing rollover - :arg new_index: The name of the rollover index + :arg new_index: The name of the rollover index. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg dry_run: If set to true the rollover action will only be - validated but not actually performed even if a condition matches. The - default is false - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + validated but not actually performed even if a condition matches. + Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. :arg wait_for_active_shards: Set the number of active shards to wait for on the newly created rollover index before the operation returns. @@ -1182,133 +1216,34 @@ def rollover(self, alias, body=None, new_index=None, params=None, headers=None): body=body, ) - @query_params( - "allow_no_indices", - "expand_wildcards", - "ignore_unavailable", - "master_timeout", - "cluster_manager_timeout", - "timeout", - "wait_for_active_shards", - ) - def freeze(self, index, params=None, headers=None): - """ - Freezes an index. A frozen index has almost no overhead on the cluster (except - for maintaining its metadata in memory) and is read-only. - - - :arg index: The name of the index to freeze - :arg allow_no_indices: Whether to ignore if a wildcard indices - expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) - :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. 
Valid choices: open, - closed, hidden, none, all Default: closed - :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout - :arg wait_for_active_shards: Sets the number of active shards to - wait for before the operation returns. - """ - if index in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'index'.") - - return self.transport.perform_request( - "POST", _make_path(index, "_freeze"), params=params, headers=headers - ) - - @query_params( - "allow_no_indices", - "expand_wildcards", - "ignore_unavailable", - "master_timeout", - "cluster_manager_timeout", - "timeout", - "wait_for_active_shards", - ) - def unfreeze(self, index, params=None, headers=None): - """ - Unfreezes an index. When a frozen index is unfrozen, the index goes through the - normal recovery process and becomes writeable again. - - - :arg index: The name of the index to unfreeze - :arg allow_no_indices: Whether to ignore if a wildcard indices - expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) - :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. 
Valid choices: open, - closed, hidden, none, all Default: closed - :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout - :arg wait_for_active_shards: Sets the number of active shards to - wait for before the operation returns. - """ - if index in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'index'.") - - return self.transport.perform_request( - "POST", _make_path(index, "_unfreeze"), params=params, headers=headers - ) - - @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable") - def reload_search_analyzers(self, index, params=None, headers=None): - """ - Reloads an index's search analyzers and their resources. - - - :arg index: A comma-separated list of index names to reload - analyzers for - :arg allow_no_indices: Whether to ignore if a wildcard indices - expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) - :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. 
Valid choices: open, - closed, hidden, none, all Default: open - :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - """ - if index in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'index'.") - - return self.transport.perform_request( - "GET", - _make_path(index, "_reload_search_analyzers"), - params=params, - headers=headers, - ) - @query_params() - def create_data_stream(self, name, params=None, headers=None): + def create_data_stream(self, name, body=None, params=None, headers=None): """ - Creates a data stream + Creates or updates a data stream. - :arg name: The name of the data stream + :arg name: The name of the data stream. + :arg body: The data stream definition """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") return self.transport.perform_request( - "PUT", _make_path("_data_stream", name), params=params, headers=headers + "PUT", + _make_path("_data_stream", name), + params=params, + headers=headers, + body=body, ) - @query_params("expand_wildcards") + @query_params() def delete_data_stream(self, name, params=None, headers=None): """ Deletes a data stream. - :arg name: A comma-separated list of data streams to delete; use - `*` to delete all data streams - :arg expand_wildcards: Whether wildcard expressions should get - expanded to open or closed indices (default: open) Valid choices: open, - closed, hidden, none, all Default: open + :arg name: Comma-separated list of data streams; use `_all` or + empty string to perform the operation on all data streams. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -1317,16 +1252,19 @@ def delete_data_stream(self, name, params=None, headers=None): "DELETE", _make_path("_data_stream", name), params=params, headers=headers ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") def delete_index_template(self, name, params=None, headers=None): """ Deletes an index template. - :arg name: The name of the template - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + :arg name: The name of the template. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -1338,21 +1276,22 @@ def delete_index_template(self, name, params=None, headers=None): headers=headers, ) - @query_params("flat_settings", "local", "master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "flat_settings", "local", "master_timeout") def exists_index_template(self, name, params=None, headers=None): """ Returns information about whether a particular index template exists. - :arg name: The name of the template - :arg flat_settings: Return settings in flat format (default: - false) + :arg name: The name of the template. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg flat_settings: Return settings in flat format. Default is + false. 
:arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -1361,40 +1300,44 @@ def exists_index_template(self, name, params=None, headers=None): "HEAD", _make_path("_index_template", name), params=params, headers=headers ) - @query_params("flat_settings", "local", "master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "flat_settings", "local", "master_timeout") def get_index_template(self, name=None, params=None, headers=None): """ Returns an index template. - :arg name: The comma separated names of the index templates - :arg flat_settings: Return settings in flat format (default: - false) + :arg name: Comma-separated names of the index templates. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg flat_settings: Return settings in flat format. Default is + false. :arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. 
""" return self.transport.perform_request( "GET", _make_path("_index_template", name), params=params, headers=headers ) - @query_params("cause", "create", "master_timeout", "cluster_manager_timeout") + @query_params("cause", "cluster_manager_timeout", "create", "master_timeout") def put_index_template(self, name, body, params=None, headers=None): """ Creates or updates an index template. - :arg name: The name of the template + :arg name: The name of the template. :arg body: The template definition :arg cause: User defined reason for creating/updating the index - template + template. Default is false. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg create: Whether the index template should only be added if - new or can also replace an existing one - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + new or can also replace an existing one. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ for param in (name, body): if param in SKIP_IN_PATH: @@ -1408,24 +1351,27 @@ def put_index_template(self, name, body, params=None, headers=None): body=body, ) - @query_params("cause", "create", "master_timeout", "cluster_manager_timeout") + @query_params("cause", "cluster_manager_timeout", "create", "master_timeout") def simulate_index_template(self, name, body=None, params=None, headers=None): """ Simulate matching the given index name against the index templates in the - system + system. :arg name: The name of the index (it must be a concrete index - name) + name). 
:arg body: New index template definition, which will be included in the simulation, as if it already exists in the system :arg cause: User defined reason for dry-run creating the new - template for simulation purposes + template for simulation purposes. Default is false. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg create: Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an - existing one - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + existing one. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -1438,38 +1384,38 @@ def simulate_index_template(self, name, body=None, params=None, headers=None): body=body, ) - @query_params("expand_wildcards") + @query_params() def get_data_stream(self, name=None, params=None, headers=None): """ Returns data streams. - :arg name: A comma-separated list of data streams to get; use - `*` to get all data streams - :arg expand_wildcards: Whether wildcard expressions should get - expanded to open or closed indices (default: open) Valid choices: open, - closed, hidden, none, all Default: open + :arg name: Comma-separated list of data streams; use `_all` or + empty string to perform the operation on all data streams. 
""" return self.transport.perform_request( "GET", _make_path("_data_stream", name), params=params, headers=headers ) - @query_params("cause", "create", "master_timeout", "cluster_manager_timeout") + @query_params("cause", "cluster_manager_timeout", "create", "master_timeout") def simulate_template(self, body=None, name=None, params=None, headers=None): """ - Simulate resolving the given template name or body + Simulate resolving the given template name or body. :arg body: New index template definition to be simulated, if no index template name is specified - :arg name: The name of the index template + :arg name: The name of the template. :arg cause: User defined reason for dry-run creating the new - template for simulation purposes + template for simulation purposes. Default is false. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg create: Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an - existing one - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager + existing one. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ return self.transport.perform_request( "POST", @@ -1482,19 +1428,14 @@ def simulate_template(self, body=None, name=None, params=None, headers=None): @query_params("expand_wildcards") def resolve_index(self, name, params=None, headers=None): """ - Returns information about any matching indices, aliases, and data streams - - - .. warning:: + Returns information about any matching indices, aliases, and data streams. 
- This API is **experimental** so may include breaking changes - or be removed in a future version - :arg name: A comma-separated list of names or wildcard - expressions - :arg expand_wildcards: Whether wildcard expressions should get - expanded to open or closed indices (default: open) Valid choices: open, - closed, hidden, none, all Default: open + :arg name: Comma-separated list of names or wildcard + expressions. + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") @@ -1505,10 +1446,10 @@ def resolve_index(self, name, params=None, headers=None): @query_params( "allow_no_indices", + "cluster_manager_timeout", "expand_wildcards", "ignore_unavailable", "master_timeout", - "cluster_manager_timeout", "timeout", ) def add_block(self, index, block, params=None, headers=None): @@ -1516,20 +1457,23 @@ def add_block(self, index, block, params=None, headers=None): Adds a block to an index. - :arg index: A comma separated list of indices to add a block to + :arg index: Comma-separated list of indices to add a block to. :arg block: The block to add (one of read, write, read_only or - metadata) + metadata). :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) + string or when no indices have been specified). + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open + concrete indices that are open, closed or both. Valid choices are all, + open, closed, hidden, none. 
:arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master - :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager - :arg timeout: Explicit operation timeout + should be ignored when unavailable (missing or closed). + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ for param in (index, block): if param in SKIP_IN_PATH: @@ -1545,8 +1489,8 @@ def data_streams_stats(self, name=None, params=None, headers=None): Provides statistics on operations happening in a data stream. - :arg name: A comma-separated list of data stream names; use - `_all` or empty string to perform the operation on all data streams + :arg name: Comma-separated list of data streams; use `_all` or + empty string to perform the operation on all data streams. 
""" return self.transport.perform_request( "GET", @@ -1554,115 +1498,3 @@ def data_streams_stats(self, name=None, params=None, headers=None): params=params, headers=headers, ) - - @query_params() - def promote_data_stream(self, name, params=None, headers=None): - """ - Promotes a data stream from a replicated data stream managed by CCR to a - regular data stream - - - :arg name: The name of the data stream - """ - if name in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'name'.") - - return self.transport.perform_request( - "POST", - _make_path("_data_stream", "_promote", name), - params=params, - headers=headers, - ) - - @query_params() - def migrate_to_data_stream(self, name, params=None, headers=None): - """ - Migrates an alias to a data stream - - - :arg name: The name of the alias to migrate - """ - if name in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'name'.") - - return self.transport.perform_request( - "POST", - _make_path("_data_stream", "_migrate", name), - params=params, - headers=headers, - ) - - @query_params( - "allow_no_indices", - "expand_wildcards", - "flush", - "ignore_unavailable", - "run_expensive_tasks", - ) - def disk_usage(self, index, params=None, headers=None): - """ - Analyzes the disk usage of each field of an index or data stream - - - .. warning:: - - This API is **experimental** so may include breaking changes - or be removed in a future version - - :arg index: Comma-separated list of indices or data streams to - analyze the disk usage - :arg allow_no_indices: Whether to ignore if a wildcard indices - expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) - :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. Valid choices: open, - closed, hidden, none, all Default: open - :arg flush: Whether flush or not before analyzing the index disk - usage. 
Defaults to true - :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - :arg run_expensive_tasks: Must be set to [true] in order for the - task to be performed. Defaults to false. - """ - if index in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'index'.") - - return self.transport.perform_request( - "POST", _make_path(index, "_disk_usage"), params=params, headers=headers - ) - - @query_params( - "allow_no_indices", "expand_wildcards", "fields", "ignore_unavailable" - ) - def field_usage_stats(self, index, params=None, headers=None): - """ - Returns the field usage stats for each field of an index - - - .. warning:: - - This API is **experimental** so may include breaking changes - or be removed in a future version - - :arg index: A comma-separated list of index names; use `_all` or - empty string to perform the operation on all indices - :arg allow_no_indices: Whether to ignore if a wildcard indices - expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) - :arg expand_wildcards: Whether to expand wildcard expression to - concrete indices that are open, closed or both. 
Valid choices: open, - closed, hidden, none, all Default: open - :arg fields: A comma-separated list of fields to include in the - stats if only a subset of fields should be returned (supports wildcards) - :arg ignore_unavailable: Whether specified concrete indices - should be ignored when unavailable (missing or closed) - """ - if index in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'index'.") - - return self.transport.perform_request( - "GET", - _make_path(index, "_field_usage_stats"), - params=params, - headers=headers, - ) diff --git a/opensearchpy/client/indices.pyi b/opensearchpy/client/indices.pyi index 2393537a..87048693 100644 --- a/opensearchpy/client/indices.pyi +++ b/opensearchpy/client/indices.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -24,6 +25,15 @@ # specific language governing permissions and limitations # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union from .utils import NamespacedClient @@ -94,8 +104,8 @@ class IndicesClient(NamespacedClient): index: Any, *, body: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -117,8 +127,8 @@ class IndicesClient(NamespacedClient): target: Any, *, body: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -139,13 +149,13 @@ class IndicesClient(NamespacedClient): index: Any, *, allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., flat_settings: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., include_defaults: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -164,10 +174,10 @@ class IndicesClient(NamespacedClient): index: Any, *, allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - 
cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -188,10 +198,10 @@ class IndicesClient(NamespacedClient): index: Any, *, allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -212,10 +222,10 @@ class IndicesClient(NamespacedClient): index: Any, *, allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -259,10 +269,10 @@ class IndicesClient(NamespacedClient): body: Any, index: Optional[Any] = ..., allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., write_index_only: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -283,11 +293,11 @@ class IndicesClient(NamespacedClient): *, index: Optional[Any] = ..., allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -330,8 +340,8 @@ class IndicesClient(NamespacedClient): name: Any, *, body: 
Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -394,8 +404,8 @@ class IndicesClient(NamespacedClient): self, *, body: Any, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -415,8 +425,8 @@ class IndicesClient(NamespacedClient): index: Any, name: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -436,9 +446,9 @@ class IndicesClient(NamespacedClient): name: Any, *, body: Any, + cluster_manager_timeout: Optional[Any] = ..., create: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., order: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -457,10 +467,10 @@ class IndicesClient(NamespacedClient): self, name: Any, *, + cluster_manager_timeout: Optional[Any] = ..., flat_settings: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -478,10 +488,10 @@ class IndicesClient(NamespacedClient): self, *, name: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., flat_settings: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -499,8 +509,8 @@ class IndicesClient(NamespacedClient): self, name: Any, *, - 
master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -521,13 +531,13 @@ class IndicesClient(NamespacedClient): index: Optional[Any] = ..., name: Optional[Any] = ..., allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., flat_settings: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., include_defaults: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -547,11 +557,11 @@ class IndicesClient(NamespacedClient): body: Any, index: Optional[Any] = ..., allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., flat_settings: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., preserve_existing: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -581,7 +591,6 @@ class IndicesClient(NamespacedClient): include_segment_file_sizes: Optional[Any] = ..., include_unloaded_segments: Optional[Any] = ..., level: Optional[Any] = ..., - types: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -731,26 +740,6 @@ class IndicesClient(NamespacedClient): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
- def flush_synced( - self, - *, - index: Optional[Any] = ..., - allow_no_indices: Optional[Any] = ..., - expand_wildcards: Optional[Any] = ..., - ignore_unavailable: Optional[Any] = ..., - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... def shard_stores( self, *, @@ -801,9 +790,9 @@ class IndicesClient(NamespacedClient): target: Any, *, body: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., copy_settings: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -825,9 +814,9 @@ class IndicesClient(NamespacedClient): target: Any, *, body: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., copy_settings: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -849,57 +838,9 @@ class IndicesClient(NamespacedClient): *, body: Optional[Any] = ..., new_index: Optional[Any] = ..., - dry_run: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., - timeout: Optional[Any] = ..., - wait_for_active_shards: Optional[Any] = ..., - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - 
filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... - def freeze( - self, - index: Any, - *, - allow_no_indices: Optional[Any] = ..., - expand_wildcards: Optional[Any] = ..., - ignore_unavailable: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., - timeout: Optional[Any] = ..., - wait_for_active_shards: Optional[Any] = ..., - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... - def unfreeze( - self, - index: Any, - *, - allow_no_indices: Optional[Any] = ..., - expand_wildcards: Optional[Any] = ..., - ignore_unavailable: Optional[Any] = ..., + dry_run: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., wait_for_active_shards: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -915,30 +856,11 @@ class IndicesClient(NamespacedClient): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... 
- def reload_search_analyzers( - self, - index: Any, - *, - allow_no_indices: Optional[Any] = ..., - expand_wildcards: Optional[Any] = ..., - ignore_unavailable: Optional[Any] = ..., - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... def create_data_stream( self, name: Any, *, + body: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -956,7 +878,6 @@ class IndicesClient(NamespacedClient): self, name: Any, *, - expand_wildcards: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -974,8 +895,8 @@ class IndicesClient(NamespacedClient): self, name: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -994,10 +915,10 @@ class IndicesClient(NamespacedClient): self, name: Any, *, + cluster_manager_timeout: Optional[Any] = ..., flat_settings: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -1015,10 +936,10 @@ class IndicesClient(NamespacedClient): self, *, name: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., flat_settings: Optional[Any] = ..., local: Optional[Any] 
= ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -1038,9 +959,9 @@ class IndicesClient(NamespacedClient): *, body: Any, cause: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., create: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -1060,9 +981,9 @@ class IndicesClient(NamespacedClient): *, body: Optional[Any] = ..., cause: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., create: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -1080,7 +1001,6 @@ class IndicesClient(NamespacedClient): self, *, name: Optional[Any] = ..., - expand_wildcards: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -1100,9 +1020,9 @@ class IndicesClient(NamespacedClient): body: Optional[Any] = ..., name: Optional[Any] = ..., cause: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., create: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -1140,10 +1060,10 @@ class IndicesClient(NamespacedClient): block: Any, *, allow_no_indices: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., expand_wildcards: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -1175,80 +1095,3 @@ class 
IndicesClient(NamespacedClient): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... - def promote_data_stream( - self, - name: Any, - *, - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... - def migrate_to_data_stream( - self, - name: Any, - *, - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... 
- def disk_usage( - self, - index: Any, - *, - allow_no_indices: Optional[Any] = ..., - expand_wildcards: Optional[Any] = ..., - flush: Optional[Any] = ..., - ignore_unavailable: Optional[Any] = ..., - run_expensive_tasks: Optional[Any] = ..., - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... - def field_usage_stats( - self, - index: Any, - *, - allow_no_indices: Optional[Any] = ..., - expand_wildcards: Optional[Any] = ..., - fields: Optional[Any] = ..., - ignore_unavailable: Optional[Any] = ..., - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... 
diff --git a/opensearchpy/client/ingest.py b/opensearchpy/client/ingest.py index fb9d4f79..6282c7b8 100644 --- a/opensearchpy/client/ingest.py +++ b/opensearchpy/client/ingest.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -50,8 +51,8 @@ def get_pipeline(self, id=None, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ return self.transport.perform_request( "GET", _make_path("_ingest", "pipeline", id), params=params, headers=headers @@ -68,8 +69,8 @@ def put_pipeline(self, id, body, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. """ for param in (id, body): @@ -94,8 +95,8 @@ def delete_pipeline(self, id, params=None, headers=None): :arg cluster_manager_timeout: Operation timeout for connection to cluster-manager node. :arg master_timeout (Deprecated: To promote inclusive language, - use 'cluster_manager_timeout' instead): Operation timeout for connection - to master node. + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg timeout: Operation timeout. """ if id in SKIP_IN_PATH: @@ -117,7 +118,7 @@ def simulate(self, body, id=None, params=None, headers=None): :arg body: The simulate definition :arg id: Pipeline ID. :arg verbose: Verbose mode. 
Display data output for each - processor in executed pipeline. + processor in executed pipeline. Default is false. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") diff --git a/opensearchpy/client/ingest.pyi b/opensearchpy/client/ingest.pyi index 251071e3..c7531f0e 100644 --- a/opensearchpy/client/ingest.pyi +++ b/opensearchpy/client/ingest.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/client/nodes.py b/opensearchpy/client/nodes.py index 9dc1e1ab..28ea1357 100644 --- a/opensearchpy/client/nodes.py +++ b/opensearchpy/client/nodes.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -73,10 +74,10 @@ def info(self, node_id=None, metric=None, params=None, headers=None): node you're connecting to, leave empty to get information from all nodes. :arg metric: Comma-separated list of metrics you wish returned. - Leave empty to return all. Valid choices: settings, os, process, jvm, - thread_pool, transport, http, plugins, ingest - :arg flat_settings: Return settings in flat format. (default: - false) + Leave empty to return all. Valid choices are settings, os, process, jvm, + thread_pool, transport, http, plugins, ingest. + :arg flat_settings: Return settings in flat format. Default is + false. :arg timeout: Operation timeout. """ return self.transport.perform_request( @@ -105,13 +106,13 @@ def stats( node you're connecting to, leave empty to get information from all nodes. :arg metric: Limit the information returned to the specified - metrics. Valid choices: _all, breaker, fs, http, indices, jvm, os, - process, thread_pool, transport, discovery, indexing_pressure + metrics. Valid choices are _all, breaker, fs, http, indices, jvm, os, + process, thread_pool, transport, discovery, indexing_pressure. 
:arg index_metric: Limit the information returned for `indices` metric to the specific index metrics. Isn't used if `indices` (or `all`) - metric isn't specified. Valid choices: _all, store, indexing, get, + metric isn't specified. Valid choices are _all, store, indexing, get, search, merge, flush, refresh, query_cache, fielddata, docs, warmer, - completion, segments, translog, suggest, request_cache, recovery + completion, segments, translog, suggest, request_cache, recovery. :arg completion_fields: Comma-separated list of fields for `fielddata` and `suggest` index metric (supports wildcards). :arg fielddata_fields: Comma-separated list of fields for @@ -122,9 +123,9 @@ def stats( index metric. :arg include_segment_file_sizes: Whether to report the aggregated disk usage of each one of the Lucene index files (only - applies if segment stats are requested). (default: false) + applies if segment stats are requested). Default is false. :arg level: Return indices stats aggregated at index, node or - shard level. Valid choices: indices, node, shards + shard level. Valid choices are indices, node, shards. :arg timeout: Operation timeout. :arg types: Comma-separated list of document types for the `indexing` index metric. @@ -148,16 +149,16 @@ def hot_threads(self, node_id=None, params=None, headers=None): the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes. - :arg doc_type: The type to sample. Valid choices: cpu, wait, - block + :arg doc_type: The type to sample. Valid choices are cpu, wait, + block. :arg ignore_idle_threads: Don't show threads that are in known- idle places, such as waiting on a socket select or pulling from an empty - task queue. (default: True) + task queue. Default is True. :arg interval: The interval for the second sampling of threads. - :arg snapshots: Number of samples of thread stacktrace. 
- (default: 10) + :arg snapshots: Number of samples of thread stacktrace. Default + is 10. :arg threads: Specify the number of threads to provide - information for. (default: 3) + information for. Default is 3. :arg timeout: Operation timeout. """ # type is a reserved word so it cannot be used, use doc_type instead @@ -182,7 +183,7 @@ def usage(self, node_id=None, metric=None, params=None, headers=None): node you're connecting to, leave empty to get information from all nodes. :arg metric: Limit the information returned to the specified - metrics. Valid choices: _all, rest_actions + metrics. Valid choices are _all, rest_actions. :arg timeout: Operation timeout. """ return self.transport.perform_request( diff --git a/opensearchpy/client/nodes.pyi b/opensearchpy/client/nodes.pyi index 67e5a05c..78465481 100644 --- a/opensearchpy/client/nodes.pyi +++ b/opensearchpy/client/nodes.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/client/plugins.py b/opensearchpy/client/plugins.py index 7fba8c32..19570be4 100644 --- a/opensearchpy/client/plugins.py +++ b/opensearchpy/client/plugins.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -7,7 +8,6 @@ # Modifications Copyright OpenSearch Contributors. See # GitHub history for details. - import warnings from ..plugins.alerting import AlertingClient @@ -45,9 +45,7 @@ def _dynamic_lookup(self, client): setattr(client, plugin, getattr(self, plugin)) else: warnings.warn( - "Cannot load `{plugin}` directly to OpenSearch. `{plugin}` already exists in OpenSearch. Please use `OpenSearch.plugin.{plugin}` instead.".format( - plugin=plugin - ), + f"Cannot load `{plugin}` directly to {self.client.__class__.__name__} as it already exists. 
Use `{self.client.__class__.__name__}.plugin.{plugin}` instead.", category=RuntimeWarning, stacklevel=2, ) diff --git a/opensearchpy/client/plugins.pyi b/opensearchpy/client/plugins.pyi index 2e4b2630..da9a7488 100644 --- a/opensearchpy/client/plugins.pyi +++ b/opensearchpy/client/plugins.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/client/remote.py b/opensearchpy/client/remote.py index 3f483697..eba66927 100644 --- a/opensearchpy/client/remote.py +++ b/opensearchpy/client/remote.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/client/remote.pyi b/opensearchpy/client/remote.pyi index 949301a7..93e8c067 100644 --- a/opensearchpy/client/remote.pyi +++ b/opensearchpy/client/remote.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/client/remote_store.py b/opensearchpy/client/remote_store.py new file mode 100644 index 00000000..8f4313b7 --- /dev/null +++ b/opensearchpy/client/remote_store.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + +from .utils import SKIP_IN_PATH, NamespacedClient, query_params + + +class RemoteStoreClient(NamespacedClient): + @query_params("cluster_manager_timeout", "wait_for_completion") + def restore(self, body, params=None, headers=None): + """ + Restores from remote store. + + + :arg body: Comma-separated list of index IDs + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg wait_for_completion: Should this request wait until the + operation has completed before returning. Default is false. + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", "/_remotestore/_restore", params=params, headers=headers, body=body + ) diff --git a/opensearchpy/client/remote_store.pyi b/opensearchpy/client/remote_store.pyi new file mode 100644 index 00000000..50358e63 --- /dev/null +++ b/opensearchpy/client/remote_store.pyi @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + +from typing import Any, Collection, MutableMapping, Optional, Tuple, Union + +from .utils import NamespacedClient + +class RemoteStoreClient(NamespacedClient): + def restore( + self, + *, + body: Any, + cluster_manager_timeout: Optional[Any] = ..., + wait_for_completion: Optional[Any] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... diff --git a/opensearchpy/client/security.py b/opensearchpy/client/security.py index 32a362b3..14bc0229 100644 --- a/opensearchpy/client/security.py +++ b/opensearchpy/client/security.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -7,6 +8,17 @@ # Modifications Copyright OpenSearch Contributors. See # GitHub history for details. + +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params @@ -17,25 +29,25 @@ class SecurityClient(NamespacedClient): def get_account_details(self, params=None, headers=None): """ Returns account details for the current user. + """ return self.transport.perform_request( - "GET", - _make_path("_plugins", "_security", "api", "account"), - params=params, - headers=headers, + "GET", "/_plugins/_security/api/account", params=params, headers=headers ) @query_params() def change_password(self, body, params=None, headers=None): """ Changes the password for the current user. + + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return self.transport.perform_request( "PUT", - _make_path("_plugins", "_security", "api", "account"), + "/_plugins/_security/api/account", params=params, headers=headers, body=body, @@ -45,10 +57,13 @@ def change_password(self, body, params=None, headers=None): def get_action_group(self, action_group, params=None, headers=None): """ Retrieves one action group. + + + :arg action_group: Action group to retrieve. """ if action_group in SKIP_IN_PATH: raise ValueError( - "Empty value passed for a required argument 'action-group'." + "Empty value passed for a required argument 'action_group'." ) return self.transport.perform_request( @@ -62,10 +77,11 @@ def get_action_group(self, action_group, params=None, headers=None): def get_action_groups(self, params=None, headers=None): """ Retrieves all action groups. 
+ """ return self.transport.perform_request( "GET", - _make_path("_plugins", "_security", "api", "actiongroups"), + "/_plugins/_security/api/actiongroups/", params=params, headers=headers, ) @@ -73,11 +89,14 @@ def get_action_groups(self, params=None, headers=None): @query_params() def delete_action_group(self, action_group, params=None, headers=None): """ - Deletes the specified action group. + Delete a specified action group. + + + :arg action_group: Action group to delete. """ if action_group in SKIP_IN_PATH: raise ValueError( - "Empty value passed for a required argument 'action-group'." + "Empty value passed for a required argument 'action_group'." ) return self.transport.perform_request( @@ -91,6 +110,10 @@ def delete_action_group(self, action_group, params=None, headers=None): def create_action_group(self, action_group, body, params=None, headers=None): """ Creates or replaces the specified action group. + + + :arg action_group: The name of the action group to create or + replace """ for param in (action_group, body): if param in SKIP_IN_PATH: @@ -108,6 +131,8 @@ def create_action_group(self, action_group, body, params=None, headers=None): def patch_action_group(self, action_group, body, params=None, headers=None): """ Updates individual attributes of an action group. + + """ for param in (action_group, body): if param in SKIP_IN_PATH: @@ -125,13 +150,15 @@ def patch_action_group(self, action_group, body, params=None, headers=None): def patch_action_groups(self, body, params=None, headers=None): """ Creates, updates, or deletes multiple action groups in a single call. 
+ + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return self.transport.perform_request( "PATCH", - _make_path("_plugins", "_security", "api", "actiongroups"), + "/_plugins/_security/api/actiongroups", params=params, headers=headers, body=body, @@ -140,7 +167,9 @@ def patch_action_groups(self, body, params=None, headers=None): @query_params() def get_user(self, username, params=None, headers=None): """ - Retrieves one user. + Retrieve one internal user. + + """ if username in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'username'.") @@ -155,11 +184,12 @@ def get_user(self, username, params=None, headers=None): @query_params() def get_users(self, params=None, headers=None): """ - Retrieves all users. + Retrieve all internal users. + """ return self.transport.perform_request( "GET", - _make_path("_plugins", "_security", "api", "internalusers"), + "/_plugins/_security/api/internalusers", params=params, headers=headers, ) @@ -167,7 +197,9 @@ def get_users(self, params=None, headers=None): @query_params() def delete_user(self, username, params=None, headers=None): """ - Deletes the specified user. + Delete the specified user. + + """ if username in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'username'.") @@ -183,6 +215,8 @@ def delete_user(self, username, params=None, headers=None): def create_user(self, username, body, params=None, headers=None): """ Creates or replaces the specified user. + + """ for param in (username, body): if param in SKIP_IN_PATH: @@ -200,6 +234,8 @@ def create_user(self, username, body, params=None, headers=None): def patch_user(self, username, body, params=None, headers=None): """ Updates individual attributes of an internal user. 
+ + """ for param in (username, body): if param in SKIP_IN_PATH: @@ -217,13 +253,15 @@ def patch_user(self, username, body, params=None, headers=None): def patch_users(self, body, params=None, headers=None): """ Creates, updates, or deletes multiple internal users in a single call. + + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return self.transport.perform_request( "PATCH", - _make_path("_plugins", "_security", "api", "internalusers"), + "/_plugins/_security/api/internalusers", params=params, headers=headers, body=body, @@ -233,6 +271,8 @@ def patch_users(self, body, params=None, headers=None): def get_role(self, role, params=None, headers=None): """ Retrieves one role. + + """ if role in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'role'.") @@ -248,18 +288,18 @@ def get_role(self, role, params=None, headers=None): def get_roles(self, params=None, headers=None): """ Retrieves all roles. + """ return self.transport.perform_request( - "GET", - _make_path("_plugins", "_security", "api", "roles"), - params=params, - headers=headers, + "GET", "/_plugins/_security/api/roles/", params=params, headers=headers ) @query_params() def delete_role(self, role, params=None, headers=None): """ - Deletes the specified role. + Delete the specified role. + + """ if role in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'role'.") @@ -275,6 +315,8 @@ def delete_role(self, role, params=None, headers=None): def create_role(self, role, body, params=None, headers=None): """ Creates or replaces the specified role. + + """ for param in (role, body): if param in SKIP_IN_PATH: @@ -292,6 +334,8 @@ def create_role(self, role, body, params=None, headers=None): def patch_role(self, role, body, params=None, headers=None): """ Updates individual attributes of a role. 
+ + """ for param in (role, body): if param in SKIP_IN_PATH: @@ -309,13 +353,15 @@ def patch_role(self, role, body, params=None, headers=None): def patch_roles(self, body, params=None, headers=None): """ Creates, updates, or deletes multiple roles in a single call. + + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return self.transport.perform_request( "PATCH", - _make_path("_plugins", "_security", "api", "roles"), + "/_plugins/_security/api/roles", params=params, headers=headers, body=body, @@ -325,6 +371,8 @@ def patch_roles(self, body, params=None, headers=None): def get_role_mapping(self, role, params=None, headers=None): """ Retrieves one role mapping. + + """ if role in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'role'.") @@ -340,10 +388,11 @@ def get_role_mapping(self, role, params=None, headers=None): def get_role_mappings(self, params=None, headers=None): """ Retrieves all role mappings. + """ return self.transport.perform_request( "GET", - _make_path("_plugins", "_security", "api", "rolesmapping"), + "/_plugins/_security/api/rolesmapping", params=params, headers=headers, ) @@ -352,6 +401,8 @@ def get_role_mappings(self, params=None, headers=None): def delete_role_mapping(self, role, params=None, headers=None): """ Deletes the specified role mapping. + + """ if role in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'role'.") @@ -367,6 +418,8 @@ def delete_role_mapping(self, role, params=None, headers=None): def create_role_mapping(self, role, body, params=None, headers=None): """ Creates or replaces the specified role mapping. + + """ for param in (role, body): if param in SKIP_IN_PATH: @@ -384,6 +437,8 @@ def create_role_mapping(self, role, body, params=None, headers=None): def patch_role_mapping(self, role, body, params=None, headers=None): """ Updates individual attributes of a role mapping. 
+ + """ for param in (role, body): if param in SKIP_IN_PATH: @@ -401,13 +456,15 @@ def patch_role_mapping(self, role, body, params=None, headers=None): def patch_role_mappings(self, body, params=None, headers=None): """ Creates or updates multiple role mappings in a single call. + + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return self.transport.perform_request( "PATCH", - _make_path("_plugins", "_security", "api", "rolesmapping"), + "/_plugins/_security/api/rolesmapping", params=params, headers=headers, body=body, @@ -417,6 +474,8 @@ def patch_role_mappings(self, body, params=None, headers=None): def get_tenant(self, tenant, params=None, headers=None): """ Retrieves one tenant. + + """ if tenant in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'tenant'.") @@ -432,18 +491,18 @@ def get_tenant(self, tenant, params=None, headers=None): def get_tenants(self, params=None, headers=None): """ Retrieves all tenants. + """ return self.transport.perform_request( - "GET", - _make_path("_plugins", "_security", "api", "tenants"), - params=params, - headers=headers, + "GET", "/_plugins/_security/api/tenants/", params=params, headers=headers ) @query_params() def delete_tenant(self, tenant, params=None, headers=None): """ - Deletes the specified tenant. + Delete the specified tenant. + + """ if tenant in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'tenant'.") @@ -459,6 +518,8 @@ def delete_tenant(self, tenant, params=None, headers=None): def create_tenant(self, tenant, body, params=None, headers=None): """ Creates or replaces the specified tenant. + + """ for param in (tenant, body): if param in SKIP_IN_PATH: @@ -476,6 +537,8 @@ def create_tenant(self, tenant, body, params=None, headers=None): def patch_tenant(self, tenant, body, params=None, headers=None): """ Add, delete, or modify a single tenant. 
+ + """ for param in (tenant, body): if param in SKIP_IN_PATH: @@ -493,13 +556,15 @@ def patch_tenant(self, tenant, body, params=None, headers=None): def patch_tenants(self, body, params=None, headers=None): """ Add, delete, or modify multiple tenants in a single call. + + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return self.transport.perform_request( "PATCH", - _make_path("_plugins", "_security", "api", "tenants"), + "/_plugins/_security/api/tenants/", params=params, headers=headers, body=body, @@ -508,11 +573,12 @@ def patch_tenants(self, body, params=None, headers=None): @query_params() def get_configuration(self, params=None, headers=None): """ - Retrieves the current Security plugin configuration in JSON format. + Returns the current Security plugin configuration in JSON format. + """ return self.transport.perform_request( "GET", - _make_path("_plugins", "_security", "api", "securityconfig"), + "/_plugins/_security/api/securityconfig", params=params, headers=headers, ) @@ -520,14 +586,16 @@ def get_configuration(self, params=None, headers=None): @query_params() def update_configuration(self, body, params=None, headers=None): """ - Retrieves the current Security plugin configuration in JSON format. + Adds or updates the existing configuration using the REST API. + + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return self.transport.perform_request( "PUT", - _make_path("_plugins", "_security", "api", "securityconfig", "config"), + "/_plugins/_security/api/securityconfig/config", params=params, headers=headers, body=body, @@ -536,14 +604,16 @@ def update_configuration(self, body, params=None, headers=None): @query_params() def patch_configuration(self, body, params=None, headers=None): """ - Updates the existing configuration using the REST API. + A PATCH call is used to update the existing configuration using the REST API. 
+ + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return self.transport.perform_request( "PATCH", - _make_path("_plugins", "_security", "api", "securityconfig"), + "/_plugins/_security/api/securityconfig", params=params, headers=headers, body=body, @@ -553,6 +623,8 @@ def patch_configuration(self, body, params=None, headers=None): def get_distinguished_names(self, cluster_name=None, params=None, headers=None): """ Retrieves all distinguished names in the allow list. + + """ return self.transport.perform_request( "GET", @@ -562,13 +634,19 @@ def get_distinguished_names(self, cluster_name=None, params=None, headers=None): ) @query_params() - def update_distinguished_names(self, cluster_name, body, params=None, headers=None): + def update_distinguished_names( + self, cluster_name, body=None, params=None, headers=None + ): """ - Adds or updates the specified distinguished names in the cluster's or node's allow list. + Adds or updates the specified distinguished names in the cluster’s or node’s + allow list. + + """ - for param in (cluster_name, body): - if param in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument.") + if cluster_name in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'cluster_name'." + ) return self.transport.perform_request( "PUT", @@ -581,11 +659,14 @@ def update_distinguished_names(self, cluster_name, body, params=None, headers=No @query_params() def delete_distinguished_names(self, cluster_name, params=None, headers=None): """ - Deletes all distinguished names in the specified cluster's or node's allow list. + Deletes all distinguished names in the specified cluster’s or node’s allow + list. + + """ if cluster_name in SKIP_IN_PATH: raise ValueError( - "Empty value passed for a required argument 'cluster-name'." + "Empty value passed for a required argument 'cluster_name'." 
) return self.transport.perform_request( @@ -598,25 +679,22 @@ def delete_distinguished_names(self, cluster_name, params=None, headers=None): @query_params() def get_certificates(self, params=None, headers=None): """ - Retrieves the cluster's security certificates. + Retrieves the cluster’s security certificates. + """ return self.transport.perform_request( - "GET", - _make_path("_plugins", "_security", "api", "ssl", "certs"), - params=params, - headers=headers, + "GET", "/_plugins/_security/api/ssl/certs", params=params, headers=headers ) @query_params() def reload_transport_certificates(self, params=None, headers=None): """ - Reloads SSL certificates that are about to expire without restarting the OpenSearch node. + Reload transport layer communication certificates. + """ return self.transport.perform_request( "PUT", - _make_path( - "_opendistro", "_security", "api", "ssl", "transport", "reloadcerts" - ), + "/_plugins/_security/api/ssl/transport/reloadcerts", params=params, headers=headers, ) @@ -624,11 +702,12 @@ def reload_transport_certificates(self, params=None, headers=None): @query_params() def reload_http_certificates(self, params=None, headers=None): """ - Reloads SSL certificates that are about to expire without restarting the OpenSearch node. + Reload HTTP layer communication certificates. + """ return self.transport.perform_request( "PUT", - _make_path("_opendistro", "_security", "api", "ssl", "http", "reloadcerts"), + "/_plugins/_security/api/ssl/http/reloadcerts", params=params, headers=headers, ) @@ -637,12 +716,10 @@ def reload_http_certificates(self, params=None, headers=None): def flush_cache(self, params=None, headers=None): """ Flushes the Security plugin user, authentication, and authorization cache. 
+ """ return self.transport.perform_request( - "DELETE", - _make_path("_plugins", "_security", "api", "cache"), - params=params, - headers=headers, + "DELETE", "/_plugins/_security/api/cache", params=params, headers=headers ) @query_params() @@ -658,13 +735,11 @@ def health(self, params=None, headers=None): @query_params() def get_audit_configuration(self, params=None, headers=None): """ - A GET call retrieves the audit configuration. + Retrieves the audit configuration. + """ return self.transport.perform_request( - "GET", - _make_path("_opendistro", "_security", "api", "audit"), - params=params, - headers=headers, + "GET", "/_plugins/_security/api/audit", params=params, headers=headers ) @query_params() @@ -672,6 +747,7 @@ def update_audit_configuration(self, body, params=None, headers=None): """ Updates the audit configuration. + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") @@ -688,13 +764,33 @@ def update_audit_configuration(self, body, params=None, headers=None): def patch_audit_configuration(self, body, params=None, headers=None): """ A PATCH call is used to update specified fields in the audit configuration. + + + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "PATCH", + "/_plugins/_security/api/audit", + params=params, + headers=headers, + body=body, + ) + + @query_params() + def patch_distinguished_names(self, body, params=None, headers=None): + """ + Bulk update of distinguished names. 
+ + """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return self.transport.perform_request( "PATCH", - _make_path("_opendistro", "_security", "api", "audit"), + "/_plugins/_security/api/nodesdn", params=params, headers=headers, body=body, diff --git a/opensearchpy/client/security.pyi b/opensearchpy/client/security.pyi index c729d5d3..99e009d9 100644 --- a/opensearchpy/client/security.pyi +++ b/opensearchpy/client/security.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -6,194 +7,263 @@ # # Modifications Copyright OpenSearch Contributors. See # GitHub history for details. + +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union -from .utils import NamespacedClient as NamespacedClient +from .utils import NamespacedClient class SecurityClient(NamespacedClient): def get_account_details( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... 
+ self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... def change_password( self, + *, body: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., - ) -> Union[bool, Any]: ... + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... def get_action_group( self, action_group: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., - ) -> Union[bool, Any]: ... 
+ *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... def get_action_groups( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... def delete_action_group( self, action_group: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., - ) -> Union[bool, Any]: ... 
+ *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... def create_action_group( self, action_group: Any, + *, body: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., - ) -> Union[bool, Any]: ... + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... def patch_action_group( self, action_group: Any, + *, body: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., - ) -> Union[bool, Any]: ... 
+ pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... def patch_action_groups( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... + self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... def get_user( self, username: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., - ) -> Union[bool, Any]: ... 
+ *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... def get_users( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... def delete_user( self, username: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., - ) -> Union[bool, Any]: ... + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... 
def create_user( self, username: Any, + *, body: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., - ) -> Union[bool, Any]: ... + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... def patch_user( self, username: Any, + *, body: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., - ) -> Union[bool, Any]: ... + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... def patch_users( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def get_role( - self, role: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def get_roles( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def delete_role( - self, role: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... 
- def create_role( - self, role: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def patch_role( - self, role: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def patch_roles( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def get_role_mapping( - self, role: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def get_role_mappings( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def delete_role_mapping( - self, role: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def create_role_mapping( - self, role: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def patch_role_mapping( - self, role: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def patch_role_mappings( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def get_tenant( - self, - tenant: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., - ) -> Union[bool, Any]: ... - def get_tenants( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def delete_tenant( - self, - tenant: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., - ) -> Union[bool, Any]: ... - def create_tenant( - self, - tenant: Any, - body: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., - ) -> Union[bool, Any]: ... - def patch_tenant( - self, - tenant: Any, - body: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., - ) -> Union[bool, Any]: ... 
- def patch_tenants( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def get_configuration( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def update_configuration( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def patch_configuration( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def get_distinguished_names( - self, - cluster_name: Union[Any, None] = ..., - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., - ) -> Union[bool, Any]: ... - def update_distinguished_names( self, - cluster_name: Any, + *, body: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., - ) -> Union[bool, Any]: ... - def delete_distinguished_names( - self, - cluster_name: Any, - params: Union[Any, None] = ..., - headers: Union[Any, None] = ..., - ) -> Union[bool, Any]: ... - def get_certificates( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def reload_transport_certificates( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def reload_http_certificates( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def flush_cache( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... 
- def health( + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def get_role( self, + role: Any, *, pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -208,13 +278,9 @@ class SecurityClient(NamespacedClient): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... - def get_audit_configuration( - self, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... - def update_audit_configuration( + def get_roles( self, *, - body: Any, pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -228,6 +294,528 @@ class SecurityClient(NamespacedClient): params: Optional[MutableMapping[str, Any]] = ..., headers: Optional[MutableMapping[str, str]] = ..., ) -> Any: ... - def patch_audit_configuration( - self, body: Any, params: Union[Any, None] = ..., headers: Union[Any, None] = ... - ) -> Union[bool, Any]: ... 
+ def delete_role( + self, + role: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def create_role( + self, + role: Any, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def patch_role( + self, + role: Any, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... 
+ def patch_roles( + self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def get_role_mapping( + self, + role: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def get_role_mappings( + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... 
+ def delete_role_mapping( + self, + role: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def create_role_mapping( + self, + role: Any, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def patch_role_mapping( + self, + role: Any, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... 
+ def patch_role_mappings( + self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def get_tenant( + self, + tenant: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def get_tenants( + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... 
+ def delete_tenant( + self, + tenant: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def create_tenant( + self, + tenant: Any, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def patch_tenant( + self, + tenant: Any, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... 
+ def patch_tenants( + self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def get_configuration( + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def update_configuration( + self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... 
+ def patch_configuration( + self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def get_distinguished_names( + self, + *, + cluster_name: Optional[Any] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def update_distinguished_names( + self, + cluster_name: Any, + *, + body: Optional[Any] = ..., + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... 
+ def delete_distinguished_names( + self, + cluster_name: Any, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def get_certificates( + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def reload_transport_certificates( + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... 
+ def reload_http_certificates( + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def flush_cache( + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def health( + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... 
+ def get_audit_configuration( + self, + *, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def update_audit_configuration( + self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... + def patch_audit_configuration( + self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... 
+ def patch_distinguished_names( + self, + *, + body: Any, + pretty: Optional[bool] = ..., + human: Optional[bool] = ..., + error_trace: Optional[bool] = ..., + format: Optional[str] = ..., + filter_path: Optional[Union[str, Collection[str]]] = ..., + request_timeout: Optional[Union[int, float]] = ..., + ignore: Optional[Union[int, Collection[int]]] = ..., + opaque_id: Optional[str] = ..., + http_auth: Optional[Union[str, Tuple[str, str]]] = ..., + api_key: Optional[Union[str, Tuple[str, str]]] = ..., + params: Optional[MutableMapping[str, Any]] = ..., + headers: Optional[MutableMapping[str, str]] = ..., + ) -> Any: ... diff --git a/opensearchpy/client/snapshot.py b/opensearchpy/client/snapshot.py index 200fff96..313f7dd3 100644 --- a/opensearchpy/client/snapshot.py +++ b/opensearchpy/client/snapshot.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -25,25 +26,36 @@ # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. +# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + + from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params class SnapshotClient(NamespacedClient): - @query_params("master_timeout", "cluster_manager_timeout", "wait_for_completion") + @query_params("cluster_manager_timeout", "master_timeout", "wait_for_completion") def create(self, repository, snapshot, body=None, params=None, headers=None): """ Creates a snapshot in a repository. 
- :arg repository: A repository name - :arg snapshot: A snapshot name + :arg repository: Repository name. + :arg snapshot: Snapshot name. :arg body: The snapshot definition - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg wait_for_completion: Should this request wait until the - operation has completed before returning + operation has completed before returning. Default is false. """ for param in (repository, snapshot): if param in SKIP_IN_PATH: @@ -57,18 +69,19 @@ def create(self, repository, snapshot, body=None, params=None, headers=None): body=body, ) - @query_params("master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "master_timeout") def delete(self, repository, snapshot, params=None, headers=None): """ Deletes a snapshot. - :arg repository: A repository name - :arg snapshot: A snapshot name - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + :arg repository: Repository name. + :arg snapshot: Snapshot name. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. 
""" for param in (repository, snapshot): if param in SKIP_IN_PATH: @@ -82,33 +95,25 @@ def delete(self, repository, snapshot, params=None, headers=None): ) @query_params( - "ignore_unavailable", - "include_repository", - "index_details", - "master_timeout", - "cluster_manager_timeout", - "verbose", + "cluster_manager_timeout", "ignore_unavailable", "master_timeout", "verbose" ) def get(self, repository, snapshot, params=None, headers=None): """ Returns information about a snapshot. - :arg repository: A repository name - :arg snapshot: A comma-separated list of snapshot names + :arg repository: Repository name. + :arg snapshot: Comma-separated list of snapshot names. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg ignore_unavailable: Whether to ignore unavailable snapshots, defaults to false which means a SnapshotMissingException is - thrown - :arg include_repository: Whether to include the repository name - in the snapshot info. Defaults to true. - :arg index_details: Whether to include details of each index in - the snapshot, if those details are available. Defaults to false. - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + thrown. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg verbose: Whether to show verbose snapshot info or only show - the basic info found in the repository index blob + the basic info found in the repository index blob. 
""" for param in (repository, snapshot): if param in SKIP_IN_PATH: @@ -121,7 +126,7 @@ def get(self, repository, snapshot, params=None, headers=None): headers=headers, ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") def delete_repository(self, repository, params=None, headers=None): """ Deletes a repository. @@ -129,11 +134,12 @@ def delete_repository(self, repository, params=None, headers=None): :arg repository: Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported. - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'repository'.") @@ -145,38 +151,40 @@ def delete_repository(self, repository, params=None, headers=None): headers=headers, ) - @query_params("local", "master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "local", "master_timeout") def get_repository(self, repository=None, params=None, headers=None): """ Returns information about a repository. - :arg repository: A comma-separated list of repository names + :arg repository: Comma-separated list of repository names. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. 
:arg local: Return local information, do not retrieve the state - from cluster_manager node (default: false) - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + from cluster-manager node. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ return self.transport.perform_request( "GET", _make_path("_snapshot", repository), params=params, headers=headers ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout", "verify") + @query_params("cluster_manager_timeout", "master_timeout", "timeout", "verify") def create_repository(self, repository, body, params=None, headers=None): """ Creates a repository. - :arg repository: A repository name + :arg repository: Repository name. :arg body: The repository definition - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout - :arg verify: Whether to verify the repository after creation + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. + :arg verify: Whether to verify the repository after creation. 
""" for param in (repository, body): if param in SKIP_IN_PATH: @@ -190,21 +198,22 @@ def create_repository(self, repository, body, params=None, headers=None): body=body, ) - @query_params("master_timeout", "cluster_manager_timeout", "wait_for_completion") + @query_params("cluster_manager_timeout", "master_timeout", "wait_for_completion") def restore(self, repository, snapshot, body=None, params=None, headers=None): """ Restores a snapshot. - :arg repository: A repository name - :arg snapshot: A snapshot name + :arg repository: Repository name. + :arg snapshot: Snapshot name. :arg body: Details of what to restore - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. :arg wait_for_completion: Should this request wait until the - operation has completed before returning + operation has completed before returning. Default is false. """ for param in (repository, snapshot): if param in SKIP_IN_PATH: @@ -218,21 +227,22 @@ def restore(self, repository, snapshot, body=None, params=None, headers=None): body=body, ) - @query_params("ignore_unavailable", "master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "ignore_unavailable", "master_timeout") def status(self, repository=None, snapshot=None, params=None, headers=None): """ Returns information about the status of a snapshot. - :arg repository: A repository name - :arg snapshot: A comma-separated list of snapshot names + :arg repository: Repository name. + :arg snapshot: Comma-separated list of snapshot names. 
+ :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. :arg ignore_unavailable: Whether to ignore unavailable snapshots, defaults to false which means a SnapshotMissingException is - thrown - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + thrown. Default is false. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ return self.transport.perform_request( "GET", @@ -241,18 +251,19 @@ def status(self, repository=None, snapshot=None, params=None, headers=None): headers=headers, ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") def verify_repository(self, repository, params=None, headers=None): """ Verifies a repository. - :arg repository: A repository name - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout + :arg repository: Repository name. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. 
""" if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'repository'.") @@ -264,18 +275,19 @@ def verify_repository(self, repository, params=None, headers=None): headers=headers, ) - @query_params("master_timeout", "cluster_manager_timeout", "timeout") + @query_params("cluster_manager_timeout", "master_timeout", "timeout") def cleanup_repository(self, repository, params=None, headers=None): """ Removes stale data from repository. - :arg repository: A repository name - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node - :arg timeout: Explicit operation timeout + :arg repository: Repository name. + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. + :arg timeout: Operation timeout. """ if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'repository'.") @@ -287,7 +299,7 @@ def cleanup_repository(self, repository, params=None, headers=None): headers=headers, ) - @query_params("master_timeout", "cluster_manager_timeout") + @query_params("cluster_manager_timeout", "master_timeout") def clone( self, repository, snapshot, target_snapshot, body, params=None, headers=None ): @@ -295,14 +307,15 @@ def clone( Clones indices from one snapshot into another snapshot in the same repository. - :arg repository: A repository name - :arg snapshot: The name of the snapshot to clone from - :arg target_snapshot: The name of the cloned snapshot to create + :arg repository: Repository name. + :arg snapshot: Snapshot name. + :arg target_snapshot: The name of the cloned snapshot to create. 
:arg body: The snapshot clone definition - :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection - to master node - :arg cluster_manager_timeout: Explicit operation timeout for connection - to cluster_manager node + :arg cluster_manager_timeout: Operation timeout for connection + to cluster-manager node. + :arg master_timeout (Deprecated: To promote inclusive language, + use 'cluster_manager_timeout' instead.): Operation timeout for + connection to master node. """ for param in (repository, snapshot, target_snapshot, body): if param in SKIP_IN_PATH: @@ -315,56 +328,3 @@ def clone( headers=headers, body=body, ) - - @query_params( - "blob_count", - "concurrency", - "detailed", - "early_read_node_count", - "max_blob_size", - "max_total_data_size", - "rare_action_probability", - "rarely_abort_writes", - "read_node_count", - "seed", - "timeout", - ) - def repository_analyze(self, repository, params=None, headers=None): - """ - Analyzes a repository for correctness and performance - - - :arg repository: A repository name - :arg blob_count: Number of blobs to create during the test. - Defaults to 100. - :arg concurrency: Number of operations to run concurrently - during the test. Defaults to 10. - :arg detailed: Whether to return detailed results or a summary. - Defaults to 'false' so that only the summary is returned. - :arg early_read_node_count: Number of nodes on which to perform - an early read on a blob, i.e. before writing has completed. Early reads - are rare actions so the 'rare_action_probability' parameter is also - relevant. Defaults to 2. - :arg max_blob_size: Maximum size of a blob to create during the - test, e.g '1gb' or '100mb'. Defaults to '10mb'. - :arg max_total_data_size: Maximum total size of all blobs to - create during the test, e.g '1tb' or '100gb'. Defaults to '1gb'. - :arg rare_action_probability: Probability of taking a rare - action such as an early read or an overwrite. Defaults to 0.02. 
- :arg rarely_abort_writes: Whether to rarely abort writes before - they complete. Defaults to 'true'. - :arg read_node_count: Number of nodes on which to read a blob - after writing. Defaults to 10. - :arg seed: Seed for the random number generator used to create - the test workload. Defaults to a random value. - :arg timeout: Explicit operation timeout. Defaults to '30s'. - """ - if repository in SKIP_IN_PATH: - raise ValueError("Empty value passed for a required argument 'repository'.") - - return self.transport.perform_request( - "POST", - _make_path("_snapshot", repository, "_analyze"), - params=params, - headers=headers, - ) diff --git a/opensearchpy/client/snapshot.pyi b/opensearchpy/client/snapshot.pyi index b1db95c0..fd239fad 100644 --- a/opensearchpy/client/snapshot.pyi +++ b/opensearchpy/client/snapshot.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -24,6 +25,15 @@ # specific language governing permissions and limitations # under the License. +# ---------------------------------------------------- +# THIS CODE IS GENERATED AND MANUAL EDITS WILL BE LOST. 
+# +# To contribute, kindly make essential modifications through either the "opensearch-py client generator": +# https://github.com/opensearch-project/opensearch-py/blob/main/utils/generate-api.py +# or the "OpenSearch API specification" available at: +# https://github.com/opensearch-project/opensearch-api-specification/blob/main/OpenSearch.openapi.json +# ----------------------------------------------------- + from typing import Any, Collection, MutableMapping, Optional, Tuple, Union from .utils import NamespacedClient @@ -35,8 +45,8 @@ class SnapshotClient(NamespacedClient): snapshot: Any, *, body: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., wait_for_completion: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -56,8 +66,8 @@ class SnapshotClient(NamespacedClient): repository: Any, snapshot: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -76,11 +86,9 @@ class SnapshotClient(NamespacedClient): repository: Any, snapshot: Any, *, + cluster_manager_timeout: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., - include_repository: Optional[Any] = ..., - index_details: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., verbose: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -99,8 +107,8 @@ class SnapshotClient(NamespacedClient): self, repository: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -119,9 +127,9 @@ class SnapshotClient(NamespacedClient): self, *, repository: Optional[Any] = 
..., + cluster_manager_timeout: Optional[Any] = ..., local: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -140,8 +148,8 @@ class SnapshotClient(NamespacedClient): repository: Any, *, body: Any, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., verify: Optional[Any] = ..., pretty: Optional[bool] = ..., @@ -163,8 +171,8 @@ class SnapshotClient(NamespacedClient): snapshot: Any, *, body: Optional[Any] = ..., - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., wait_for_completion: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -184,9 +192,9 @@ class SnapshotClient(NamespacedClient): *, repository: Optional[Any] = ..., snapshot: Optional[Any] = ..., + cluster_manager_timeout: Optional[Any] = ..., ignore_unavailable: Optional[Any] = ..., master_timeout: Optional[Any] = ..., - cluster_manager_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., @@ -204,8 +212,8 @@ class SnapshotClient(NamespacedClient): self, repository: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -224,8 +232,8 @@ class SnapshotClient(NamespacedClient): self, repository: Any, *, - master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., @@ -247,36 +255,8 @@ class SnapshotClient(NamespacedClient): target_snapshot: Any, *, body: Any, - 
master_timeout: Optional[Any] = ..., cluster_manager_timeout: Optional[Any] = ..., - pretty: Optional[bool] = ..., - human: Optional[bool] = ..., - error_trace: Optional[bool] = ..., - format: Optional[str] = ..., - filter_path: Optional[Union[str, Collection[str]]] = ..., - request_timeout: Optional[Union[int, float]] = ..., - ignore: Optional[Union[int, Collection[int]]] = ..., - opaque_id: Optional[str] = ..., - http_auth: Optional[Union[str, Tuple[str, str]]] = ..., - api_key: Optional[Union[str, Tuple[str, str]]] = ..., - params: Optional[MutableMapping[str, Any]] = ..., - headers: Optional[MutableMapping[str, str]] = ..., - ) -> Any: ... - def repository_analyze( - self, - repository: Any, - *, - blob_count: Optional[Any] = ..., - concurrency: Optional[Any] = ..., - detailed: Optional[Any] = ..., - early_read_node_count: Optional[Any] = ..., - max_blob_size: Optional[Any] = ..., - max_total_data_size: Optional[Any] = ..., - rare_action_probability: Optional[Any] = ..., - rarely_abort_writes: Optional[Any] = ..., - read_node_count: Optional[Any] = ..., - seed: Optional[Any] = ..., - timeout: Optional[Any] = ..., + master_timeout: Optional[Any] = ..., pretty: Optional[bool] = ..., human: Optional[bool] = ..., error_trace: Optional[bool] = ..., diff --git a/opensearchpy/client/tasks.py b/opensearchpy/client/tasks.py index ff76a3a6..90c4e731 100644 --- a/opensearchpy/client/tasks.py +++ b/opensearchpy/client/tasks.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -57,9 +58,10 @@ def list(self, params=None, headers=None): :arg actions: Comma-separated list of actions that should be returned. Leave empty to return all. - :arg detailed: Return detailed task information. + :arg detailed: Return detailed task information. Default is + false. :arg group_by: Group tasks by nodes or parent/child - relationships. Valid choices: nodes, parents, none + relationships. 
Valid choices are nodes, parents, none. :arg nodes: Comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all @@ -68,7 +70,7 @@ def list(self, params=None, headers=None): (node_id:task_number). Set to -1 to return all. :arg timeout: Operation timeout. :arg wait_for_completion: Should this request wait until the - operation has completed before returning. + operation has completed before returning. Default is false. """ return self.transport.perform_request( "GET", "/_tasks", params=params, headers=headers @@ -91,7 +93,7 @@ def cancel(self, task_id=None, params=None, headers=None): :arg parent_task_id: Cancel tasks with specified parent task id (node_id:task_number). Set to -1 to cancel all. :arg wait_for_completion: Should this request wait until the - operation has completed before returning. + operation has completed before returning. Default is false. """ return self.transport.perform_request( "POST", @@ -110,7 +112,7 @@ def get(self, task_id=None, params=None, headers=None): (node_id:task_number). :arg timeout: Operation timeout. :arg wait_for_completion: Should this request wait until the - operation has completed before returning. + operation has completed before returning. Default is false. 
""" if task_id in SKIP_IN_PATH: warnings.warn( diff --git a/opensearchpy/client/tasks.pyi b/opensearchpy/client/tasks.pyi index 50ad69bb..0aeed153 100644 --- a/opensearchpy/client/tasks.pyi +++ b/opensearchpy/client/tasks.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/client/utils.py b/opensearchpy/client/utils.py index a5e99b11..7b7366de 100644 --- a/opensearchpy/client/utils.py +++ b/opensearchpy/client/utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -32,7 +33,7 @@ from datetime import date, datetime from functools import wraps -from ..compat import PY2, quote, string_types, to_bytes, to_str, unquote, urlparse +from ..compat import quote, string_types, to_bytes, to_str, unquote, urlparse # parts of URL to be omitted SKIP_IN_PATH = (None, "", b"", [], ()) @@ -107,9 +108,7 @@ def _escape(value): # encode strings to utf-8 if isinstance(value, string_types): - if PY2 and isinstance(value, unicode): # noqa: F821 - return value.encode("utf-8") - if not PY2 and isinstance(value, str): + if isinstance(value, str): return value.encode("utf-8") return str(value) diff --git a/opensearchpy/client/utils.pyi b/opensearchpy/client/utils.pyi index 4924fed9..2aa263fc 100644 --- a/opensearchpy/client/utils.pyi +++ b/opensearchpy/client/utils.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/compat.py b/opensearchpy/compat.py index a5169050..57a88a74 100644 --- a/opensearchpy/compat.py +++ b/opensearchpy/compat.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -25,41 +26,23 @@ # under the License. 
-import sys +from queue import Queue +from urllib.parse import quote, quote_plus, unquote, urlencode, urlparse -PY2 = sys.version_info[0] == 2 +string_types = str, bytes +map = map -if PY2: - string_types = (basestring,) # noqa: F821 - from itertools import imap as map - from urllib import quote, quote_plus, unquote, urlencode - from Queue import Queue - from urlparse import urlparse +def to_str(x, encoding="ascii"): + if not isinstance(x, str): + return x.decode(encoding) + return x - def to_str(x, encoding="ascii"): - if not isinstance(x, str): - return x.encode(encoding) - return x - to_bytes = to_str - -else: - string_types = str, bytes - from urllib.parse import quote, quote_plus, unquote, urlencode, urlparse - - map = map - from queue import Queue - - def to_str(x, encoding="ascii"): - if not isinstance(x, str): - return x.decode(encoding) - return x - - def to_bytes(x, encoding="ascii"): - if not isinstance(x, bytes): - return x.encode(encoding) - return x +def to_bytes(x, encoding="ascii"): + if not isinstance(x, bytes): + return x.encode(encoding) + return x try: diff --git a/opensearchpy/compat.pyi b/opensearchpy/compat.pyi index d3dc0a08..2606c723 100644 --- a/opensearchpy/compat.pyi +++ b/opensearchpy/compat.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -27,7 +28,6 @@ import sys from typing import Callable, Tuple, Type, Union -PY2: bool string_types: Tuple[type, ...] 
to_str: Callable[[Union[str, bytes]], str] diff --git a/opensearchpy/connection/__init__.py b/opensearchpy/connection/__init__.py index 6e331a54..40037859 100644 --- a/opensearchpy/connection/__init__.py +++ b/opensearchpy/connection/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/connection/__init__.pyi b/opensearchpy/connection/__init__.pyi index ad1d9e62..f3f31016 100644 --- a/opensearchpy/connection/__init__.pyi +++ b/opensearchpy/connection/__init__.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/connection/async_connections.py b/opensearchpy/connection/async_connections.py index acaa0b68..87dd22d7 100644 --- a/opensearchpy/connection/async_connections.py +++ b/opensearchpy/connection/async_connections.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/connection/async_connections.pyi b/opensearchpy/connection/async_connections.pyi index 8935ec6b..eb310cdf 100644 --- a/opensearchpy/connection/async_connections.pyi +++ b/opensearchpy/connection/async_connections.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/connection/base.py b/opensearchpy/connection/base.py index 05edca73..ee8d934f 100644 --- a/opensearchpy/connection/base.py +++ b/opensearchpy/connection/base.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/connection/base.pyi b/opensearchpy/connection/base.pyi index 7e51d20c..333f4a70 100644 --- a/opensearchpy/connection/base.pyi +++ 
b/opensearchpy/connection/base.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/connection/connections.py b/opensearchpy/connection/connections.py index 857cba3a..4401ade0 100644 --- a/opensearchpy/connection/connections.py +++ b/opensearchpy/connection/connections.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/connection/connections.pyi b/opensearchpy/connection/connections.pyi index 07814ba4..d763f57c 100644 --- a/opensearchpy/connection/connections.pyi +++ b/opensearchpy/connection/connections.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/connection/http_async.py b/opensearchpy/connection/http_async.py index 10f5a56a..b7288005 100644 --- a/opensearchpy/connection/http_async.py +++ b/opensearchpy/connection/http_async.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/connection/http_async.pyi b/opensearchpy/connection/http_async.pyi index adde809b..9fcfb246 100644 --- a/opensearchpy/connection/http_async.pyi +++ b/opensearchpy/connection/http_async.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/connection/http_requests.py b/opensearchpy/connection/http_requests.py index e0b6d143..f9e9b1a1 100644 --- a/opensearchpy/connection/http_requests.py +++ b/opensearchpy/connection/http_requests.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git 
a/opensearchpy/connection/http_requests.pyi b/opensearchpy/connection/http_requests.pyi index c9bb5617..61b6d496 100644 --- a/opensearchpy/connection/http_requests.pyi +++ b/opensearchpy/connection/http_requests.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/connection/http_urllib3.py b/opensearchpy/connection/http_urllib3.py index 0042cc3c..bde689ae 100644 --- a/opensearchpy/connection/http_urllib3.py +++ b/opensearchpy/connection/http_urllib3.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/connection/http_urllib3.pyi b/opensearchpy/connection/http_urllib3.pyi index 83d62117..7fe27617 100644 --- a/opensearchpy/connection/http_urllib3.pyi +++ b/opensearchpy/connection/http_urllib3.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/connection/pooling.py b/opensearchpy/connection/pooling.py index bd9fe5f9..48503a58 100644 --- a/opensearchpy/connection/pooling.py +++ b/opensearchpy/connection/pooling.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/connection/pooling.pyi b/opensearchpy/connection/pooling.pyi index b32fd068..53e38f40 100644 --- a/opensearchpy/connection/pooling.pyi +++ b/opensearchpy/connection/pooling.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/connection_pool.py b/opensearchpy/connection_pool.py index 61873748..7ff15512 100644 --- a/opensearchpy/connection_pool.py +++ b/opensearchpy/connection_pool.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # 
SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/connection_pool.pyi b/opensearchpy/connection_pool.pyi index 7a528cf4..e219591c 100644 --- a/opensearchpy/connection_pool.pyi +++ b/opensearchpy/connection_pool.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/exceptions.py b/opensearchpy/exceptions.py index cc35c91f..f2f994ca 100644 --- a/opensearchpy/exceptions.py +++ b/opensearchpy/exceptions.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/exceptions.pyi b/opensearchpy/exceptions.pyi index 8adafdd8..0ecacc6f 100644 --- a/opensearchpy/exceptions.pyi +++ b/opensearchpy/exceptions.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/__init__.py b/opensearchpy/helpers/__init__.py index 8057de7e..7116dc48 100644 --- a/opensearchpy/helpers/__init__.py +++ b/opensearchpy/helpers/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/__init__.pyi b/opensearchpy/helpers/__init__.pyi index 01d4973c..24c0d13d 100644 --- a/opensearchpy/helpers/__init__.pyi +++ b/opensearchpy/helpers/__init__.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/actions.py b/opensearchpy/helpers/actions.py index e565256f..587444a3 100644 --- a/opensearchpy/helpers/actions.py +++ b/opensearchpy/helpers/actions.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The 
OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/actions.pyi b/opensearchpy/helpers/actions.pyi index 4fee4bd1..e1ee4254 100644 --- a/opensearchpy/helpers/actions.pyi +++ b/opensearchpy/helpers/actions.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/aggs.py b/opensearchpy/helpers/aggs.py index 5a7f800c..db7d2c28 100644 --- a/opensearchpy/helpers/aggs.py +++ b/opensearchpy/helpers/aggs.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/aggs.pyi b/opensearchpy/helpers/aggs.pyi index e3f6e93c..08b74a3a 100644 --- a/opensearchpy/helpers/aggs.pyi +++ b/opensearchpy/helpers/aggs.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/analysis.py b/opensearchpy/helpers/analysis.py index 251b004a..4e2646d7 100644 --- a/opensearchpy/helpers/analysis.py +++ b/opensearchpy/helpers/analysis.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/analysis.pyi b/opensearchpy/helpers/analysis.pyi index b4f37af5..364a6ea5 100644 --- a/opensearchpy/helpers/analysis.pyi +++ b/opensearchpy/helpers/analysis.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/asyncsigner.py b/opensearchpy/helpers/asyncsigner.py index e21007d3..7f063c9f 100644 --- a/opensearchpy/helpers/asyncsigner.py +++ b/opensearchpy/helpers/asyncsigner.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch 
Contributors require contributions made to diff --git a/opensearchpy/helpers/asyncsigner.pyi b/opensearchpy/helpers/asyncsigner.pyi index 2c701bb9..e0b5a7b5 100644 --- a/opensearchpy/helpers/asyncsigner.pyi +++ b/opensearchpy/helpers/asyncsigner.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/document.py b/opensearchpy/helpers/document.py index 7d45275a..de9891bc 100644 --- a/opensearchpy/helpers/document.py +++ b/opensearchpy/helpers/document.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/document.pyi b/opensearchpy/helpers/document.pyi index d740b931..89ca6426 100644 --- a/opensearchpy/helpers/document.pyi +++ b/opensearchpy/helpers/document.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/errors.py b/opensearchpy/helpers/errors.py index dc9e62da..5d05bd23 100644 --- a/opensearchpy/helpers/errors.py +++ b/opensearchpy/helpers/errors.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/errors.pyi b/opensearchpy/helpers/errors.pyi index bed92df7..9572d68f 100644 --- a/opensearchpy/helpers/errors.pyi +++ b/opensearchpy/helpers/errors.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/faceted_search.py b/opensearchpy/helpers/faceted_search.py index 6da84dc2..e1bf9c0e 100644 --- a/opensearchpy/helpers/faceted_search.py +++ b/opensearchpy/helpers/faceted_search.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: 
Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/faceted_search.pyi b/opensearchpy/helpers/faceted_search.pyi index 3f1d175b..d3ff998d 100644 --- a/opensearchpy/helpers/faceted_search.pyi +++ b/opensearchpy/helpers/faceted_search.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/field.py b/opensearchpy/helpers/field.py index 756a3a0e..edeaecf0 100644 --- a/opensearchpy/helpers/field.py +++ b/opensearchpy/helpers/field.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/field.pyi b/opensearchpy/helpers/field.pyi index 3704aa81..3b448641 100644 --- a/opensearchpy/helpers/field.pyi +++ b/opensearchpy/helpers/field.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/function.py b/opensearchpy/helpers/function.py index 5b8db7b0..47b7b148 100644 --- a/opensearchpy/helpers/function.py +++ b/opensearchpy/helpers/function.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/function.pyi b/opensearchpy/helpers/function.pyi index 58a00fba..72b4b342 100644 --- a/opensearchpy/helpers/function.pyi +++ b/opensearchpy/helpers/function.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/index.py b/opensearchpy/helpers/index.py index d6e08b50..6bbc23e8 100644 --- a/opensearchpy/helpers/index.py +++ b/opensearchpy/helpers/index.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: 
Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/index.pyi b/opensearchpy/helpers/index.pyi index 2bf5747e..e2f95797 100644 --- a/opensearchpy/helpers/index.pyi +++ b/opensearchpy/helpers/index.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/mapping.py b/opensearchpy/helpers/mapping.py index 9270da97..8fd37348 100644 --- a/opensearchpy/helpers/mapping.py +++ b/opensearchpy/helpers/mapping.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/mapping.pyi b/opensearchpy/helpers/mapping.pyi index 8dab731a..99a82935 100644 --- a/opensearchpy/helpers/mapping.pyi +++ b/opensearchpy/helpers/mapping.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/query.py b/opensearchpy/helpers/query.py index e132254b..784435d6 100644 --- a/opensearchpy/helpers/query.py +++ b/opensearchpy/helpers/query.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/query.pyi b/opensearchpy/helpers/query.pyi index a963ef05..673e83f9 100644 --- a/opensearchpy/helpers/query.pyi +++ b/opensearchpy/helpers/query.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/response/__init__.py b/opensearchpy/helpers/response/__init__.py index 91e4c044..d4792b11 100644 --- a/opensearchpy/helpers/response/__init__.py +++ b/opensearchpy/helpers/response/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: 
Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/response/__init__.pyi b/opensearchpy/helpers/response/__init__.pyi index 3f3af097..f592e46a 100644 --- a/opensearchpy/helpers/response/__init__.pyi +++ b/opensearchpy/helpers/response/__init__.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/response/aggs.py b/opensearchpy/helpers/response/aggs.py index a5e2e22d..c8e7d5dd 100644 --- a/opensearchpy/helpers/response/aggs.py +++ b/opensearchpy/helpers/response/aggs.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/response/aggs.pyi b/opensearchpy/helpers/response/aggs.pyi index ba92e56b..d943dbdd 100644 --- a/opensearchpy/helpers/response/aggs.pyi +++ b/opensearchpy/helpers/response/aggs.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/response/hit.py b/opensearchpy/helpers/response/hit.py index cf70a821..8f6230aa 100644 --- a/opensearchpy/helpers/response/hit.py +++ b/opensearchpy/helpers/response/hit.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/response/hit.pyi b/opensearchpy/helpers/response/hit.pyi index ae3cdf00..7597832d 100644 --- a/opensearchpy/helpers/response/hit.pyi +++ b/opensearchpy/helpers/response/hit.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/search.py b/opensearchpy/helpers/search.py index 0652b60a..30c59a92 100644 --- a/opensearchpy/helpers/search.py 
+++ b/opensearchpy/helpers/search.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/search.pyi b/opensearchpy/helpers/search.pyi index 92b46243..49eecb0d 100644 --- a/opensearchpy/helpers/search.pyi +++ b/opensearchpy/helpers/search.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/signer.py b/opensearchpy/helpers/signer.py index 436909e7..ad6e9c65 100644 --- a/opensearchpy/helpers/signer.py +++ b/opensearchpy/helpers/signer.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/test.py b/opensearchpy/helpers/test.py index 9338636a..e2467584 100644 --- a/opensearchpy/helpers/test.py +++ b/opensearchpy/helpers/test.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/test.pyi b/opensearchpy/helpers/test.pyi index 1363f821..a4d2302a 100644 --- a/opensearchpy/helpers/test.pyi +++ b/opensearchpy/helpers/test.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/update_by_query.py b/opensearchpy/helpers/update_by_query.py index 3be888bf..32c7b705 100644 --- a/opensearchpy/helpers/update_by_query.py +++ b/opensearchpy/helpers/update_by_query.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/update_by_query.pyi b/opensearchpy/helpers/update_by_query.pyi index 90597033..c0baf631 100644 --- 
a/opensearchpy/helpers/update_by_query.pyi +++ b/opensearchpy/helpers/update_by_query.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/utils.py b/opensearchpy/helpers/utils.py index 04f2ee37..4cd9dad8 100644 --- a/opensearchpy/helpers/utils.py +++ b/opensearchpy/helpers/utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/utils.pyi b/opensearchpy/helpers/utils.pyi index 74783974..decb7382 100644 --- a/opensearchpy/helpers/utils.pyi +++ b/opensearchpy/helpers/utils.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/wrappers.py b/opensearchpy/helpers/wrappers.py index 19cf3dec..968909a6 100644 --- a/opensearchpy/helpers/wrappers.py +++ b/opensearchpy/helpers/wrappers.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/helpers/wrappers.pyi b/opensearchpy/helpers/wrappers.pyi index fc79c384..704159dc 100644 --- a/opensearchpy/helpers/wrappers.pyi +++ b/opensearchpy/helpers/wrappers.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/plugins/__init__.py b/opensearchpy/plugins/__init__.py index 2f42da79..b0a5fb09 100644 --- a/opensearchpy/plugins/__init__.py +++ b/opensearchpy/plugins/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/plugins/__init__.pyi b/opensearchpy/plugins/__init__.pyi index 6c0097cd..22c54ac8 100644 --- 
a/opensearchpy/plugins/__init__.pyi +++ b/opensearchpy/plugins/__init__.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/plugins/alerting.py b/opensearchpy/plugins/alerting.py index defbf326..66b759a4 100644 --- a/opensearchpy/plugins/alerting.py +++ b/opensearchpy/plugins/alerting.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/plugins/alerting.pyi b/opensearchpy/plugins/alerting.pyi index d712e762..4454bf3c 100644 --- a/opensearchpy/plugins/alerting.pyi +++ b/opensearchpy/plugins/alerting.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/plugins/index_management.py b/opensearchpy/plugins/index_management.py index 435ab8d4..9f4f5a6e 100644 --- a/opensearchpy/plugins/index_management.py +++ b/opensearchpy/plugins/index_management.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/plugins/index_management.pyi b/opensearchpy/plugins/index_management.pyi index 24a59dc9..d4a6dbad 100644 --- a/opensearchpy/plugins/index_management.pyi +++ b/opensearchpy/plugins/index_management.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/serializer.py b/opensearchpy/serializer.py index 295c4af0..7463dbcc 100644 --- a/opensearchpy/serializer.py +++ b/opensearchpy/serializer.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/serializer.pyi b/opensearchpy/serializer.pyi 
index c68f51ca..6d798cce 100644 --- a/opensearchpy/serializer.pyi +++ b/opensearchpy/serializer.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/transport.py b/opensearchpy/transport.py index 301955df..c36178b2 100644 --- a/opensearchpy/transport.py +++ b/opensearchpy/transport.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/opensearchpy/transport.pyi b/opensearchpy/transport.pyi index dfdcedb8..fe33cfda 100644 --- a/opensearchpy/transport.pyi +++ b/opensearchpy/transport.pyi @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/samples/advanced_index_actions/advanced_index_actions_sample.py b/samples/advanced_index_actions/advanced_index_actions_sample.py index 391d36b9..96d7d742 100644 --- a/samples/advanced_index_actions/advanced_index_actions_sample.py +++ b/samples/advanced_index_actions/advanced_index_actions_sample.py @@ -1,6 +1,17 @@ -from opensearchpy import OpenSearch +#!/usr/bin/env python + +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. 
import time +from opensearchpy import OpenSearch # For cleaner output, comment in the two lines below to disable warnings and informational messages # import urllib3 @@ -10,73 +21,84 @@ def test_opensearch_examples(): # Set up client = OpenSearch( - hosts=['https://localhost:9200'], + hosts=["https://localhost:9200"], use_ssl=True, verify_certs=False, - http_auth=('admin', 'admin') + http_auth=("admin", "admin"), ) - client.indices.create(index='movies') + client.indices.create(index="movies") print("'movies' index created!") - + # Test Clear Index Cache - client.indices.clear_cache(index='movies') + client.indices.clear_cache(index="movies") print("Cache for 'movies' index cleared!") - client.indices.clear_cache(index='movies', query=True) + client.indices.clear_cache(index="movies", query=True) print("Query cache for 'movies' index cleared!") - client.indices.clear_cache(index='movies', fielddata=True, request=True) + client.indices.clear_cache(index="movies", fielddata=True, request=True) print("Field data and request cache for 'movies' index cleared!") - + # Test Flush Index - client.indices.flush(index='movies') + client.indices.flush(index="movies") print("'movies' index flushed!") - + # Test Refresh Index - client.indices.refresh(index='movies') + client.indices.refresh(index="movies") print("'movies' index refreshed!") - + # Test Close or Open Index - client.indices.close(index='movies') + client.indices.close(index="movies") print("'movies' index closed!") time.sleep(2) # add sleep to ensure the index has time to close - client.indices.open(index='movies') + client.indices.open(index="movies") print("'movies' index opened!") - + # Test Force Merge Index - client.indices.forcemerge(index='movies') + client.indices.forcemerge(index="movies") print("'movies' index force merged!") - + # Test Clone - client.indices.put_settings(index='movies', body={'index': {'blocks': {'write': True}}}) + client.indices.put_settings( + index="movies", body={"index": {"blocks": 
{"write": True}}} + ) print("Write operations blocked for 'movies' index!") time.sleep(2) - client.indices.clone(index='movies', target='movies_clone') + client.indices.clone(index="movies", target="movies_clone") print("'movies' index cloned to 'movies_clone'!") - client.indices.put_settings(index='movies', body={'index': {'blocks': {'write': False}}}) + client.indices.put_settings( + index="movies", body={"index": {"blocks": {"write": False}}} + ) print("Write operations enabled for 'movies' index!") - - # Test Split + + # Test Split client.indices.create( - index='books', - body={'settings': { - 'index': {'number_of_shards': 5, 'number_of_routing_shards': 30, 'blocks': {'write': True}}}} + index="books", + body={ + "settings": { + "index": { + "number_of_shards": 5, + "number_of_routing_shards": 30, + "blocks": {"write": True}, + } + } + }, ) print("'books' index created!") time.sleep(2) # add sleep to ensure the index has time to become read-only client.indices.split( - index='books', - target='bigger_books', - body={'settings': {'index': {'number_of_shards': 10 }}} + index="books", + target="bigger_books", + body={"settings": {"index": {"number_of_shards": 10}}}, ) print("'books' index split into 'bigger_books'!") - client.indices.put_settings(index='books', body={'index': {'blocks': {'write': False}}}) + client.indices.put_settings( + index="books", body={"index": {"blocks": {"write": False}}} + ) print("Write operations enabled for 'books' index!") - + # Cleanup - client.indices.delete(index=['movies', 'books', 'movies_clone', 'bigger_books']) + client.indices.delete(index=["movies", "books", "movies_clone", "bigger_books"]) print("All indices deleted!") - - if __name__ == "__main__": - test_opensearch_examples() \ No newline at end of file + test_opensearch_examples() diff --git a/samples/aws/search-requests.py b/samples/aws/search-requests.py index 1f14f55e..0af366f0 100644 --- a/samples/aws/search-requests.py +++ b/samples/aws/search-requests.py @@ -1,3 
+1,6 @@ +#!/usr/bin/env python + +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -8,62 +11,59 @@ # GitHub history for details. import logging - from os import environ from time import sleep from urllib.parse import urlparse from boto3 import Session -from opensearchpy import RequestsAWSV4SignerAuth, OpenSearch, RequestsHttpConnection + +from opensearchpy import OpenSearch, RequestsAWSV4SignerAuth, RequestsHttpConnection # verbose logging -logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO) +logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO) # cluster endpoint, for example: my-test-domain.us-east-1.es.amazonaws.com -url = urlparse(environ['ENDPOINT']) -region = environ.get('AWS_REGION', 'us-east-1') -service = environ.get('SERVICE', 'es') +url = urlparse(environ["ENDPOINT"]) +region = environ.get("AWS_REGION", "us-east-1") +service = environ.get("SERVICE", "es") credentials = Session().get_credentials() auth = RequestsAWSV4SignerAuth(credentials, region, service) client = OpenSearch( - hosts=[{ - 'host': url.netloc, - 'port': url.port or 443 - }], - http_auth=auth, - use_ssl=True, - verify_certs=True, - connection_class=RequestsHttpConnection, - timeout=30 + hosts=[{"host": url.netloc, "port": url.port or 443}], + http_auth=auth, + use_ssl=True, + verify_certs=True, + connection_class=RequestsHttpConnection, + timeout=30, ) # TODO: remove when OpenSearch Serverless adds support for / -if service == 'es': - info = client.info() - print(f"{info['version']['distribution']}: {info['version']['number']}") +if service == "es": + info = client.info() + print(f"{info['version']['distribution']}: {info['version']['number']}") # create an index -index = 'movies' +index = "movies" client.indices.create(index=index) try: - # index data - document = {'director': 'Bennett Miller', 'title': 'Moneyball', 'year': 2011} - client.index(index=index, 
body=document, id='1') + # index data + document = {"director": "Bennett Miller", "title": "Moneyball", "year": 2011} + client.index(index=index, body=document, id="1") - # wait for the document to index - sleep(1) + # wait for the document to index + sleep(1) - # search for the document - results = client.search(body={'query': {'match': {'director': 'miller'}}}) - for hit in results['hits']['hits']: - print(hit['_source']) + # search for the document + results = client.search(body={"query": {"match": {"director": "miller"}}}) + for hit in results["hits"]["hits"]: + print(hit["_source"]) - # delete the document - client.delete(index=index, id='1') + # delete the document + client.delete(index=index, id="1") finally: - # delete the index - client.indices.delete(index=index) \ No newline at end of file + # delete the index + client.indices.delete(index=index) diff --git a/samples/aws/search-urllib3.py b/samples/aws/search-urllib3.py index 46d6a89f..534caf40 100644 --- a/samples/aws/search-urllib3.py +++ b/samples/aws/search-urllib3.py @@ -1,3 +1,6 @@ +#!/usr/bin/env python + +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -8,62 +11,59 @@ # GitHub history for details. 
import logging - from os import environ from time import sleep from urllib.parse import urlparse from boto3 import Session -from opensearchpy import Urllib3AWSV4SignerAuth, OpenSearch, Urllib3HttpConnection + +from opensearchpy import OpenSearch, Urllib3AWSV4SignerAuth, Urllib3HttpConnection # verbose logging -logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO) +logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO) # cluster endpoint, for example: my-test-domain.us-east-1.es.amazonaws.com -url = urlparse(environ['ENDPOINT']) -region = environ.get('AWS_REGION', 'us-east-1') -service = environ.get('SERVICE', 'es') +url = urlparse(environ["ENDPOINT"]) +region = environ.get("AWS_REGION", "us-east-1") +service = environ.get("SERVICE", "es") credentials = Session().get_credentials() auth = Urllib3AWSV4SignerAuth(credentials, region, service) client = OpenSearch( - hosts=[{ - 'host': url.netloc, - 'port': url.port or 443 - }], - http_auth=auth, - use_ssl=True, - verify_certs=True, - connection_class=Urllib3HttpConnection, - timeout=30 + hosts=[{"host": url.netloc, "port": url.port or 443}], + http_auth=auth, + use_ssl=True, + verify_certs=True, + connection_class=Urllib3HttpConnection, + timeout=30, ) # TODO: remove when OpenSearch Serverless adds support for / -if service == 'es': - info = client.info() - print(f"{info['version']['distribution']}: {info['version']['number']}") +if service == "es": + info = client.info() + print(f"{info['version']['distribution']}: {info['version']['number']}") # create an index -index = 'movies' +index = "movies" client.indices.create(index=index) try: - # index data - document = {'director': 'Bennett Miller', 'title': 'Moneyball', 'year': 2011} - client.index(index=index, body=document, id='1') + # index data + document = {"director": "Bennett Miller", "title": "Moneyball", "year": 2011} + client.index(index=index, body=document, id="1") - # wait for the document to index - sleep(1) + # wait 
for the document to index + sleep(1) - # search for the document - results = client.search(body={'query': {'match': {'director': 'miller'}}}) - for hit in results['hits']['hits']: - print(hit['_source']) + # search for the document + results = client.search(body={"query": {"match": {"director": "miller"}}}) + for hit in results["hits"]["hits"]: + print(hit["_source"]) - # delete the document - client.delete(index=index, id='1') + # delete the document + client.delete(index=index, id="1") finally: - # delete the index - client.indices.delete(index=index) \ No newline at end of file + # delete the index + client.indices.delete(index=index) diff --git a/samples/bulk/bulk-array.py b/samples/bulk/bulk-array.py index 8df6fa63..1859d541 100755 --- a/samples/bulk/bulk-array.py +++ b/samples/bulk/bulk-array.py @@ -1,59 +1,58 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. 
+ import os -import json from opensearchpy import OpenSearch # connect to an instance of OpenSearch -host = os.getenv('HOST', default='localhost') -port = int(os.getenv('PORT', 9200)) -auth = ( - os.getenv('USERNAME', 'admin'), - os.getenv('PASSWORD', 'admin') -) +host = os.getenv("HOST", default="localhost") +port = int(os.getenv("PORT", 9200)) +auth = (os.getenv("USERNAME", "admin"), os.getenv("PASSWORD", "admin")) client = OpenSearch( - hosts = [{'host': host, 'port': port}], - http_auth = auth, - use_ssl = True, - verify_certs = False, - ssl_show_warn = False + hosts=[{"host": host, "port": port}], + http_auth=auth, + use_ssl=True, + verify_certs=False, + ssl_show_warn=False, ) # check whether an index exists index_name = "my-index" if not client.indices.exists(index_name): - - client.indices.create(index_name, + client.indices.create( + index_name, body={ - "mappings":{ + "mappings": { "properties": { - "value": { - "type": "float" - }, + "value": {"type": "float"}, } } - } + }, ) # index data data = [] for i in range(100): - data.append({ "index": {"_index": index_name, "_id": i }}) - data.append({ "value": i }) + data.append({"index": {"_index": index_name, "_id": i}}) + data.append({"value": i}) rc = client.bulk(data) if rc["errors"]: - print(f"There were errors:") + print("There were errors:") for item in rc["items"]: print(f"{item['index']['status']}: {item['index']['error']['type']}") else: @@ -61,4 +60,3 @@ # delete index client.indices.delete(index=index_name) - diff --git a/samples/bulk/bulk-helpers.py b/samples/bulk/bulk-helpers.py index 1210ee86..3dc165c8 100755 --- a/samples/bulk/bulk-helpers.py +++ b/samples/bulk/bulk-helpers.py @@ -1,58 +1,56 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. 
See +# GitHub history for details. + import os -import json from opensearchpy import OpenSearch, helpers # connect to an instance of OpenSearch -host = os.getenv('HOST', default='localhost') -port = int(os.getenv('PORT', 9200)) -auth = ( - os.getenv('USERNAME', 'admin'), - os.getenv('PASSWORD', 'admin') -) +host = os.getenv("HOST", default="localhost") +port = int(os.getenv("PORT", 9200)) +auth = (os.getenv("USERNAME", "admin"), os.getenv("PASSWORD", "admin")) client = OpenSearch( - hosts = [{'host': host, 'port': port}], - http_auth = auth, - use_ssl = True, - verify_certs = False, - ssl_show_warn = False + hosts=[{"host": host, "port": port}], + http_auth=auth, + use_ssl=True, + verify_certs=False, + ssl_show_warn=False, ) # check whether an index exists index_name = "my-index" if not client.indices.exists(index_name): - - client.indices.create(index_name, + client.indices.create( + index_name, body={ - "mappings":{ + "mappings": { "properties": { - "value": { - "type": "float" - }, + "value": {"type": "float"}, } } - } + }, ) # index data data = [] for i in range(100): - data.append({ "_index": index_name, "_id": i, "value": i }) + data.append({"_index": index_name, "_id": i, "value": i}) rc = helpers.bulk(client, data) print(f"Bulk-inserted {rc[0]} items.") # delete index client.indices.delete(index=index_name) - diff --git a/samples/bulk/bulk-ld.py b/samples/bulk/bulk-ld.py index 5487c68f..fff0ae98 100755 --- a/samples/bulk/bulk-ld.py +++ b/samples/bulk/bulk-ld.py @@ -1,59 +1,59 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. 
+ -import os import json +import os from opensearchpy import OpenSearch # connect to an instance of OpenSearch -host = os.getenv('HOST', default='localhost') -port = int(os.getenv('PORT', 9200)) -auth = ( - os.getenv('USERNAME', 'admin'), - os.getenv('PASSWORD', 'admin') -) +host = os.getenv("HOST", default="localhost") +port = int(os.getenv("PORT", 9200)) +auth = (os.getenv("USERNAME", "admin"), os.getenv("PASSWORD", "admin")) client = OpenSearch( - hosts = [{'host': host, 'port': port}], - http_auth = auth, - use_ssl = True, - verify_certs = False, - ssl_show_warn = False + hosts=[{"host": host, "port": port}], + http_auth=auth, + use_ssl=True, + verify_certs=False, + ssl_show_warn=False, ) # check whether an index exists index_name = "my-index" if not client.indices.exists(index_name): - - client.indices.create(index_name, + client.indices.create( + index_name, body={ - "mappings":{ + "mappings": { "properties": { - "value": { - "type": "float" - }, + "value": {"type": "float"}, } } - } + }, ) # index data -data = '' +data = "" for i in range(100): - data += json.dumps({ "index": {"_index": index_name, "_id": i }}) + "\n" - data += json.dumps({ "value": i }) + "\n" + data += json.dumps({"index": {"_index": index_name, "_id": i}}) + "\n" + data += json.dumps({"value": i}) + "\n" rc = client.bulk(data) if rc["errors"]: - print(f"There were errors:") + print("There were errors:") for item in rc["items"]: print(f"{item['index']['status']}: {item['index']['error']['type']}") else: @@ -61,4 +61,3 @@ # delete index client.indices.delete(index=index_name) - diff --git a/samples/hello/hello-async.py b/samples/hello/hello-async.py index 572ef91c..9975f575 100755 --- a/samples/hello/hello-async.py +++ b/samples/hello/hello-async.py @@ -1,107 +1,96 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open 
source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. + import asyncio from opensearchpy import AsyncOpenSearch + async def main(): # connect to OpenSearch - host = 'localhost' + host = "localhost" port = 9200 - auth = ('admin', 'admin') # For testing only. Don't store credentials in code. + auth = ("admin", "admin") # For testing only. Don't store credentials in code. client = AsyncOpenSearch( - hosts = [{'host': host, 'port': port}], - http_auth = auth, - use_ssl = True, - verify_certs = False, - ssl_show_warn = False + hosts=[{"host": host, "port": port}], + http_auth=auth, + use_ssl=True, + verify_certs=False, + ssl_show_warn=False, ) try: - info = await client.info() - print(f"Welcome to {info['version']['distribution']} {info['version']['number']}!") + info = await client.info() + print( + f"Welcome to {info['version']['distribution']} {info['version']['number']}!" + ) + + # create an index + + index_name = "test-index" + + index_body = {"settings": {"index": {"number_of_shards": 4}}} + + if not await client.indices.exists(index=index_name): + await client.indices.create(index_name, body=index_body) + + # add some documents to the index, asynchronously + await asyncio.gather( + *[ + client.index( + index=index_name, + body={ + "title": f"Moneyball {i}", + "director": "Bennett Miller", + "year": "2011", + }, + id=i, + ) + for i in range(10) + ] + ) - # create an index + # refresh the index + await client.indices.refresh(index=index_name) - index_name = 'test-index' + # search for a document + q = "miller" - index_body = { - 'settings': { - 'index': { - 'number_of_shards': 4 - } + query = { + "size": 5, + "query": {"multi_match": {"query": q, "fields": ["title^2", "director"]}}, } - } - if not await client.indices.exists(index=index_name): - await client.indices.create( - index_name, - body=index_body + results = await client.search(body=query, index=index_name) + + for hit in results["hits"]["hits"]: + 
print(hit) + + # delete the documents + await asyncio.gather( + *[client.delete(index=index_name, id=i) for i in range(10)] ) - # add some documents to the index, asynchronously - await asyncio.gather(*[ - client.index( - index = index_name, - body = { - 'title': f"Moneyball {i}", - 'director': 'Bennett Miller', - 'year': '2011' - }, - id = i - ) for i in range(10) - ]) - - # refresh the index - await client.indices.refresh(index=index_name) - - # search for a document - q = 'miller' - - query = { - 'size': 5, - 'query': { - 'multi_match': { - 'query': q, - 'fields': ['title^2', 'director'] - } - } - } - - results = await client.search( - body = query, - index = index_name - ) - - for hit in results["hits"]["hits"]: - print(hit) - - # delete the documents - await asyncio.gather(*[ - client.delete( - index = index_name, - id = i - ) for i in range(10) - ]) - - # delete the index - await client.indices.delete( - index = index_name - ) - - finally: - await client.close() + # delete the index + await client.indices.delete(index=index_name) + + finally: + await client.close() + if __name__ == "__main__": loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) loop.run_until_complete(main()) loop.close() - diff --git a/samples/hello/hello.py b/samples/hello/hello.py index d72c2ab7..0b589c9d 100755 --- a/samples/hello/hello.py +++ b/samples/hello/hello.py @@ -1,25 +1,30 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. + from opensearchpy import OpenSearch # connect to OpenSearch -host = 'localhost' +host = "localhost" port = 9200 -auth = ('admin', 'admin') # For testing only. Don't store credentials in code. +auth = ("admin", "admin") # For testing only. Don't store credentials in code. 
client = OpenSearch( - hosts = [{'host': host, 'port': port}], - http_auth = auth, - use_ssl = True, - verify_certs = False, - ssl_show_warn = False + hosts=[{"host": host, "port": port}], + http_auth=auth, + use_ssl=True, + verify_certs=False, + ssl_show_warn=False, ) info = client.info() @@ -27,76 +32,45 @@ # create an index -index_name = 'test-index' +index_name = "test-index" -index_body = { - 'settings': { - 'index': { - 'number_of_shards': 4 - } - } -} +index_body = {"settings": {"index": {"number_of_shards": 4}}} -response = client.indices.create( - index_name, - body=index_body -) +response = client.indices.create(index_name, body=index_body) print(response) # add a document to the index -document = { - 'title': 'Moneyball', - 'director': 'Bennett Miller', - 'year': '2011' -} +document = {"title": "Moneyball", "director": "Bennett Miller", "year": "2011"} -id = '1' +id = "1" -response = client.index( - index = index_name, - body = document, - id = id, - refresh = True -) +response = client.index(index=index_name, body=document, id=id, refresh=True) print(response) # search for a document -q = 'miller' +q = "miller" query = { - 'size': 5, - 'query': { - 'multi_match': { - 'query': q, - 'fields': ['title^2', 'director'] - } - } + "size": 5, + "query": {"multi_match": {"query": q, "fields": ["title^2", "director"]}}, } -response = client.search( - body = query, - index = index_name -) +response = client.search(body=query, index=index_name) print(response) # delete the document -response = client.delete( - index = index_name, - id = id -) +response = client.delete(index=index_name, id=id) print(response) # delete the index -response = client.indices.delete( - index = index_name -) +response = client.indices.delete(index=index_name) print(response) diff --git a/samples/index_template/index_template_sample.py b/samples/index_template/index_template_sample.py index dab504be..4fe580ac 100644 --- a/samples/index_template/index_template_sample.py +++ 
b/samples/index_template/index_template_sample.py @@ -1,143 +1,129 @@ +#!/usr/bin/env python + +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. from opensearchpy import OpenSearch # Create a client instance client = OpenSearch( - hosts=['https://localhost:9200'], - use_ssl=True, - verify_certs=False, - http_auth=('admin', 'admin') + hosts=["https://localhost:9200"], + use_ssl=True, + verify_certs=False, + http_auth=("admin", "admin"), ) # You can create an index template to define default settings and mappings for indices of certain patterns. The following example creates an index template named `books` with default settings and mappings for indices of the `books-*` pattern: client.indices.put_index_template( -name='books', -body={ - 'index_patterns': ['books-*'], - 'priority': 1, - 'template': { - 'settings': { - 'index': { - 'number_of_shards': 3, - 'number_of_replicas': 0 - } + name="books", + body={ + "index_patterns": ["books-*"], + "priority": 1, + "template": { + "settings": {"index": {"number_of_shards": 3, "number_of_replicas": 0}}, + "mappings": { + "properties": { + "title": {"type": "text"}, + "author": {"type": "text"}, + "published_on": {"type": "date"}, + "pages": {"type": "integer"}, + } + }, + }, }, - 'mappings': { - 'properties': { - 'title': { 'type': 'text' }, - 'author': { 'type': 'text' }, - 'published_on': { 'type': 'date' }, - 'pages': { 'type': 'integer' } - } - } - } -} ) # Now, when you create an index that matches the `books-*` pattern, OpenSearch will automatically apply the template's settings and mappings to the index. 
Let's create an index named books-nonfiction and verify that its settings and mappings match those of the template: -client.indices.create(index='books-nonfiction') -print(client.indices.get(index='books-nonfiction')) +client.indices.create(index="books-nonfiction") +print(client.indices.get(index="books-nonfiction")) # If multiple index templates match the index's name, OpenSearch will apply the template with the highest `priority`. The following example creates two index templates named `books-*` and `books-fiction-*` with different settings: client.indices.put_index_template( -name='books', -body={ - 'index_patterns': ['books-*'], - 'priority': 1, - 'template': { - 'settings': { - 'index': { - 'number_of_shards': 3, - 'number_of_replicas': 0 - } - } - } -} + name="books", + body={ + "index_patterns": ["books-*"], + "priority": 1, + "template": { + "settings": {"index": {"number_of_shards": 3, "number_of_replicas": 0}} + }, + }, ) client.indices.put_index_template( -name='books-fiction', -body={ - 'index_patterns': ['books-fiction-*'], - 'priority': 2, - 'template': { - 'settings': { - 'index': { - 'number_of_shards': 1, - 'number_of_replicas': 1 - } - } - } -} + name="books-fiction", + body={ + "index_patterns": ["books-fiction-*"], + "priority": 2, + "template": { + "settings": {"index": {"number_of_shards": 1, "number_of_replicas": 1}} + }, + }, ) # # Test multiple index templates -client.indices.create(index='books-fiction-romance') -print(client.indices.get(index='books-fiction-romance')) +client.indices.create(index="books-fiction-romance") +print(client.indices.get(index="books-fiction-romance")) # Composable index templates are a new type of index template that allow you to define multiple component templates and compose them into a final template. 
The following example creates a component template named `books_mappings` with default mappings for indices of the `books-*` and `books-fiction-*` patterns: client.cluster.put_component_template( -name='books_mappings', -body={ - 'template': { - 'mappings': { - 'properties': { - 'title': { 'type': 'text' }, - 'author': { 'type': 'text' }, - 'published_on': { 'type': 'date' }, - 'pages': { 'type': 'integer' } - } - } - } -} + name="books_mappings", + body={ + "template": { + "mappings": { + "properties": { + "title": {"type": "text"}, + "author": {"type": "text"}, + "published_on": {"type": "date"}, + "pages": {"type": "integer"}, + } + } + } + }, ) client.indices.put_index_template( -name='books', -body={ - 'index_patterns': ['books-*'], - 'composed_of': ['books_mappings'], - 'priority': 4, - 'template': { - 'settings': { - 'index': { - 'number_of_shards': 3, - 'number_of_replicas': 0 - } - } - } -} + name="books", + body={ + "index_patterns": ["books-*"], + "composed_of": ["books_mappings"], + "priority": 4, + "template": { + "settings": {"index": {"number_of_shards": 3, "number_of_replicas": 0}} + }, + }, ) client.indices.put_index_template( -name='books-fiction', -body={ - 'index_patterns': ['books-fiction-*'], - 'composed_of': ['books_mappings'], - 'priority': 5, - 'template': { - 'settings': { - 'index': { - 'number_of_shards': 1, - 'number_of_replicas': 1 - } - } - } -} + name="books-fiction", + body={ + "index_patterns": ["books-fiction-*"], + "composed_of": ["books_mappings"], + "priority": 5, + "template": { + "settings": {"index": {"number_of_shards": 1, "number_of_replicas": 1}} + }, + }, ) # Test composable index templates -client.indices.create(index='books-fiction-horror') -print(client.indices.get(index='books-fiction-horror')) +client.indices.create(index="books-fiction-horror") +print(client.indices.get(index="books-fiction-horror")) # Get an index template -print(client.indices.get_index_template(name='books')) 
+print(client.indices.get_index_template(name="books")) # Delete an index template -client.indices.delete_index_template(name='books') +client.indices.delete_index_template(name="books") # Cleanup -client.indices.delete(index='books-*') -client.indices.delete_index_template(name='books-fiction') -client.cluster.delete_component_template(name='books_mappings') \ No newline at end of file +client.indices.delete(index="books-*") +client.indices.delete_index_template(name="books-fiction") +client.cluster.delete_component_template(name="books_mappings") diff --git a/samples/json/hello-async.py b/samples/json/hello-async.py index aa4840c4..b9105d35 100755 --- a/samples/json/hello-async.py +++ b/samples/json/hello-async.py @@ -1,90 +1,96 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. + import asyncio from opensearchpy import AsyncOpenSearch + async def main(): # connect to OpenSearch - host = 'localhost' + host = "localhost" port = 9200 - auth = ('admin', 'admin') # For testing only. Don't store credentials in code. + auth = ("admin", "admin") # For testing only. Don't store credentials in code. client = AsyncOpenSearch( - hosts = [{'host': host, 'port': port}], - http_auth = auth, - use_ssl = True, - verify_certs = False, - ssl_show_warn = False + hosts=[{"host": host, "port": port}], + http_auth=auth, + use_ssl=True, + verify_certs=False, + ssl_show_warn=False, ) try: - info = await client.transport.perform_request('GET', '/') - print(f"Welcome to {info['version']['distribution']} {info['version']['number']}!") + info = await client.transport.perform_request("GET", "/") + print( + f"Welcome to {info['version']['distribution']} {info['version']['number']}!" 
+ ) - # create an index + # create an index - index_name = 'movies' + index_name = "movies" - index_body = { - 'settings': { - 'index': { - 'number_of_shards': 4 - } - } - } + index_body = {"settings": {"index": {"number_of_shards": 4}}} - print(await client.transport.perform_request("PUT", f"/{index_name}", body=index_body)) + print( + await client.transport.perform_request( + "PUT", f"/{index_name}", body=index_body + ) + ) - # add a document to the index + # add a document to the index - document = { - 'title': 'Moneyball', - 'director': 'Bennett Miller', - 'year': '2011' - } + document = {"title": "Moneyball", "director": "Bennett Miller", "year": "2011"} - id = '1' + id = "1" - print(await client.transport.perform_request("PUT", f"/{index_name}/_doc/{id}?refresh=true", body = document)) + print( + await client.transport.perform_request( + "PUT", f"/{index_name}/_doc/{id}?refresh=true", body=document + ) + ) - # search for a document + # search for a document - q = 'miller' + q = "miller" - query = { - 'size': 5, - 'query': { - 'multi_match': { - 'query': q, - 'fields': ['title^2', 'director'] - } + query = { + "size": 5, + "query": {"multi_match": {"query": q, "fields": ["title^2", "director"]}}, } - } - print(await client.transport.perform_request("POST", f"/{index_name}/_search", body = query)) + print( + await client.transport.perform_request( + "POST", f"/{index_name}/_search", body=query + ) + ) - # delete the document + # delete the document - print(await client.transport.perform_request("DELETE", f"/{index_name}/_doc/{id}")) + print( + await client.transport.perform_request("DELETE", f"/{index_name}/_doc/{id}") + ) - # delete the index + # delete the index - print(await client.transport.perform_request("DELETE", f"/{index_name}")) + print(await client.transport.perform_request("DELETE", f"/{index_name}")) + finally: + await client.close() - finally: - await client.close() if __name__ == "__main__": loop = asyncio.new_event_loop() 
asyncio.set_event_loop(loop) loop.run_until_complete(main()) loop.close() - diff --git a/samples/json/hello.py b/samples/json/hello.py index d5b8e70f..5df36f5f 100755 --- a/samples/json/hello.py +++ b/samples/json/hello.py @@ -1,71 +1,65 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. + from opensearchpy import OpenSearch # connect to OpenSearch -host = 'localhost' +host = "localhost" port = 9200 -auth = ('admin', 'admin') # For testing only. Don't store credentials in code. +auth = ("admin", "admin") # For testing only. Don't store credentials in code. client = OpenSearch( - hosts = [{'host': host, 'port': port}], - http_auth = auth, - use_ssl = True, - verify_certs = False, - ssl_show_warn = False + hosts=[{"host": host, "port": port}], + http_auth=auth, + use_ssl=True, + verify_certs=False, + ssl_show_warn=False, ) -info = client.transport.perform_request('GET', '/') +info = client.transport.perform_request("GET", "/") print(f"Welcome to {info['version']['distribution']} {info['version']['number']}!") # create an index -index_name = 'movies' +index_name = "movies" -index_body = { - 'settings': { - 'index': { - 'number_of_shards': 4 - } - } -} +index_body = {"settings": {"index": {"number_of_shards": 4}}} print(client.transport.perform_request("PUT", f"/{index_name}", body=index_body)) # add a document to the index -document = { - 'title': 'Moneyball', - 'director': 'Bennett Miller', - 'year': '2011' -} +document = {"title": "Moneyball", "director": "Bennett Miller", "year": "2011"} -id = '1' +id = "1" -print(client.transport.perform_request("PUT", f"/{index_name}/_doc/{id}?refresh=true", body = document)) +print( + client.transport.perform_request( + "PUT", 
f"/{index_name}/_doc/{id}?refresh=true", body=document + ) +) # search for a document -q = 'miller' +q = "miller" query = { - 'size': 5, - 'query': { - 'multi_match': { - 'query': q, - 'fields': ['title^2', 'director'] - } - } + "size": 5, + "query": {"multi_match": {"query": q, "fields": ["title^2", "director"]}}, } -print(client.transport.perform_request("POST", f"/{index_name}/_search", body = query)) +print(client.transport.perform_request("POST", f"/{index_name}/_search", body=query)) # delete the document diff --git a/samples/knn/knn-async-basics.py b/samples/knn/knn-async-basics.py index c237aa46..a7bb9d2f 100755 --- a/samples/knn/knn-async-basics.py +++ b/samples/knn/knn-async-basics.py @@ -1,33 +1,36 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. 
+ import asyncio import os import random -from opensearchpy import AsyncOpenSearch, AsyncHttpConnection, helpers +from opensearchpy import AsyncHttpConnection, AsyncOpenSearch, helpers + async def main(): # connect to an instance of OpenSearch - host = os.getenv('HOST', default='localhost') - port = int(os.getenv('PORT', 9200)) - auth = ( - os.getenv('USERNAME', 'admin'), - os.getenv('PASSWORD', 'admin') - ) + host = os.getenv("HOST", default="localhost") + port = int(os.getenv("PORT", 9200)) + auth = (os.getenv("USERNAME", "admin"), os.getenv("PASSWORD", "admin")) client = AsyncOpenSearch( - hosts = [{'host': host, 'port': port}], - http_auth = auth, - use_ssl = True, - verify_certs = False, + hosts=[{"host": host, "port": port}], + http_auth=auth, + use_ssl=True, + verify_certs=False, connection_class=AsyncHttpConnection, - ssl_show_warn = False + ssl_show_warn=False, ) # check whether an index exists @@ -35,34 +38,32 @@ async def main(): dimensions = 5 if not await client.indices.exists(index_name): - await client.indices.create(index_name, + await client.indices.create( + index_name, body={ - "settings":{ - "index.knn": True - }, - "mappings":{ + "settings": {"index.knn": True}, + "mappings": { "properties": { - "values": { - "type": "knn_vector", - "dimension": dimensions - }, + "values": {"type": "knn_vector", "dimension": dimensions}, } - } - } + }, + }, ) # index data vectors = [] for i in range(10): vec = [] - for j in range(dimensions): - vec.append(round(random.uniform(0, 1), 2)) - - vectors.append({ - "_index": index_name, - "_id": i, - "values": vec, - }) + for j in range(dimensions): + vec.append(round(random.uniform(0, 1), 2)) + + vectors.append( + { + "_index": index_name, + "_id": i, + "values": vec, + } + ) # bulk index await helpers.async_bulk(client, vectors) @@ -71,8 +72,8 @@ async def main(): # search vec = [] - for j in range(dimensions): - vec.append(round(random.uniform(0, 1), 2)) + for j in range(dimensions): + 
vec.append(round(random.uniform(0, 1), 2)) print(f"Searching for {vec} ...") search_query = {"query": {"knn": {"values": {"vector": vec, "k": 3}}}} @@ -85,9 +86,9 @@ async def main(): await client.close() + if __name__ == "__main__": loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) loop.run_until_complete(main()) loop.close() - diff --git a/samples/knn/knn-basics.py b/samples/knn/knn-basics.py index 7868df7e..96efb028 100755 --- a/samples/knn/knn-basics.py +++ b/samples/knn/knn-basics.py @@ -1,10 +1,15 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. + import os import random @@ -13,19 +18,16 @@ # connect to an instance of OpenSearch -host = os.getenv('HOST', default='localhost') -port = int(os.getenv('PORT', 9200)) -auth = ( - os.getenv('USERNAME', 'admin'), - os.getenv('PASSWORD', 'admin') -) +host = os.getenv("HOST", default="localhost") +port = int(os.getenv("PORT", 9200)) +auth = (os.getenv("USERNAME", "admin"), os.getenv("PASSWORD", "admin")) client = OpenSearch( - hosts = [{'host': host, 'port': port}], - http_auth = auth, - use_ssl = True, - verify_certs = False, - ssl_show_warn = False + hosts=[{"host": host, "port": port}], + http_auth=auth, + use_ssl=True, + verify_certs=False, + ssl_show_warn=False, ) # check whether an index exists @@ -33,34 +35,32 @@ dimensions = 5 if not client.indices.exists(index_name): - client.indices.create(index_name, + client.indices.create( + index_name, body={ - "settings":{ - "index.knn": True - }, - "mappings":{ + "settings": {"index.knn": True}, + "mappings": { "properties": { - "values": { - "type": "knn_vector", - "dimension": dimensions - }, + "values": {"type": "knn_vector", "dimension": dimensions}, } - } - } + }, + }, ) # 
index data vectors = [] for i in range(10): vec = [] - for j in range(dimensions): - vec.append(round(random.uniform(0, 1), 2)) - - vectors.append({ - "_index": index_name, - "_id": i, - "values": vec, - }) + for j in range(dimensions): + vec.append(round(random.uniform(0, 1), 2)) + + vectors.append( + { + "_index": index_name, + "_id": i, + "values": vec, + } + ) # bulk index helpers.bulk(client, vectors) @@ -69,8 +69,8 @@ # search vec = [] -for j in range(dimensions): - vec.append(round(random.uniform(0, 1), 2)) +for j in range(dimensions): + vec.append(round(random.uniform(0, 1), 2)) print(f"Searching for {vec} ...") search_query = {"query": {"knn": {"values": {"vector": vec, "k": 3}}}} @@ -80,4 +80,3 @@ # delete index client.indices.delete(index=index_name) - diff --git a/samples/knn/knn-boolean-filter.py b/samples/knn/knn-boolean-filter.py index a99b1683..5ae7704c 100755 --- a/samples/knn/knn-boolean-filter.py +++ b/samples/knn/knn-boolean-filter.py @@ -1,10 +1,15 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. 
+ import os import random @@ -13,19 +18,16 @@ # connect to an instance of OpenSearch -host = os.getenv('HOST', default='localhost') -port = int(os.getenv('PORT', 9200)) -auth = ( - os.getenv('USERNAME', 'admin'), - os.getenv('PASSWORD', 'admin') -) +host = os.getenv("HOST", default="localhost") +port = int(os.getenv("PORT", 9200)) +auth = (os.getenv("USERNAME", "admin"), os.getenv("PASSWORD", "admin")) client = OpenSearch( - hosts = [{'host': host, 'port': port}], - http_auth = auth, - use_ssl = True, - verify_certs = False, - ssl_show_warn = False + hosts=[{"host": host, "port": port}], + http_auth=auth, + use_ssl=True, + verify_certs=False, + ssl_show_warn=False, ) # check whether an index exists @@ -33,38 +35,34 @@ dimensions = 5 if not client.indices.exists(index_name): - client.indices.create(index_name, + client.indices.create( + index_name, body={ - "settings":{ - "index.knn": True - }, - "mappings":{ + "settings": {"index.knn": True}, + "mappings": { "properties": { - "values": { - "type": "knn_vector", - "dimension": dimensions - }, + "values": {"type": "knn_vector", "dimension": dimensions}, } - } - } + }, + }, ) # index data vectors = [] -genres = ['fiction', 'drama', 'romance'] +genres = ["fiction", "drama", "romance"] for i in range(3000): vec = [] - for j in range(dimensions): - vec.append(round(random.uniform(0, 1), 2)) - - vectors.append({ - "_index": index_name, - "_id": i, - "values": vec, - "metadata": { - "genre": random.choice(genres) + for j in range(dimensions): + vec.append(round(random.uniform(0, 1), 2)) + + vectors.append( + { + "_index": index_name, + "_id": i, + "values": vec, + "metadata": {"genre": random.choice(genres)}, } - }) + ) # bulk index helpers.bulk(client, vectors) @@ -74,30 +72,15 @@ # search genre = random.choice(genres) vec = [] -for j in range(dimensions): - vec.append(round(random.uniform(0, 1), 2)) +for j in range(dimensions): + vec.append(round(random.uniform(0, 1), 2)) print(f"Searching for {vec} with the '{genre}' 
genre ...") search_query = { "query": { "bool": { - "filter": { - "bool": { - "must": [{ - "term": { - "metadata.genre": genre - } - }] - } - }, - "must": { - "knn": { - "values": { - "vector": vec, - "k": 5 - } - } - } + "filter": {"bool": {"must": [{"term": {"metadata.genre": genre}}]}}, + "must": {"knn": {"values": {"vector": vec, "k": 5}}}, } } } diff --git a/samples/knn/knn-efficient-filter.py b/samples/knn/knn-efficient-filter.py index 357eeb6a..cbfd41ad 100755 --- a/samples/knn/knn-efficient-filter.py +++ b/samples/knn/knn-efficient-filter.py @@ -1,79 +1,150 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. + import os -import random from opensearchpy import OpenSearch, helpers # connect to an instance of OpenSearch -host = os.getenv('HOST', default='localhost') -port = int(os.getenv('PORT', 9200)) -auth = ( - os.getenv('USERNAME', 'admin'), - os.getenv('PASSWORD', 'admin') -) +host = os.getenv("HOST", default="localhost") +port = int(os.getenv("PORT", 9200)) +auth = (os.getenv("USERNAME", "admin"), os.getenv("PASSWORD", "admin")) client = OpenSearch( - hosts = [{'host': host, 'port': port}], - http_auth = auth, - use_ssl = True, - verify_certs = False, - ssl_show_warn = False + hosts=[{"host": host, "port": port}], + http_auth=auth, + use_ssl=True, + verify_certs=False, + ssl_show_warn=False, ) # check whether an index exists index_name = "hotels-index" if not client.indices.exists(index_name): - client.indices.create(index_name, + client.indices.create( + index_name, body={ - "settings":{ + "settings": { "index.knn": True, "knn.algo_param.ef_search": 100, "number_of_shards": 1, - "number_of_replicas": 0 + "number_of_replicas": 0, }, - "mappings":{ + "mappings": { "properties": { 
"location": { - "type": "knn_vector", + "type": "knn_vector", "dimension": 2, "method": { "name": "hnsw", "space_type": "l2", "engine": "lucene", - "parameters": { - "ef_construction": 100, - "m": 16 - } - } + "parameters": {"ef_construction": 100, "m": 16}, + }, }, } - } - } + }, + }, ) # index data vectors = [ - { "_index": "hotels-index", "_id": "1", "location": [5.2, 4.4], "parking" : "true", "rating" : 5 }, - { "_index": "hotels-index", "_id": "2", "location": [5.2, 3.9], "parking" : "false", "rating" : 4 }, - { "_index": "hotels-index", "_id": "3", "location": [4.9, 3.4], "parking" : "true", "rating" : 9 }, - { "_index": "hotels-index", "_id": "4", "location": [4.2, 4.6], "parking" : "false", "rating" : 6}, - { "_index": "hotels-index", "_id": "5", "location": [3.3, 4.5], "parking" : "true", "rating" : 8 }, - { "_index": "hotels-index", "_id": "6", "location": [6.4, 3.4], "parking" : "true", "rating" : 9 }, - { "_index": "hotels-index", "_id": "7", "location": [4.2, 6.2], "parking" : "true", "rating" : 5 }, - { "_index": "hotels-index", "_id": "8", "location": [2.4, 4.0], "parking" : "true", "rating" : 8 }, - { "_index": "hotels-index", "_id": "9", "location": [1.4, 3.2], "parking" : "false", "rating" : 5 }, - { "_index": "hotels-index", "_id": "10", "location": [7.0, 9.9], "parking" : "true", "rating" : 9 }, - { "_index": "hotels-index", "_id": "11", "location": [3.0, 2.3], "parking" : "false", "rating" : 6 }, - { "_index": "hotels-index", "_id": "12", "location": [5.0, 1.0], "parking" : "true", "rating" : 3 }, + { + "_index": "hotels-index", + "_id": "1", + "location": [5.2, 4.4], + "parking": "true", + "rating": 5, + }, + { + "_index": "hotels-index", + "_id": "2", + "location": [5.2, 3.9], + "parking": "false", + "rating": 4, + }, + { + "_index": "hotels-index", + "_id": "3", + "location": [4.9, 3.4], + "parking": "true", + "rating": 9, + }, + { + "_index": "hotels-index", + "_id": "4", + "location": [4.2, 4.6], + "parking": "false", + "rating": 6, + }, + 
{ + "_index": "hotels-index", + "_id": "5", + "location": [3.3, 4.5], + "parking": "true", + "rating": 8, + }, + { + "_index": "hotels-index", + "_id": "6", + "location": [6.4, 3.4], + "parking": "true", + "rating": 9, + }, + { + "_index": "hotels-index", + "_id": "7", + "location": [4.2, 6.2], + "parking": "true", + "rating": 5, + }, + { + "_index": "hotels-index", + "_id": "8", + "location": [2.4, 4.0], + "parking": "true", + "rating": 8, + }, + { + "_index": "hotels-index", + "_id": "9", + "location": [1.4, 3.2], + "parking": "false", + "rating": 5, + }, + { + "_index": "hotels-index", + "_id": "10", + "location": [7.0, 9.9], + "parking": "true", + "rating": 9, + }, + { + "_index": "hotels-index", + "_id": "11", + "location": [3.0, 2.3], + "parking": "false", + "rating": 6, + }, + { + "_index": "hotels-index", + "_id": "12", + "location": [5.0, 1.0], + "parking": "true", + "rating": 3, + }, ] helpers.bulk(client, vectors) @@ -86,30 +157,19 @@ "query": { "knn": { "location": { - "vector": [5, 4], - "k": 3, - "filter": { - "bool": { - "must": [ - { - "range": { - "rating": { - "gte": 8, - "lte": 10 - } - } - }, - { - "term": { - "parking": "true" - } - } + "vector": [5, 4], + "k": 3, + "filter": { + "bool": { + "must": [ + {"range": {"rating": {"gte": 8, "lte": 10}}}, + {"term": {"parking": "true"}}, ] } - } + }, } } - } + }, } results = client.search(index=index_name, body=search_query) diff --git a/samples/security/roles.py b/samples/security/roles.py index a77d6eb0..8a2d1ef5 100644 --- a/samples/security/roles.py +++ b/samples/security/roles.py @@ -1,10 +1,14 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. # A basic OpenSearch sample that create and manage roles. 
@@ -13,16 +17,16 @@ # connect to OpenSearch -host = 'localhost' +host = "localhost" port = 9200 -auth = ('admin', 'admin') # For testing only. Don't store credentials in code. +auth = ("admin", "admin") # For testing only. Don't store credentials in code. client = OpenSearch( - hosts = [{'host': host, 'port': port}], - http_auth = auth, - use_ssl = True, - verify_certs = False, - ssl_show_warn = False + hosts=[{"host": host, "port": port}], + http_auth=auth, + use_ssl=True, + verify_certs=False, + ssl_show_warn=False, ) # Create a Role @@ -30,16 +34,16 @@ role_name = "test-role" role_content = { - "cluster_permissions": ["cluster_monitor"], - "index_permissions": [ - { - "index_patterns": ["index", "test-*"], - "allowed_actions": [ - "data_access", - "indices_monitor", - ], - } - ], + "cluster_permissions": ["cluster_monitor"], + "index_permissions": [ + { + "index_patterns": ["index", "test-*"], + "allowed_actions": [ + "data_access", + "indices_monitor", + ], + } + ], } response = client.security.create_role(role_name, body=role_content) diff --git a/samples/security/users.py b/samples/security/users.py index b4bb8e3b..0a778b8d 100644 --- a/samples/security/users.py +++ b/samples/security/users.py @@ -1,10 +1,14 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. # A basic OpenSearch sample that create and manage users. @@ -13,16 +17,16 @@ # connect to OpenSearch -host = 'localhost' +host = "localhost" port = 9200 -auth = ('admin', 'admin') # For testing only. Don't store credentials in code. +auth = ("admin", "admin") # For testing only. Don't store credentials in code. 
client = OpenSearch( - hosts = [{'host': host, 'port': port}], - http_auth = auth, - use_ssl = True, - verify_certs = False, - ssl_show_warn = False + hosts=[{"host": host, "port": port}], + http_auth=auth, + use_ssl=True, + verify_certs=False, + ssl_show_warn=False, ) # Create a User diff --git a/test_opensearchpy/TestHttpServer.py b/test_opensearchpy/TestHttpServer.py index e96670cc..d9fb8ede 100644 --- a/test_opensearchpy/TestHttpServer.py +++ b/test_opensearchpy/TestHttpServer.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/__init__.py b/test_opensearchpy/__init__.py index 7e52ae22..392fa5bd 100644 --- a/test_opensearchpy/__init__.py +++ b/test_opensearchpy/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/run_tests.py b/test_opensearchpy/run_tests.py index ca9db82a..55f1e586 100755 --- a/test_opensearchpy/run_tests.py +++ b/test_opensearchpy/run_tests.py @@ -4,6 +4,11 @@ # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. + + # # Modifications Copyright OpenSearch Contributors. See # GitHub history for details. 
diff --git a/test_opensearchpy/test_async/__init__.py b/test_opensearchpy/test_async/__init__.py index 7e52ae22..392fa5bd 100644 --- a/test_opensearchpy/test_async/__init__.py +++ b/test_opensearchpy/test_async/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_helpers/__init__.py b/test_opensearchpy/test_async/test_helpers/__init__.py index 7e52ae22..392fa5bd 100644 --- a/test_opensearchpy/test_async/test_helpers/__init__.py +++ b/test_opensearchpy/test_async/test_helpers/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_helpers/conftest.py b/test_opensearchpy/test_async/test_helpers/conftest.py index 56a6bf31..ca0c8d4c 100644 --- a/test_opensearchpy/test_async/test_helpers/conftest.py +++ b/test_opensearchpy/test_async/test_helpers/conftest.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_helpers/test_document.py b/test_opensearchpy/test_async/test_helpers/test_document.py index 44aaf1b5..26d49bf0 100644 --- a/test_opensearchpy/test_async/test_helpers/test_document.py +++ b/test_opensearchpy/test_async/test_helpers/test_document.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_helpers/test_faceted_search.py b/test_opensearchpy/test_async/test_helpers/test_faceted_search.py index 88344cdb..34e18008 100644 --- a/test_opensearchpy/test_async/test_helpers/test_faceted_search.py +++ b/test_opensearchpy/test_async/test_helpers/test_faceted_search.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # 
SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_helpers/test_index.py b/test_opensearchpy/test_async/test_helpers/test_index.py index 4ba51ce2..1958f80f 100644 --- a/test_opensearchpy/test_async/test_helpers/test_index.py +++ b/test_opensearchpy/test_async/test_helpers/test_index.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_helpers/test_mapping.py b/test_opensearchpy/test_async/test_helpers/test_mapping.py index a4fb2b24..7c9e799f 100644 --- a/test_opensearchpy/test_async/test_helpers/test_mapping.py +++ b/test_opensearchpy/test_async/test_helpers/test_mapping.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_helpers/test_search.py b/test_opensearchpy/test_async/test_helpers/test_search.py index 5df66804..784193ee 100644 --- a/test_opensearchpy/test_async/test_helpers/test_search.py +++ b/test_opensearchpy/test_async/test_helpers/test_search.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_helpers/test_update_by_query.py b/test_opensearchpy/test_async/test_helpers/test_update_by_query.py index c535f15a..340bd1b7 100644 --- a/test_opensearchpy/test_async/test_helpers/test_update_by_query.py +++ b/test_opensearchpy/test_async/test_helpers/test_update_by_query.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_http_connection.py b/test_opensearchpy/test_async/test_http_connection.py index a362f451..282a61c7 100644 
--- a/test_opensearchpy/test_async/test_http_connection.py +++ b/test_opensearchpy/test_async/test_http_connection.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_plugins_client.py b/test_opensearchpy/test_async/test_plugins_client.py new file mode 100644 index 00000000..c620873c --- /dev/null +++ b/test_opensearchpy/test_async/test_plugins_client.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. + +from unittest import TestCase + +from opensearchpy._async.client import AsyncOpenSearch + + +class TestPluginsClient(TestCase): + async def test_plugins_client(self): + with self.assertWarns(Warning) as w: + client = AsyncOpenSearch() + client.plugins.__init__(client) # double-init + self.assertEqual( + str(w.warnings[0].message), + "Cannot load `alerting` directly to AsyncOpenSearch as it already exists. 
Use `AsyncOpenSearch.plugin.alerting` instead.", + ) diff --git a/test_opensearchpy/test_async/test_server/__init__.py b/test_opensearchpy/test_async/test_server/__init__.py index a2ab2657..794aeb53 100644 --- a/test_opensearchpy/test_async/test_server/__init__.py +++ b/test_opensearchpy/test_async/test_server/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_server/conftest.py b/test_opensearchpy/test_async/test_server/conftest.py index 42c37edb..2c49aca3 100644 --- a/test_opensearchpy/test_async/test_server/conftest.py +++ b/test_opensearchpy/test_async/test_server/conftest.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_server/test_helpers/__init__.py b/test_opensearchpy/test_async/test_server/test_helpers/__init__.py index 7e52ae22..392fa5bd 100644 --- a/test_opensearchpy/test_async/test_server/test_helpers/__init__.py +++ b/test_opensearchpy/test_async/test_server/test_helpers/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_server/test_helpers/conftest.py b/test_opensearchpy/test_async/test_server/test_helpers/conftest.py index d5901d68..e6d79c46 100644 --- a/test_opensearchpy/test_async/test_server/test_helpers/conftest.py +++ b/test_opensearchpy/test_async/test_server/test_helpers/conftest.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_server/test_helpers/test_actions.py b/test_opensearchpy/test_async/test_server/test_helpers/test_actions.py index 7355c71d..425eb2c7 100644 --- 
a/test_opensearchpy/test_async/test_server/test_helpers/test_actions.py +++ b/test_opensearchpy/test_async/test_server/test_helpers/test_actions.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_server/test_helpers/test_data.py b/test_opensearchpy/test_async/test_server/test_helpers/test_data.py index 1194304e..bc2df5ba 100644 --- a/test_opensearchpy/test_async/test_server/test_helpers/test_data.py +++ b/test_opensearchpy/test_async/test_server/test_helpers/test_data.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_server/test_helpers/test_document.py b/test_opensearchpy/test_async/test_server/test_helpers/test_document.py index 172dfbfc..650c7b39 100644 --- a/test_opensearchpy/test_async/test_server/test_helpers/test_document.py +++ b/test_opensearchpy/test_async/test_server/test_helpers/test_document.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_server/test_helpers/test_faceted_search.py b/test_opensearchpy/test_async/test_server/test_helpers/test_faceted_search.py index ab8ae552..9f2d919b 100644 --- a/test_opensearchpy/test_async/test_server/test_helpers/test_faceted_search.py +++ b/test_opensearchpy/test_async/test_server/test_helpers/test_faceted_search.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_server/test_helpers/test_index.py b/test_opensearchpy/test_async/test_server/test_helpers/test_index.py index 26f452ca..cc489052 100644 --- 
a/test_opensearchpy/test_async/test_server/test_helpers/test_index.py +++ b/test_opensearchpy/test_async/test_server/test_helpers/test_index.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_server/test_helpers/test_mapping.py b/test_opensearchpy/test_async/test_server/test_helpers/test_mapping.py index c05fd0ec..1dca7959 100644 --- a/test_opensearchpy/test_async/test_server/test_helpers/test_mapping.py +++ b/test_opensearchpy/test_async/test_server/test_helpers/test_mapping.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_server/test_helpers/test_search.py b/test_opensearchpy/test_async/test_server/test_helpers/test_search.py index 54889dc8..8eb202f7 100644 --- a/test_opensearchpy/test_async/test_server/test_helpers/test_search.py +++ b/test_opensearchpy/test_async/test_server/test_helpers/test_search.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_server/test_helpers/test_update_by_query.py b/test_opensearchpy/test_async/test_server/test_helpers/test_update_by_query.py index 9c4e7fb6..2db68326 100644 --- a/test_opensearchpy/test_async/test_server/test_helpers/test_update_by_query.py +++ b/test_opensearchpy/test_async/test_server/test_helpers/test_update_by_query.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_server/test_plugins/__init__.py b/test_opensearchpy/test_async/test_server/test_plugins/__init__.py index 7e52ae22..392fa5bd 100644 --- 
a/test_opensearchpy/test_async/test_server/test_plugins/__init__.py +++ b/test_opensearchpy/test_async/test_server/test_plugins/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_server/test_rest_api_spec.py b/test_opensearchpy/test_async/test_server/test_rest_api_spec.py index 0773aab0..bb8509dc 100644 --- a/test_opensearchpy/test_async/test_server/test_rest_api_spec.py +++ b/test_opensearchpy/test_async/test_server/test_rest_api_spec.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_async/test_server_secured/__init__.py b/test_opensearchpy/test_async/test_server_secured/__init__.py index 6c0097cd..22c54ac8 100644 --- a/test_opensearchpy/test_async/test_server_secured/__init__.py +++ b/test_opensearchpy/test_async/test_server_secured/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_cases.py b/test_opensearchpy/test_cases.py index c41b86a8..2a5ad5a3 100644 --- a/test_opensearchpy/test_cases.py +++ b/test_opensearchpy/test_cases.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_client/__init__.py b/test_opensearchpy/test_client/__init__.py index 0a5747ca..ecbd769a 100644 --- a/test_opensearchpy/test_client/__init__.py +++ b/test_opensearchpy/test_client/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_client/test_cluster.py b/test_opensearchpy/test_client/test_cluster.py index 
15c43d5f..a66072cd 100644 --- a/test_opensearchpy/test_client/test_cluster.py +++ b/test_opensearchpy/test_client/test_cluster.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_client/test_indices.py b/test_opensearchpy/test_client/test_indices.py index d6737378..f3e48f1b 100644 --- a/test_opensearchpy/test_client/test_indices.py +++ b/test_opensearchpy/test_client/test_indices.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_client/test_plugins/__init__.py b/test_opensearchpy/test_client/test_plugins/__init__.py index 7e52ae22..392fa5bd 100644 --- a/test_opensearchpy/test_client/test_plugins/__init__.py +++ b/test_opensearchpy/test_client/test_plugins/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_client/test_plugins/test_alerting.py b/test_opensearchpy/test_client/test_plugins/test_alerting.py index 62827655..a59ad04e 100644 --- a/test_opensearchpy/test_client/test_plugins/test_alerting.py +++ b/test_opensearchpy/test_client/test_plugins/test_alerting.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_client/test_plugins/test_index_management.py b/test_opensearchpy/test_client/test_plugins/test_index_management.py index 6b126038..2c744e19 100644 --- a/test_opensearchpy/test_client/test_plugins/test_index_management.py +++ b/test_opensearchpy/test_client/test_plugins/test_index_management.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git 
a/test_opensearchpy/test_client/test_plugins/test_plugins_client.py b/test_opensearchpy/test_client/test_plugins/test_plugins_client.py new file mode 100644 index 00000000..e717d9cb --- /dev/null +++ b/test_opensearchpy/test_client/test_plugins/test_plugins_client.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. + +from opensearchpy.client import OpenSearch + +from ...test_cases import TestCase + + +class TestPluginsClient(TestCase): + def test_plugins_client(self): + with self.assertWarns(Warning) as w: + client = OpenSearch() + client.plugins.__init__(client) # double-init + self.assertEqual( + str(w.warnings[0].message), + "Cannot load `alerting` directly to OpenSearch as it already exists. Use `OpenSearch.plugin.alerting` instead.", + ) diff --git a/test_opensearchpy/test_client/test_point_in_time.py b/test_opensearchpy/test_client/test_point_in_time.py index e8546484..6ce12a46 100644 --- a/test_opensearchpy/test_client/test_point_in_time.py +++ b/test_opensearchpy/test_client/test_point_in_time.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_client/test_remote_store.py b/test_opensearchpy/test_client/test_remote_store.py new file mode 100644 index 00000000..92265733 --- /dev/null +++ b/test_opensearchpy/test_client/test_remote_store.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. 
See +# GitHub history for details. +from test_opensearchpy.test_cases import OpenSearchTestCase + + +class TestRemoteStore(OpenSearchTestCase): + def test_remote_store_restore(self): + self.client.remote_store.restore(body=["index-1"]) + self.assert_url_called("POST", "/_remotestore/_restore") diff --git a/test_opensearchpy/test_client/test_requests.py b/test_opensearchpy/test_client/test_requests.py index 11434a17..3caf8d5f 100644 --- a/test_opensearchpy/test_client/test_requests.py +++ b/test_opensearchpy/test_client/test_requests.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_client/test_urllib3.py b/test_opensearchpy/test_client/test_urllib3.py index 227164eb..fa63133b 100644 --- a/test_opensearchpy/test_client/test_urllib3.py +++ b/test_opensearchpy/test_client/test_urllib3.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_client/test_utils.py b/test_opensearchpy/test_client/test_utils.py index 1a4b6809..888e988d 100644 --- a/test_opensearchpy/test_client/test_utils.py +++ b/test_opensearchpy/test_client/test_utils.py @@ -29,9 +29,8 @@ from __future__ import unicode_literals from opensearchpy.client.utils import _bulk_body, _escape, _make_path, query_params -from opensearchpy.compat import PY2 -from ..test_cases import SkipTest, TestCase +from ..test_cases import TestCase class TestQueryParams(TestCase): @@ -161,14 +160,6 @@ def test_handles_unicode(self): "/some-index/type/%E4%B8%AD%E6%96%87", _make_path("some-index", "type", id) ) - def test_handles_utf_encoded_string(self): - if not PY2: - raise SkipTest("Only relevant for py2") - id = "中文".encode("utf-8") - self.assertEqual( - "/some-index/type/%E4%B8%AD%E6%96%87", _make_path("some-index", "type", id) - ) - class TestEscape(TestCase): def 
test_handles_ascii(self): diff --git a/test_opensearchpy/test_connection/__init__.py b/test_opensearchpy/test_connection/__init__.py index 7e52ae22..392fa5bd 100644 --- a/test_opensearchpy/test_connection/__init__.py +++ b/test_opensearchpy/test_connection/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_connection/test_base_connection.py b/test_opensearchpy/test_connection/test_base_connection.py index 63729206..2c0a3fef 100644 --- a/test_opensearchpy/test_connection/test_base_connection.py +++ b/test_opensearchpy/test_connection/test_base_connection.py @@ -28,13 +28,9 @@ import os import sys -import unittest import warnings -import six - from opensearchpy.connection import Connection -from opensearchpy.exceptions import NotFoundError from ..test_cases import TestCase @@ -92,19 +88,6 @@ def test_raises_warnings_when_folded(self): self.assertEqual([str(w.message) for w in warn], ["warning", "folded"]) - @unittest.skipIf(six.PY2, "not compatible with python2") - def test_raises_errors(self): - con = Connection() - with self.assertLogs("opensearch") as captured, self.assertRaises( - NotFoundError - ): - con._raise_error(404, "Not found", "application/json") - self.assertEqual(len(captured.output), 1) - - # NB: this should assertNoLogs() but that method is not available until python3.10 - with self.assertRaises(NotFoundError): - con._raise_error(404, "Not found", "text/plain; charset=UTF-8") - def test_ipv6_host_and_port(self): for kwargs, expected_host in [ ({"host": "::1"}, "http://[::1]:9200"), diff --git a/test_opensearchpy/test_connection_pool.py b/test_opensearchpy/test_connection_pool.py index 02686e44..f08b6f24 100644 --- a/test_opensearchpy/test_connection_pool.py +++ b/test_opensearchpy/test_connection_pool.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors 
require contributions made to diff --git a/test_opensearchpy/test_exceptions.py b/test_opensearchpy/test_exceptions.py index 77a97a91..0b4150fb 100644 --- a/test_opensearchpy/test_exceptions.py +++ b/test_opensearchpy/test_exceptions.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_helpers/__init__.py b/test_opensearchpy/test_helpers/__init__.py index 7e52ae22..392fa5bd 100644 --- a/test_opensearchpy/test_helpers/__init__.py +++ b/test_opensearchpy/test_helpers/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_helpers/test_aggs.py b/test_opensearchpy/test_helpers/test_aggs.py index 13059ccc..057e7f16 100644 --- a/test_opensearchpy/test_helpers/test_aggs.py +++ b/test_opensearchpy/test_helpers/test_aggs.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_helpers/test_document.py b/test_opensearchpy/test_helpers/test_document.py index 086bde17..d2da16e0 100644 --- a/test_opensearchpy/test_helpers/test_document.py +++ b/test_opensearchpy/test_helpers/test_document.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_helpers/test_faceted_search.py b/test_opensearchpy/test_helpers/test_faceted_search.py index 066fc9d4..9fcc68d1 100644 --- a/test_opensearchpy/test_helpers/test_faceted_search.py +++ b/test_opensearchpy/test_helpers/test_faceted_search.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_helpers/test_field.py 
b/test_opensearchpy/test_helpers/test_field.py index 288eab3a..df30ad69 100644 --- a/test_opensearchpy/test_helpers/test_field.py +++ b/test_opensearchpy/test_helpers/test_field.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_helpers/test_index.py b/test_opensearchpy/test_helpers/test_index.py index 40048bc6..7163c09e 100644 --- a/test_opensearchpy/test_helpers/test_index.py +++ b/test_opensearchpy/test_helpers/test_index.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_helpers/test_mapping.py b/test_opensearchpy/test_helpers/test_mapping.py index 822440a4..ad042c58 100644 --- a/test_opensearchpy/test_helpers/test_mapping.py +++ b/test_opensearchpy/test_helpers/test_mapping.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_helpers/test_query.py b/test_opensearchpy/test_helpers/test_query.py index 46707f2c..95acfbe5 100644 --- a/test_opensearchpy/test_helpers/test_query.py +++ b/test_opensearchpy/test_helpers/test_query.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_helpers/test_result.py b/test_opensearchpy/test_helpers/test_result.py index f07c633b..83fe8a08 100644 --- a/test_opensearchpy/test_helpers/test_result.py +++ b/test_opensearchpy/test_helpers/test_result.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_helpers/test_search.py b/test_opensearchpy/test_helpers/test_search.py index 91c7a709..dae61a00 100644 --- 
a/test_opensearchpy/test_helpers/test_search.py +++ b/test_opensearchpy/test_helpers/test_search.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_helpers/test_update_by_query.py b/test_opensearchpy/test_helpers/test_update_by_query.py index d298a0a0..336f8fda 100644 --- a/test_opensearchpy/test_helpers/test_update_by_query.py +++ b/test_opensearchpy/test_helpers/test_update_by_query.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_helpers/test_utils.py b/test_opensearchpy/test_helpers/test_utils.py index 7a620736..c651fe2f 100644 --- a/test_opensearchpy/test_helpers/test_utils.py +++ b/test_opensearchpy/test_helpers/test_utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_helpers/test_validation.py b/test_opensearchpy/test_helpers/test_validation.py index b86f8002..e8d9f5aa 100644 --- a/test_opensearchpy/test_helpers/test_validation.py +++ b/test_opensearchpy/test_helpers/test_validation.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_helpers/test_wrappers.py b/test_opensearchpy/test_helpers/test_wrappers.py index c05b9fc3..c49353c5 100644 --- a/test_opensearchpy/test_helpers/test_wrappers.py +++ b/test_opensearchpy/test_helpers/test_wrappers.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_server/__init__.py b/test_opensearchpy/test_server/__init__.py index 78d29958..164e6a5d 100644 --- 
a/test_opensearchpy/test_server/__init__.py +++ b/test_opensearchpy/test_server/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_server/conftest.py b/test_opensearchpy/test_server/conftest.py index 03306fcf..128c33eb 100644 --- a/test_opensearchpy/test_server/conftest.py +++ b/test_opensearchpy/test_server/conftest.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_server/test_helpers/__init__.py b/test_opensearchpy/test_server/test_helpers/__init__.py index 7e52ae22..392fa5bd 100644 --- a/test_opensearchpy/test_server/test_helpers/__init__.py +++ b/test_opensearchpy/test_server/test_helpers/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_server/test_helpers/test_actions.py b/test_opensearchpy/test_server/test_helpers/test_actions.py index 2230edb0..fcb65fde 100644 --- a/test_opensearchpy/test_server/test_helpers/test_actions.py +++ b/test_opensearchpy/test_server/test_helpers/test_actions.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_server/test_helpers/test_analysis.py b/test_opensearchpy/test_server/test_helpers/test_analysis.py index 9b4f5849..d0073c53 100644 --- a/test_opensearchpy/test_server/test_helpers/test_analysis.py +++ b/test_opensearchpy/test_server/test_helpers/test_analysis.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_server/test_helpers/test_count.py 
b/test_opensearchpy/test_server/test_helpers/test_count.py index f8aa612a..6a507a9f 100644 --- a/test_opensearchpy/test_server/test_helpers/test_count.py +++ b/test_opensearchpy/test_server/test_helpers/test_count.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_server/test_helpers/test_data.py b/test_opensearchpy/test_server/test_helpers/test_data.py index 059a983a..91e816b4 100644 --- a/test_opensearchpy/test_server/test_helpers/test_data.py +++ b/test_opensearchpy/test_server/test_helpers/test_data.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_server/test_helpers/test_document.py b/test_opensearchpy/test_server/test_helpers/test_document.py index 1bb6ce12..f459afb2 100644 --- a/test_opensearchpy/test_server/test_helpers/test_document.py +++ b/test_opensearchpy/test_server/test_helpers/test_document.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_server/test_helpers/test_faceted_search.py b/test_opensearchpy/test_server/test_helpers/test_faceted_search.py index 6b9ee50c..f7469d18 100644 --- a/test_opensearchpy/test_server/test_helpers/test_faceted_search.py +++ b/test_opensearchpy/test_server/test_helpers/test_faceted_search.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_server/test_helpers/test_index.py b/test_opensearchpy/test_server/test_helpers/test_index.py index 7df4a737..84525b01 100644 --- a/test_opensearchpy/test_server/test_helpers/test_index.py +++ b/test_opensearchpy/test_server/test_helpers/test_index.py @@ -1,3 +1,4 @@ +# -*- 
coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_server/test_helpers/test_mapping.py b/test_opensearchpy/test_server/test_helpers/test_mapping.py index d5d84469..a9278159 100644 --- a/test_opensearchpy/test_server/test_helpers/test_mapping.py +++ b/test_opensearchpy/test_server/test_helpers/test_mapping.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_server/test_helpers/test_update_by_query.py b/test_opensearchpy/test_server/test_helpers/test_update_by_query.py index b22db642..81a75802 100644 --- a/test_opensearchpy/test_server/test_helpers/test_update_by_query.py +++ b/test_opensearchpy/test_server/test_helpers/test_update_by_query.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_server/test_plugins/__init__.py b/test_opensearchpy/test_server/test_plugins/__init__.py index 7e52ae22..392fa5bd 100644 --- a/test_opensearchpy/test_server/test_plugins/__init__.py +++ b/test_opensearchpy/test_server/test_plugins/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_server/test_rest_api_spec.py b/test_opensearchpy/test_server/test_rest_api_spec.py index 306993f2..e4c5cb3f 100644 --- a/test_opensearchpy/test_server/test_rest_api_spec.py +++ b/test_opensearchpy/test_server/test_rest_api_spec.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_server_secured/__init__.py b/test_opensearchpy/test_server_secured/__init__.py index 6c0097cd..22c54ac8 100644 
--- a/test_opensearchpy/test_server_secured/__init__.py +++ b/test_opensearchpy/test_server_secured/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_types/aliased_types.py b/test_opensearchpy/test_types/aliased_types.py index f7a93e09..6d4a5a64 100644 --- a/test_opensearchpy/test_types/aliased_types.py +++ b/test_opensearchpy/test_types/aliased_types.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_types/async_types.py b/test_opensearchpy/test_types/async_types.py index b26b5d67..e6275662 100644 --- a/test_opensearchpy/test_types/async_types.py +++ b/test_opensearchpy/test_types/async_types.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/test_types/sync_types.py b/test_opensearchpy/test_types/sync_types.py index d772342b..df6634c4 100644 --- a/test_opensearchpy/test_types/sync_types.py +++ b/test_opensearchpy/test_types/sync_types.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/test_opensearchpy/utils.py b/test_opensearchpy/utils.py index 0c07a012..41497808 100644 --- a/test_opensearchpy/utils.py +++ b/test_opensearchpy/utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git a/utils/build-dists.py b/utils/build-dists.py index e6706c57..c52421e7 100644 --- a/utils/build-dists.py +++ b/utils/build-dists.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to diff --git 
a/utils/generate-api.py b/utils/generate-api.py index fffd0e82..7e241236 100644 --- a/utils/generate-api.py +++ b/utils/generate-api.py @@ -4,6 +4,11 @@ # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. + + # # Modifications Copyright OpenSearch Contributors. See # GitHub history for details. @@ -25,6 +30,7 @@ # specific language governing permissions and limitations # under the License. +import json import os import re from functools import lru_cache @@ -33,6 +39,7 @@ from pathlib import Path import black +import deepmerge import requests import unasync import urllib3 @@ -301,9 +308,10 @@ def all_parts(self): parts.update(url.get("parts", {})) for p in parts: - parts[p]["required"] = all( - p in url.get("parts", {}) for url in self._def["url"]["paths"] - ) + if "required" not in parts[p]: + parts[p]["required"] = all( + p in url.get("parts", {}) for url in self._def["url"]["paths"] + ) parts[p]["type"] = "Any" # This piece of logic corresponds to calling @@ -555,6 +563,8 @@ def read_modules(): # Group the data in the current group by the "path" key paths = [] + all_paths_have_deprecation = True + for key2, value2 in groupby(value, key=itemgetter("path")): # Extract the HTTP methods from the data in the current subgroup methods = [] @@ -567,8 +577,10 @@ def read_modules(): documentation = {"description": z["description"]} api.update({"documentation": documentation}) - if "deprecation_message" not in api and "x-deprecation-message" in z: - api.update({"deprecation_message": z["x-deprecation-message"]}) + if "x-deprecation-message" in z: + x_deprecation_message = z["x-deprecation-message"] + else: + all_paths_have_deprecation = False if "params" not in api and "params" in z: api.update({"params": z["params"]}) @@ -637,6 +649,10 @@ def read_modules(): paths.append({"path": key2, 
"methods": methods}) api.update({"url": {"paths": paths}}) + if all_paths_have_deprecation and x_deprecation_message is not None: + api.update({"deprecation_message": x_deprecation_message}) + + api = apply_patch(namespace, name, api) if namespace not in modules: modules[namespace] = Module(namespace) @@ -647,6 +663,17 @@ def read_modules(): return modules +def apply_patch(namespace, name, api): + override_file_path = ( + CODE_ROOT / "utils/templates/overrides" / namespace / f"{name}.json" + ) + if os.path.exists(override_file_path): + with open(override_file_path) as f: + override_json = json.load(f) + api = deepmerge.always_merger.merge(api, override_json) + return api + + def dump_modules(modules): for mod in modules.values(): mod.dump() diff --git a/utils/license-headers.py b/utils/license-headers.py index 255097d8..67b0ef4a 100644 --- a/utils/license-headers.py +++ b/utils/license-headers.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to @@ -6,24 +7,6 @@ # # Modifications Copyright OpenSearch Contributors. See # GitHub history for details. -# -# Licensed to Elasticsearch B.V. under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch B.V. licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- """Script which verifies that all source files have a license header. Has two modes: 'fix' and 'check'. 'fix' fixes problems, 'check' will @@ -33,20 +16,20 @@ import os import re import sys -from itertools import chain from typing import Iterator, List -lines_to_keep = ["# -*- coding: utf-8 -*-\n", "#!/usr/bin/env python\n"] -license_header_lines = [ - "# SPDX-License-Identifier: Apache-2.0\n", - "#\n", - "# The OpenSearch Contributors require contributions made to\n", - "# this file be licensed under the Apache-2.0 license or a\n", - "# compatible open source license.\n", - "#\n", - "# Modifications Copyright OpenSearch Contributors. See\n", - "# GitHub history for details.\n", -] +lines_to_keep = ["# -*- coding: utf-8 -*-", "#!/usr/bin/env python"] + +license_header = """ +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. 
+""".strip() def find_files_to_fix(sources: List[str]) -> Iterator[str]: @@ -67,20 +50,18 @@ def find_files_to_fix(sources: List[str]) -> Iterator[str]: def does_file_need_fix(filepath: str) -> bool: if not re.search(r"\.pyi?$", filepath): return False + existing_header = "" with open(filepath, mode="r") as f: - first_license_line = None for line in f: - if line == license_header_lines[0]: - first_license_line = line + line = line.strip() + if len(line) == 0 or line in lines_to_keep: + pass + elif line[0] == "#": + existing_header += line + existing_header += "\n" + else: break - elif line not in lines_to_keep: - return True - for header_line, line in zip( - license_header_lines, chain((first_license_line,), f) - ): - if line != header_line: - return True - return False + return not existing_header.startswith(license_header) def add_header_to_file(filepath: str) -> None: @@ -88,9 +69,9 @@ def add_header_to_file(filepath: str) -> None: lines = list(f) i = 0 for i, line in enumerate(lines): - if line not in lines_to_keep: + if len(line) > 0 and line not in lines_to_keep: break - lines = lines[:i] + license_header_lines + lines[i:] + lines = lines[:i] + [license_header] + lines[i:] with open(filepath, mode="w") as f: f.truncate() f.write("".join(lines)) diff --git a/utils/templates/base b/utils/templates/base index 47bb5956..bf270aee 100644 --- a/utils/templates/base +++ b/utils/templates/base @@ -20,11 +20,13 @@ {% if api.params|list|length %} {% for p, info in api.params %} + {% if info.description %} {% filter wordwrap(72, wrapstring="\n ") %} - :arg {{ p }}{% if info.deprecated %} (Deprecated: {{ info['deprecation_message'][:-1] }}){% endif %}: {{ info.description }}{% if info.options %} Valid choices: {{ info.options|join(", ") }}{% endif %} - {% if info.default is defined %}{% if info.default is not none %}{% if info.default is sameas(false) %} (default: false){% else %} (default: {{ info.default }}){% endif %}{% endif %}{% endif %} + :arg {{ p }}{% if 
info.deprecated %} (Deprecated: {{ info['deprecation_message'][:-1] }}.){% endif %}: {{ info.description }} {% if info.options %}Valid choices are {{ info.options|join(", ") }}.{% endif %} + {% if info.default is defined %}{% if info.default is not none %}{% if info.default is sameas(false) %}Default is false.{% else %}Default is {{ info.default }}.{% endif %}{% endif %}{% endif %} {% endfilter %} + {% endif %} {% endfor %} {% endif %} """ diff --git a/utils/templates/overrides/indices/put_mapping.json b/utils/templates/overrides/indices/put_mapping.json new file mode 100644 index 00000000..4409c446 --- /dev/null +++ b/utils/templates/overrides/indices/put_mapping.json @@ -0,0 +1,20 @@ +{ + "url": { + "paths": [ + { + "path": "/{index}/_mapping", + "methods": [ + "POST", + "PUT" + ], + "parts": { + "index": { + "type": "string", + "description": "Comma-separated list of indices; use `_all` or empty string to perform the operation on all indices.", + "required": false + } + } + } + ] + } +} \ No newline at end of file