From e9d1faa581ab72b8a33841aeb2147aa7c24654d4 Mon Sep 17 00:00:00 2001
From: Anton Rubin
Date: Thu, 3 Oct 2024 11:44:58 +0100
Subject: [PATCH 1/4] add unique token filter docs #8451

Signed-off-by: Anton Rubin
---
 _analyzers/token-filters/index.md  |   2 +-
 _analyzers/token-filters/unique.md | 102 +++++++++++++++++++++++++++++
 2 files changed, 103 insertions(+), 1 deletion(-)
 create mode 100644 _analyzers/token-filters/unique.md

diff --git a/_analyzers/token-filters/index.md b/_analyzers/token-filters/index.md
index a9b621d5ab..3906671db8 100644
--- a/_analyzers/token-filters/index.md
+++ b/_analyzers/token-filters/index.md
@@ -59,7 +59,7 @@ Normalization | `arabic_normalization`: [ArabicNormalizer](https://lucene.apache
 `synonym_graph` | N/A | Supplies a synonym list, including multiword synonyms, for the analysis process.
 `trim` | [TrimFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html) | Trims leading and trailing white space from each token in a stream.
 `truncate` | [TruncateTokenFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html) | Truncates tokens whose length exceeds the specified character limit.
-`unique` | N/A | Ensures each token is unique by removing duplicate tokens from a stream.
+[`unique`]({{site.url}}{{site.baseurl}}/analyzers/token-filters/unique/) | N/A | Ensures each token is unique by removing duplicate tokens from a stream.
 `uppercase` | [UpperCaseFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/core/LowerCaseFilter.html) | Converts tokens to uppercase.
 `word_delimiter` | [WordDelimiterFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.html) | Splits tokens at non-alphanumeric characters and performs normalization based on the specified rules.
 `word_delimiter_graph` | [WordDelimiterGraphFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.html) | Splits tokens at non-alphanumeric characters and performs normalization based on the specified rules. Assigns multi-position tokens a `positionLength` attribute.
diff --git a/_analyzers/token-filters/unique.md b/_analyzers/token-filters/unique.md
new file mode 100644
index 0000000000..10d0a938f4
--- /dev/null
+++ b/_analyzers/token-filters/unique.md
@@ -0,0 +1,102 @@
+---
+layout: default
+title: Unique
+parent: Token filters
+nav_order: 450
+---
+
+# Unique token filter
+
+The `unique` token filter ensures that only unique tokens are kept during the analysis process, removing duplicate tokens that appear within a single field or text block.
+
+## Parameters
+
+The `unique` token filter can be configured with `only_on_same_position` parameter. If set to `true`, this token filter will act as `remove_duplicates` token filter and will only remove tokens that are in the same position.
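+
+The following sketch (the index, filter, and analyzer names are illustrative) shows `only_on_same_position` set to `true`. Combined with the `keyword_repeat` and `porter_stem` filters, which can emit multiple tokens at the same position, the filter then behaves like `remove_duplicates`:
+
+```json
+PUT /unique_same_position_example
+{
+  "settings": {
+    "analysis": {
+      "filter": {
+        "unique_pos_filter": {
+          "type": "unique",
+          "only_on_same_position": true
+        }
+      },
+      "analyzer": {
+        "unique_pos_analyzer": {
+          "type": "custom",
+          "tokenizer": "standard",
+          "filter": [
+            "lowercase",
+            "keyword_repeat",
+            "porter_stem",
+            "unique_pos_filter"
+          ]
+        }
+      }
+    }
+  }
+}
+```
+{% include copy-curl.html %}
+
+Analyzing text such as `run running` with this analyzer should keep both `running` and its same-position stem `run` (the tokens differ), while collapsing the two identical same-position copies of `run` emitted by `keyword_repeat`.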
+
+## Example
+
+The following example request creates a new index named `unique_example` and configures an analyzer with `unique` filter:
+
+```json
+PUT /unique_example
+{
+  "settings": {
+    "analysis": {
+      "filter": {
+        "unique_filter": {
+          "type": "unique",
+          "only_on_same_position": false
+        }
+      },
+      "analyzer": {
+        "unique_analyzer": {
+          "type": "custom",
+          "tokenizer": "standard",
+          "filter": [
+            "lowercase",
+            "unique_filter"
+          ]
+        }
+      }
+    }
+  }
+}
+```
+{% include copy-curl.html %}
+
+## Generated tokens
+
+Use the following request to examine the tokens generated using the analyzer:
+
+```json
+GET /unique_example/_analyze
+{
+  "analyzer": "unique_analyzer",
+  "text": "OpenSearch OpenSearch is powerful powerful and scalable"
+}
+```
+{% include copy-curl.html %}
+
+The response contains the generated tokens:
+
+```json
+{
+  "tokens": [
+    {
+      "token": "opensearch",
+      "start_offset": 0,
+      "end_offset": 10,
+      "type": "<ALPHANUM>",
+      "position": 0
+    },
+    {
+      "token": "is",
+      "start_offset": 22,
+      "end_offset": 24,
+      "type": "<ALPHANUM>",
+      "position": 1
+    },
+    {
+      "token": "powerful",
+      "start_offset": 25,
+      "end_offset": 33,
+      "type": "<ALPHANUM>",
+      "position": 2
+    },
+    {
+      "token": "and",
+      "start_offset": 43,
+      "end_offset": 46,
+      "type": "<ALPHANUM>",
+      "position": 3
+    },
+    {
+      "token": "scalable",
+      "start_offset": 47,
+      "end_offset": 55,
+      "type": "<ALPHANUM>",
+      "position": 4
+    }
+  ]
+}
+```

From ed5379f3804943fcc5a1bd6acca74cef7cdbe988 Mon Sep 17 00:00:00 2001
From: Anton Rubin
Date: Wed, 16 Oct 2024 17:37:06 +0100
Subject: [PATCH 2/4] updating parameter table

Signed-off-by: Anton Rubin
---
 _analyzers/token-filters/unique.md | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/_analyzers/token-filters/unique.md b/_analyzers/token-filters/unique.md
index 10d0a938f4..a0c57d98d3 100644
--- a/_analyzers/token-filters/unique.md
+++ b/_analyzers/token-filters/unique.md
@@ -11,7 +11,11 @@ The `unique` token filter ensures that only unique tokens are kept during the an
 
 ## Parameters
 
-The `unique` token filter can be configured with `only_on_same_position` parameter. If set to `true`, this token filter will act as `remove_duplicates` token filter and will only remove tokens that are in the same position.
+The `unique` token filter can be configured with the following parameter.
+
+Parameter | Required/Optional | Data type | Description
+:--- | :--- | :--- | :---
+`only_on_same_position` | Optional | Boolean | If set to `true`, this token filter will act as `remove_duplicates` token filter and will only remove tokens that are in the same position. Default is `false`.
 
 ## Example
 

From a6028380ad66fb0a1aaeff417dafa2b48531daeb Mon Sep 17 00:00:00 2001
From: Fanit Kolchina
Date: Thu, 21 Nov 2024 14:27:48 -0500
Subject: [PATCH 3/4] Doc review

Signed-off-by: Fanit Kolchina
---
 _analyzers/token-filters/unique.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/_analyzers/token-filters/unique.md b/_analyzers/token-filters/unique.md
index a0c57d98d3..c4dfcbab16 100644
--- a/_analyzers/token-filters/unique.md
+++ b/_analyzers/token-filters/unique.md
@@ -15,11 +15,11 @@ The `unique` token filter can be configured with the following parameter.
 
 Parameter | Required/Optional | Data type | Description
 :--- | :--- | :--- | :---
-`only_on_same_position` | Optional | Boolean | If set to `true`, this token filter will act as `remove_duplicates` token filter and will only remove tokens that are in the same position. Default is `false`.
+`only_on_same_position` | Optional | Boolean | If `true`, the token filter acts as a `remove_duplicates` token filter and only removes tokens that are in the same position. Default is `false`. ## Example -The following example request creates a new index named `unique_example` and configures an analyzer with `unique` filter: +The following example request creates a new index named `unique_example` and configures an analyzer with a `unique` filter: ```json PUT /unique_example From a8bcc52b25b9d6471034af0d2ecda8c34df2603b Mon Sep 17 00:00:00 2001 From: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com> Date: Tue, 3 Dec 2024 10:37:27 -0500 Subject: [PATCH 4/4] Apply suggestions from code review Co-authored-by: Nathan Bower Signed-off-by: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com> --- _analyzers/token-filters/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_analyzers/token-filters/index.md b/_analyzers/token-filters/index.md index 3906671db8..accca400c5 100644 --- a/_analyzers/token-filters/index.md +++ b/_analyzers/token-filters/index.md @@ -59,7 +59,7 @@ Normalization | `arabic_normalization`: [ArabicNormalizer](https://lucene.apache `synonym_graph` | N/A | Supplies a synonym list, including multiword synonyms, for the analysis process. `trim` | [TrimFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html) | Trims leading and trailing white space from each token in a stream. `truncate` | [TruncateTokenFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html) | Truncates tokens whose length exceeds the specified character limit. -[`unique`]({{site.url}}{{site.baseurl}}/analyzers/token-filters/unique/) | N/A | Ensures each token is unique by removing duplicate tokens from a stream. +[`unique`]({{site.url}}{{site.baseurl}}/analyzers/token-filters/unique/) | N/A | Ensures that each token is unique by removing duplicate tokens from a stream. `uppercase` | [UpperCaseFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/core/LowerCaseFilter.html) | Converts tokens to uppercase. `word_delimiter` | [WordDelimiterFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.html) | Splits tokens at non-alphanumeric characters and performs normalization based on the specified rules. `word_delimiter_graph` | [WordDelimiterGraphFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.html) | Splits tokens at non-alphanumeric characters and performs normalization based on the specified rules. Assigns multi-position tokens a `positionLength` attribute.