From 2dfae7ccc767c3d0d391ee809faec9365275668a Mon Sep 17 00:00:00 2001
From: Anton Rubin
Date: Thu, 10 Oct 2024 11:14:39 +0100
Subject: [PATCH 1/3] add thai tokenizer docs

Signed-off-by: Anton Rubin
---
 _analyzers/tokenizers/index.md |   2 +-
 _analyzers/tokenizers/thai.md  | 108 +++++++++++++++++++++++++++++++++
 2 files changed, 109 insertions(+), 1 deletion(-)
 create mode 100644 _analyzers/tokenizers/thai.md

diff --git a/_analyzers/tokenizers/index.md b/_analyzers/tokenizers/index.md
index d401851f60..1abc5ee7ff 100644
--- a/_analyzers/tokenizers/index.md
+++ b/_analyzers/tokenizers/index.md
@@ -2,7 +2,7 @@
 layout: default
 title: Tokenizers
 nav_order: 60
-has_children: false
+has_children: true
 has_toc: false
 ---
 
diff --git a/_analyzers/tokenizers/thai.md b/_analyzers/tokenizers/thai.md
new file mode 100644
index 0000000000..4999c420b4
--- /dev/null
+++ b/_analyzers/tokenizers/thai.md
@@ -0,0 +1,108 @@
+---
+layout: default
+title: Thai
+parent: Tokenizers
+nav_order: 140
+---
+
+# Thai tokenizer
+
+The `thai` tokenizer is designed for tokenizing Thai language text. As words in Thai language are not separated by spaces, the tokenizer must identify word boundaries based on language-specific rules.
+
+## Example usage
+
+The following example request creates a new index named `thai_index` and configures an analyzer with `thai` tokenizer:
+
+```
+PUT /thai_index
+{
+  "settings": {
+    "analysis": {
+      "tokenizer": {
+        "thai_tokenizer": {
+          "type": "thai"
+        }
+      },
+      "analyzer": {
+        "thai_analyzer": {
+          "type": "custom",
+          "tokenizer": "thai_tokenizer"
+        }
+      }
+    }
+  },
+  "mappings": {
+    "properties": {
+      "content": {
+        "type": "text",
+        "analyzer": "thai_analyzer"
+      }
+    }
+  }
+}
+```
+{% include copy-curl.html %}
+
+## Generated tokens
+
+Use the following request to examine the tokens generated using the created analyzer:
+
+```json
+POST /thai_index/_analyze
+{
+  "analyzer": "thai_analyzer",
+  "text": "ฉันชอบไปเที่ยวที่เชียงใหม่"
+}
+```
+{% include copy-curl.html %}
+
+The response contains the generated tokens:
+
+```json
+{
+  "tokens": [
+    {
+      "token": "ฉัน",
+      "start_offset": 0,
+      "end_offset": 3,
+      "type": "word",
+      "position": 0
+    },
+    {
+      "token": "ชอบ",
+      "start_offset": 3,
+      "end_offset": 6,
+      "type": "word",
+      "position": 1
+    },
+    {
+      "token": "ไป",
+      "start_offset": 6,
+      "end_offset": 8,
+      "type": "word",
+      "position": 2
+    },
+    {
+      "token": "เที่ยว",
+      "start_offset": 8,
+      "end_offset": 14,
+      "type": "word",
+      "position": 3
+    },
+    {
+      "token": "ที่",
+      "start_offset": 14,
+      "end_offset": 17,
+      "type": "word",
+      "position": 4
+    },
+    {
+      "token": "เชียงใหม่",
+      "start_offset": 17,
+      "end_offset": 26,
+      "type": "word",
+      "position": 5
+    }
+  ]
+}
+```

From 70c122b7b5072541f00f8a44ec394c491d89d3bc Mon Sep 17 00:00:00 2001
From: Fanit Kolchina
Date: Thu, 5 Dec 2024 13:57:09 -0500
Subject: [PATCH 2/3] Doc review

Signed-off-by: Fanit Kolchina
---
 _analyzers/tokenizers/thai.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/_analyzers/tokenizers/thai.md b/_analyzers/tokenizers/thai.md
index 4999c420b4..d1d83ff07e 100644
--- a/_analyzers/tokenizers/thai.md
+++ b/_analyzers/tokenizers/thai.md
@@ -7,13 +7,13 @@ nav_order: 140
 
 # Thai tokenizer
 
-The `thai` tokenizer is designed for tokenizing Thai language text. As words in Thai language are not separated by spaces, the tokenizer must identify word boundaries based on language-specific rules.
+The `thai` tokenizer is designed for tokenizing Thai language text. Because words in Thai language are not separated by spaces, the tokenizer must identify word boundaries based on language-specific rules.
 
 ## Example usage
 
-The following example request creates a new index named `thai_index` and configures an analyzer with `thai` tokenizer:
+The following example request creates a new index named `thai_index` and configures an analyzer with a `thai` tokenizer:
 
-```
+```json
 PUT /thai_index
 {
   "settings": {
@@ -45,7 +45,7 @@ PUT /thai_index
 
 ## Generated tokens
 
-Use the following request to examine the tokens generated using the created analyzer:
+Use the following request to examine the tokens generated using the analyzer:
 
 ```json
 POST /thai_index/_analyze

From 2508ddee59175251da5843c685574cd5bc18502a Mon Sep 17 00:00:00 2001
From: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com>
Date: Mon, 9 Dec 2024 12:18:20 -0500
Subject: [PATCH 3/3] Apply suggestions from code review

Co-authored-by: Nathan Bower
Signed-off-by: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com>
---
 _analyzers/tokenizers/thai.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/_analyzers/tokenizers/thai.md b/_analyzers/tokenizers/thai.md
index d1d83ff07e..4afb14a9eb 100644
--- a/_analyzers/tokenizers/thai.md
+++ b/_analyzers/tokenizers/thai.md
@@ -7,7 +7,7 @@ nav_order: 140
 
 # Thai tokenizer
 
-The `thai` tokenizer is designed for tokenizing Thai language text. Because words in Thai language are not separated by spaces, the tokenizer must identify word boundaries based on language-specific rules.
+The `thai` tokenizer tokenizes Thai language text. Because words in Thai language are not separated by spaces, the tokenizer must identify word boundaries based on language-specific rules.
 
 ## Example usage
 