diff --git a/ChatGLMNode/README.md b/ChatGLMNode/README.md
index e3e1ac8..dc771b8 100644
--- a/ChatGLMNode/README.md
+++ b/ChatGLMNode/README.md
@@ -14,6 +14,8 @@ Translation is carried out with the help of artificial intelligence using GLM mo
 > **ChatGLM4TranslateCLIPTextEncodeNode** - translate text, and return CONDITIONING
 >
 > **ChatGLM4TranslateTextNode** - translate text and return text (STRING)
+>
+> **ChatGLM4InstructNode** - generate a prompt from an instruction and return text (STRING)
 
 ## Image:
 
diff --git a/ChatGLMNode/chatglm_translate_node.py b/ChatGLMNode/chatglm_node.py
similarity index 58%
rename from ChatGLMNode/chatglm_translate_node.py
rename to ChatGLMNode/chatglm_node.py
index 9c27c73..accfb1d 100644
--- a/ChatGLMNode/chatglm_translate_node.py
+++ b/ChatGLMNode/chatglm_node.py
@@ -4,13 +4,12 @@
 
 ALL_CODES_LANGS = ['af', 'sq', 'am', 'ar', 'hy', 'as', 'ay', 'az', 'bm', 'eu', 'be', 'bn', 'bho', 'bs', 'bg', 'ca', 'ceb', 'ny', 'zh-CN', 'zh-TW', 'co', 'hr', 'cs', 'da', 'dv', 'doi', 'nl', 'en', 'eo', 'et', 'ee', 'tl', 'fi', 'fr', 'fy', 'gl', 'ka', 'de', 'el', 'gn', 'gu', 'ht', 'ha', 'haw', 'iw', 'hi', 'hmn', 'hu', 'is', 'ig', 'ilo', 'id', 'ga', 'it', 'ja', 'jw', 'kn', 'kk', 'km', 'rw', 'gom', 'ko', 'kri', 'ku', 'ckb', 'ky', 'lo', 'la', 'lv', 'ln', 'lt', 'lg', 'lb', 'mk', 'mai', 'mg', 'ms', 'ml', 'mt', 'mi', 'mr', 'mni-Mtei', 'lus', 'mn', 'my', 'ne', 'no', 'or', 'om', 'ps', 'fa', 'pl', 'pt', 'pa', 'qu', 'ro', 'ru', 'sm', 'sa', 'gd', 'nso', 'sr', 'st', 'sn', 'sd', 'si', 'sk', 'sl', 'so', 'es', 'su', 'sw', 'sv', 'tg', 'ta', 'tt', 'te', 'th', 'ti', 'ts', 'tr', 'tk', 'ak', 'uk', 'ur', 'ug', 'uz', 'vi', 'cy', 'xh', 'yi', 'yo', 'zu']
 
-ZHIPUAI_API_KEY = None
 ENDPOINT_URL = "https://open.bigmodel.cn/api/paas/v4/chat/completions"
+ZHIPUAI_API_KEY = None
 
-
-# Directory translate node and config file
-dir_translate_node = os.path.dirname(__file__)
-config_path = os.path.join(os.path.abspath(dir_translate_node), "config.json")
+# Directory of this node and path to its config file
+dir_node = os.path.dirname(__file__)
+config_path = os.path.join(os.path.abspath(dir_node), "config.json")
 
 # Load config.js file
 if not os.path.exists(config_path):
@@ -21,28 +20,15 @@
 
         # GET ZHIPUAI_API_KEY from json
         ZHIPUAI_API_KEY = CONFIG.get("ZHIPUAI_API_KEY")
-# =====
+    # =====
 
 
-def translate(prompt, srcTrans, toTrans, model, max_tokens, temperature, top_p):
-    # Check prompt exist
-    if prompt is None or prompt.strip() == "":
-        return ""
-
-    # Create body request
-    payload = {
-        "model": model,
-        "messages": [
-            {
-                "role": "user",
-                "content": f"Translate from {srcTrans} to {toTrans}: {prompt}",
-            },
-        ],
-        "max_tokens": max_tokens,
-        "temperature": temperature,
-        "top_p": top_p,
-    }
+def createRequest(payload):
+    global ZHIPUAI_API_KEY
 
+    if ZHIPUAI_API_KEY is None or ZHIPUAI_API_KEY.strip() == "" or ZHIPUAI_API_KEY == "your_api_key":
+        raise ValueError("ZHIPUAI_API_KEY value is empty or missing")
+
     # Headers
     headers = {
         "Authorization": f"Bearer {ZHIPUAI_API_KEY}",
@@ -55,24 +41,47 @@ def translate(prompt, srcTrans, toTrans, model, max_tokens, temperature, top_p):
 
         if response.status_code == 200:
             json_data = response.json()
-            translate_text_prompt = json_data.get("choices")[0]["message"][
+            response_text = json_data.get("choices")[0]["message"][
                 "content"
             ].strip()
 
-            return (
-                translate_text_prompt if translate_text_prompt and not None else ""
-            )
+            return response_text
 
     except requests.HTTPError as e:
         print(
-            f"Error translate text ChatGLM: {response.status_code}, {response.text}"
+            f"Error request ChatGLM: {response.status_code}, {response.text}"
         )
         raise e
     except Exception as e:
-        print(f"Error translate text ChatGLM: {e}")
+        print(f"Error ChatGLM: {e}")
         raise e
 
 
+
+def translate(prompt, srcTrans, toTrans, model, max_tokens, temperature, top_p):
+    # Check prompt exist
+    if prompt is None or prompt.strip() == "":
+        return ""
+
+    # Create body request
+    payload = {
+        "model": model,
+        "messages": [
+            {
+                "role": "user",
+                "content": f"Translate from {srcTrans} to {toTrans}: {prompt}",
+            },
+        ],
+        "max_tokens": round(max_tokens, 2),
+        "temperature": round(temperature, 2),
+        "top_p": round(top_p, 2),
+    }
+
+    response_translate_text = createRequest(payload)
+
+    return response_translate_text
+
+
 class ChatGLM4TranslateCLIPTextEncodeNode:
     @classmethod
     def INPUT_TYPES(self):
@@ -95,11 +104,11 @@ def INPUT_TYPES(self):
                 "max_tokens": ("INT", {"default": 1024, "tooltip": "The maximum number of tokens for model output, maximum output is 4095, default value is 1024."}),
                 "temperature": (
                     "FLOAT",
-                    {"default": 0.95, "min": 0.0, "max": 1.0, "step": 0.05, "tooltip": "Sampling temperature, controls the randomness of the output, must be a positive number within the range: [0.0, 1.0], default value is 0.95."},
+                    {"default": 0.95, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Sampling temperature, controls the randomness of the output, must be a positive number within the range: [0.0, 1.0], default value is 0.95."},
                 ),
                 "top_p": (
                     "FLOAT",
-                    {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.05, "tooltip": "Another method of temperature sampling, value range is: [0.0, 1.0], default value is 0.7."},
+                    {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Another method of temperature sampling, value range is: [0.0, 1.0], default value is 0.7."},
                 ),
                 "text": ("STRING", {"multiline": True, "placeholder": "Input text"}),
                 "clip": ("CLIP",),
@@ -127,8 +136,6 @@ def chatglm_translate_text(
         text,
         clip,
     ):
-        if ZHIPUAI_API_KEY is None or ZHIPUAI_API_KEY.strip() == "" or ZHIPUAI_API_KEY == "your_api_key":
-            raise ValueError("ZHIPUAI_API_KEY value is empty or missing")
 
         text = translate(
             text, from_translate, to_translate, model, max_tokens, temperature, top_p
@@ -154,11 +161,78 @@ def INPUT_TYPES(self):
     def chatglm_translate_text(
         self, from_translate, to_translate, model, max_tokens, temperature, top_p, text
     ):
-        if ZHIPUAI_API_KEY is None or ZHIPUAI_API_KEY.strip() == "" or ZHIPUAI_API_KEY == "your_api_key":
-            raise ValueError("ZHIPUAI_API_KEY value is empty or missing")
-
+
         text = translate(
             text, from_translate, to_translate, model, max_tokens, temperature, top_p
         )
 
         return (text,)
+
+
+# ChatGLM Instruct Node
+class ChatGLM4InstructNode():
+    @classmethod
+    def INPUT_TYPES(self):
+        return {
+            "required": {
+                "model": (
+                    [
+                        "glm-4-plus",
+                        "glm-4-0520",
+                        "glm-4",
+                        "glm-4-air",
+                        "glm-4-airx",
+                        "glm-4-long",
+                        "glm-4-flash",
+                    ],
+                    {"default": "glm-4-flash", "tooltip": "The model code to be called. Model 'glm-4-flash' is free!"},
+                ),
+                "max_tokens": ("INT", {"default": 1024, "tooltip": "The maximum number of tokens for model output, maximum output is 4095, default value is 1024."}),
+                "temperature": (
+                    "FLOAT",
+                    {"default": 0.95, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Sampling temperature, controls the randomness of the output, must be a positive number within the range: [0.0, 1.0], default value is 0.95."},
+                ),
+                "top_p": (
+                    "FLOAT",
+                    {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Another method of temperature sampling, value range is: [0.0, 1.0], default value is 0.7."},
+                ),
+                "instruct": ("STRING", {"multiline": True, "placeholder": "Input instruct text", "default": "Generate details text, without quotation marks or the word 'prompt' on english: {query}" , "tooltip": "Enter the instruction for the neural network to execute and indicate where to insert the query text {query}"}),
+                "query": ("STRING", {"multiline": True, "placeholder": "Enter the query text for the instruction", "tooltip": "Query field" }),
+            }
+        }
+
+    RETURN_TYPES = ("STRING",)
+    FUNCTION = "chatglm_instruct"
+
+    CATEGORY = "AlekPet Nodes/Instruct"
+
+    def chatglm_instruct(
+        self, model, max_tokens, temperature, top_p, instruct, query
+    ):
+        
+        if instruct is None or instruct.strip() == "":
+            raise ValueError("Instruct text is empty!")
+
+        if query is None or query.strip() == "":
+            raise ValueError("Query text is empty!")
+
+        instruct = instruct.replace("{query}", query)
+
+        # Create body request
+        payload = {
+            "model": model,
+            "messages": [
+                {
+                    "role": "user",
+                    "content": instruct,
+                },
+            ],
+            "max_tokens": round(max_tokens, 2),
+            "temperature": round(temperature, 2),
+            "top_p": round(top_p, 2),
+        }
+
+        answer = createRequest(payload)
+
+        return (answer, )
+    
\ No newline at end of file
diff --git a/README.md b/README.md
index 06e82f6..5f6c07b 100644
--- a/README.md
+++ b/README.md
@@ -20,6 +20,7 @@ If you enjoy my work, consider **[supporting me](https://alekpet.github.io/suppo
 | _ArgosTranslateCLIPTextEncodeNode_    | The node translate promt uses module **Argos Translator** from other languages into english, and return conditioning | AlekPet Node/conditioning |
 | _ChatGLM4TranslateTextNode_           |               This translator node uses artificial intelligence to translate prompts and return string               |     AlekPet Node/text     |
 | _ChatGLM4TranslateCLIPTextEncodeNode_ |            This translator node uses artificial intelligence to translate prompts and return conditioning            | AlekPet Node/conditioning |
+| _ChatGLM4InstructNode_                |                              This node uses artificial intelligence to generate prompt                               |   AlekPet Node/Instruct   |
 | _PreviewTextNode_                     |                                           The node displays the input text                                           |    AlekPet Node/extras    |
 | _ColorsCorrectNode_                   |                                         The node for correcting image colors                                         |    AlekPet Node/extras    |
 | _HexToHueNode_                        |                          The node convert HEX color to HUE (degrees and normal [-0.5, 0.5])                          |    AlekPet Node/extras    |
diff --git a/__init__.py b/__init__.py
index cc025a7..99fdd62 100644
--- a/__init__.py
+++ b/__init__.py
@@ -276,9 +276,9 @@ def installNodes():
     ArgosTranslateCLIPTextEncodeNode,
     ArgosTranslateTextNode,
 )
-from .ChatGLMNode.chatglm_translate_node import (
+from .ChatGLMNode.chatglm_node import (
     ChatGLM4TranslateCLIPTextEncodeNode,
-    ChatGLM4TranslateTextNode,
+    ChatGLM4TranslateTextNode, ChatGLM4InstructNode
 )
 from .DeepTranslatorNode.deep_translator_node import (
     DeepTranslatorCLIPTextEncodeNode,
@@ -299,6 +299,7 @@ def installNodes():
     "ArgosTranslateTextNode": ArgosTranslateTextNode,
     "ChatGLM4TranslateCLIPTextEncodeNode": ChatGLM4TranslateCLIPTextEncodeNode,
     "ChatGLM4TranslateTextNode": ChatGLM4TranslateTextNode,
+    "ChatGLM4InstructNode": ChatGLM4InstructNode,
     "DeepTranslatorCLIPTextEncodeNode": DeepTranslatorCLIPTextEncodeNode,
     "DeepTranslatorTextNode": DeepTranslatorTextNode,
     "PreviewTextNode": PreviewTextNode,
@@ -317,6 +318,7 @@ def installNodes():
     "ArgosTranslateTextNode": "Argos Translate Text Node",
     "ChatGLM4TranslateCLIPTextEncodeNode": "ChatGLM-4 Translate CLIP Text Encode Node",
     "ChatGLM4TranslateTextNode": "ChatGLM-4 Translate Text Node",
+    "ChatGLM4InstructNode": "ChatGLM-4 Instruct Node",
     "DeepTranslatorCLIPTextEncodeNode": "Deep Translator CLIP Text Encode Node",
     "DeepTranslatorTextNode": "Deep Translator Text Node",
     "PreviewTextNode": "Preview Text Node",
diff --git a/pyproject.toml b/pyproject.toml
index 8ca6ded..b15ed10 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,7 +1,7 @@
 [project]
 name = "comfyui_custom_nodes_alekpet"
-description = "Nodes: PoseNode, PainterNode, TranslateTextNode, TranslateCLIPTextEncodeNode, DeepTranslatorTextNode, DeepTranslatorCLIPTextEncodeNode, ArgosTranslateTextNode, ArgosTranslateCLIPTextEncodeNode, ChatGLM4TranslateCLIPTextEncodeNode, ChatGLM4TranslateTextNode, PreviewTextNode, HexToHueNode, ColorsCorrectNode, IDENode."
-version = "1.0.35"
+description = "Nodes: PoseNode, PainterNode, TranslateTextNode, TranslateCLIPTextEncodeNode, DeepTranslatorTextNode, DeepTranslatorCLIPTextEncodeNode, ArgosTranslateTextNode, ArgosTranslateCLIPTextEncodeNode, ChatGLM4TranslateCLIPTextEncodeNode, ChatGLM4TranslateTextNode, ChatGLM4InstructNode, PreviewTextNode, HexToHueNode, ColorsCorrectNode, IDENode."
+version = "1.0.36"
 license = { file = "LICENSE" }
 
 [project.urls]