diff --git a/Packs/AnythingLLM/.pack-ignore b/Packs/AnythingLLM/.pack-ignore new file mode 100644 index 00000000000..358499f9356 --- /dev/null +++ b/Packs/AnythingLLM/.pack-ignore @@ -0,0 +1,2 @@ +[file:AnythingLLM.yml] +ignore=IN153 diff --git a/Packs/AnythingLLM/.secrets-ignore b/Packs/AnythingLLM/.secrets-ignore new file mode 100644 index 00000000000..5979ee040be --- /dev/null +++ b/Packs/AnythingLLM/.secrets-ignore @@ -0,0 +1 @@ +https://docs.useanything.com diff --git a/Packs/AnythingLLM/Integrations/AnythingLLM/AnythingLLM.py b/Packs/AnythingLLM/Integrations/AnythingLLM/AnythingLLM.py new file mode 100644 index 00000000000..8b90235bee3 --- /dev/null +++ b/Packs/AnythingLLM/Integrations/AnythingLLM/AnythingLLM.py @@ -0,0 +1,758 @@ +import demistomock as demisto # noqa: F401 +from CommonServerPython import * # noqa: F401 + +import json +import shutil + + +''' CLIENT CLASS ''' +class Client(BaseClient): + def test_module(self): + self._http_request("GET", "/v1/auth") + + + def document_list(self): + return self._list("documents") + + + def document_get(self, folder: str, document: str): + try: + name = document_name(folder, document, self.document_list()) + response = self._http_request( + method = "GET", + url_suffix = f"/v1/document/{name}" + ) + except Exception as e: + msg = f"AnythingLLM: document_get: exception getting document details - {str(e)}" + demisto.debug(msg) + raise Exception(msg) + + return response + + + def document_delete(self, folder: str, document: str): + try: + name = document_name(folder, document, self.document_list()) + data = { + "names": [ + f"{folder}/{name}" + ] + } + response = self._http_request( + method = "DELETE", + url_suffix = "/v1/system/remove-documents", + json_data = data + ) + except Exception as e: + msg = f"AnythingLLM: document_delete: exception deleting document - {str(e)}" + demisto.debug(msg) + raise Exception(msg) + + return {"message": response} + + + def document_createfolder(self, folder: str): + try: + data = { + "name": folder + } + response = self._http_request( + method = "POST", + url_suffix = "/v1/document/create-folder", + json_data = data + ) + except Exception as e: + msg = f"AnythingLLM: document_createfolder: exception creating folder - {str(e)}" + demisto.debug(msg) + raise Exception(msg) + + return response + + + def document_move(self, srcfolder: str, dstfolder: str, document: str): + try: + name = document_name(srcfolder, document, self.document_list()) + data = { + "files": [ + { + "from": f"{srcfolder}/{name}", + "to": f"{dstfolder}/{name}" + } + ] + } + response = self._http_request( + method = "POST", + url_suffix = "/v1/document/move-files", + json_data = data + ) + except Exception as e: + msg = f"AnythingLLM: document_move: exception moving document - {str(e)}" + demisto.debug(msg) + raise Exception(msg) + + return response + + + def document_upload_text(self, text: str, title: str, description: str, author: str, source: str): + try: + try: + exists = False + document_name("custom-documents", title, self.document_list()) + exists = True + except Exception: + data = { + "textContent": text, + "metadata": { + "title": title, + "docAuthor": author, + "description": description, + "docSource": source + } + } + response = self._http_request( + method = "POST", + url_suffix = "/v1/document/raw-text", + json_data = data + ) + finally: + if exists: # pylint: disable=E0601 + raise Exception(f"document already exists [{title}]") + except Exception as e: + msg = f"AnythingLLM: document_upload_text: exception uploading text 
- {str(e)}" + demisto.debug(msg) + raise Exception(msg) + + return response # pylint: disable=E0601 + + + def document_upload_link(self, link: str, title: str, description: str, author: str, source: str): + try: + try: + exists = False + document_name("custom-documents", title, self.document_list()) + exists = True + except Exception: + data = { + "link": link, + "metadata": { + "title": title, + "docAuthor": author, + "description": description, + "docSource": source + } + } + response = self._http_request( + method = "POST", + url_suffix = "/v1/document/raw-text", + json_data = data + ) + finally: + if exists: # pylint: disable=E0601 + raise Exception(f"document already exists [{title}]") + except Exception as e: + msg = f"AnythingLLM: document_upload_link: exception uploading link [{link}] - {str(e)}" + demisto.debug(msg) + raise Exception(msg) + + return response # pylint: disable=E0601 + + + def document_upload_file(self, entry_id): + try: + headers = self._headers + del headers['Content-Type'] + file_path = demisto.getFilePath(entry_id)['path'] + file_name = demisto.getFilePath(entry_id)['name'] + try: + exists = False + document_name("custom-documents", file_name, self.document_list()) + exists = True + except Exception: + shutil.copy(file_path, file_name) + response = self._http_request( + method = 'POST', + headers = headers, + url_suffix = "/v1/document/upload", + files = {'file': (file_name, open(file_name, 'rb'))} + ) + finally: + if exists: # pylint: disable=E0601 + raise Exception(f"document already exists [{file_name}]") + except Exception as e: + msg = f"AnythingLLM: document_upload_file: exception uploading a file entry [{entry_id}] from the war room - {str(e)}" + demisto.debug(msg) + raise Exception(msg) + finally: + shutil.rmtree(file_name, ignore_errors=True) + + return response # pylint: disable=E0601 + + + def workspace_new(self, workspace: str): + try: + if len(workspace.strip()) == 0: + raise Exception("workspace parameter is blank") + try: + exists = False + workspace_slug(workspace, self.workspace_list()) + exists = True + except Exception: + data = { + 'name': workspace + } + response = self._http_request( + method = "POST", + url_suffix = "/v1/workspace/new", + json_data = data + ) + return response + finally: + if exists: # pylint: disable=E0601 + raise Exception("workspace already exists") + except Exception as e: + msg = f"AnythingLLM: workspace_new: exception creating a new workspace [{workspace}] - {str(e)}" + demisto.debug(msg) + raise Exception(msg) + + + def workspace_chat(self, workspace: str, message: str, mode: str): + return self._chat(workspace, message, mode, "chat") + + + def workspace_stream_chat(self, workspace: str, message: str, mode: str): + return self._chat(workspace, message, mode, "stream-chat") + + + def workspace_list(self): + return self._list("workspaces") + + + def workspace_get(self, workspace:str ): + try: + slug = workspace_slug(workspace, self.workspace_list()) + response = self._http_request( + method = "GET", + url_suffix = f"/v1/workspace/{slug}", + ) + except Exception as e: + msg = f"AnythingLLM: workspace_get: exception getting workspace details - {str(e)}" + demisto.debug(msg) + raise Exception(msg) + + return response + + + def workspace_delete(self, workspace:str ): + try: + slug = workspace_slug(workspace, self.workspace_list()) + self._http_request( + method = "DELETE", + url_suffix = f"/v1/workspace/{slug}", + resp_type='bytes' + ) + except Exception as e: + msg = f"AnythingLLM: workspace_delete: exception deleting 
workspace - {str(e)}" + demisto.debug(msg) + raise Exception(msg) + + return {"message": {"success": True, "message": "Workspace removed successfully"}} + + + def workspace_settings(self, workspace:str, settings: dict ): + try: + settings = validate_workspace_settings(settings) + slug = workspace_slug(workspace, self.workspace_list()) + response = self._http_request( + method = "POST", + url_suffix = f"/v1/workspace/{slug}/update", + json_data = settings + ) + except Exception as e: + msg = f"AnythingLLM: workspace_settings: exception updating workspace settings - {str(e)}" + demisto.debug(msg) + raise Exception(msg) + + return response + + + def workspace_add_embedding(self, workspace: str, folder: str, document: str): + return self._embedding(workspace, folder, document, "adds") + + + def workspace_delete_embedding(self, workspace: str, folder: str, document: str): + return self._embedding(workspace, folder, document, "deletes") + + + def workspace_pin(self, workspace:str, folder:str, document:str, status: str): + try: + if status.lower() == "true": + pinst = True + elif status.lower() == "false": + pinst = False + else: + raise Exception("document pin status of [true] or [false] not passed") + name = document_name(folder, document, self.document_list()) + data = { + "docPath": f"{folder}/{name}", + "pinStatus": pinst + } + slug = workspace_slug(workspace, self.workspace_list()) + response = self._http_request( + method = "POST", + url_suffix = f"/v1/workspace/{slug}/update-pin", + json_data = data + ) + except Exception as e: + msg = f"AnythingLLM: workspace_pin: exception pinning embedded document to workspace - {str(e)}" + demisto.debug(msg) + raise Exception(msg) + + return response + + + def _chat(self, workspace: str, message: str, mode: str, type: str): + try: + data = { + 'message': message, + 'mode': validate_chat_mode(mode) + } + slug = workspace_slug(workspace, self.workspace_list()) + response = self._http_request( + method = "POST", + url_suffix = f"/v1/workspace/{slug}/{type}", + json_data = data + ) + except Exception as e: + msg = f"AnythingLLM: _chat: exception chatting - {str(e)}" + demisto.debug(msg) + raise Exception(msg) + + return response + + + def _list(self, items: str): + try: + response = self._http_request( + method="GET", + url_suffix=f"/v1/{items}", + ) + except Exception as e: + msg = f"AnythingLLM: _list: exception listing {items} - {str(e)}" + demisto.debug(msg) + raise Exception(msg) + + return response + + + def _embedding(self, workspace: str, folder: str, document: str, action: str): + try: + name = document_name(folder, document, self.document_list()) + + try: + ws = self.workspace_get(workspace) + except Exception: + raise Exception(f"workspace [{workspace}] not found") + + if action == "adds": + if embedding_exists(name, ws): + raise Exception(f"[{document}] already embedded") + elif action == "deletes": + if not embedding_exists(name, ws): + raise Exception(f"[{document}] not embedded") + + data = { + action: [f"{folder}/{name}"] + } + slug = workspace_slug(workspace, self.workspace_list()) + response = self._http_request( + method = "POST", + url_suffix = f"/v1/workspace/{slug}/update-embeddings", + json_data = data + ) + except Exception as e: + msg = f"AnythingLLM: _embedding: exception [{action}] a document embedding - {str(e)}" + demisto.debug(msg) + raise Exception(msg) + + return response + +''' HELPER FUNCTIONS ''' + +def embedding_exists(docname: str, ws: dict) -> bool: + for doc in ws['workspace']['documents']: + if doc['filename'] == 
docname: + return True + return False + + +def workspace_slug(workspace: str, workspaces) -> str: + for w in workspaces['workspaces']: + if w['name'] == workspace: + return w['slug'] + raise Exception(f"workspace name not found [{workspace}]") + + +def normal_document_title(title: str): + title = ' '.join(title.strip().split()) + return title.lower().replace(" ", "-") + ".txt" + + +def document_name(folder: str, title: str, documents) -> str: + for f in documents['localFiles']['items']: + if f['name'] == folder: + for d in f['items']: + if d['title'] in [title, normal_document_title(title)]: + return d['name'] + raise Exception(f"document title not found [{title}]") + + +def validate_chat_mode(mode: str): + if mode not in ['chat', 'query']: + raise Exception(f"Invalid chat mode [{mode}]") + return mode + + +def validate_workspace_settings(settings: dict): + new_settings = {} + if "name" in settings: + new_settings['name'] = settings['name'] + #if "vectorTag" in settings: + # new_settings['vectorTag'] = settings['vectorTag'] + if "openAiTemp" in settings: + new_settings['openAiTemp'] = float(settings['openAiTemp']) + if "openAiHistory" in settings: + new_settings['openAiHistory'] = int(settings['openAiHistory']) + if "openAiPrompt" in settings: + new_settings['openAiPrompt'] = settings['openAiPrompt'] + if "similarityThreshold" in settings: + new_settings['similarityThreshold'] = float(settings['similarityThreshold']) + #if "chatProvider" in settings: + # new_settings['chatProvider'] = settings['chatProvider'] + #if "chatModel" in settings: + # new_settings['chatModel'] = settings['chatModel'] + if "topN" in settings: + new_settings['topN'] = int(settings['topN']) + if "chatMode" in settings: + new_settings['chatMode'] = settings['chatMode'] + if "queryRefusalResponse" in settings: + new_settings['queryRefusalResponse'] = settings['queryRefusalResponse'] + return new_settings + + +def DictMarkdown(nested, indent): + md = "" + if indent == "": + indent = "-" + else: + indent = " "+indent + if isinstance(nested, dict): + for key, val in nested.items(): + if isinstance(val, dict): + md += f"{indent} {key}\n" + md += DictMarkdown(val, indent) + elif isinstance(val, list): + md += f"{indent} {key}\n" + md += DictMarkdown(val, indent) + else: + md += f"{indent} {key}: {val}\n" + elif isinstance(nested, list): + for val in nested: + md += f"{indent} []\n" + if isinstance(val, dict): + md += DictMarkdown(val, indent) + elif isinstance(val, list): + md += f"{indent} {val}\n" + md += DictMarkdown(val, indent) + else: + md += f" {indent} {val}\n" + + return md + + +''' COMMAND FUNCTIONS ''' + + +def test_module(client: Client, args: dict) -> str: + try: + client.test_module() + except DemistoException as e: + if 'Forbidden' in str(e): + return 'Authorization Error: ensure API Key is correctly set' + else: + raise e + + return 'ok' + + +def list_command(client: Client, args: dict) -> CommandResults: + response: dict = {} + return CommandResults( + outputs_prefix = 'AnythingLLM.list', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def settings_command(client: Client, args: dict) -> CommandResults: + response: dict = {} + return CommandResults( + outputs_prefix = 'AnythingLLM.settings', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def document_list_command(client: Client, args: dict) -> CommandResults: + response = client.document_list() + return CommandResults( + outputs_prefix = 'AnythingLLM.workspace_list', + readable_output = 
DictMarkdown(response, ""), + outputs = response + ) + + +def document_createfolder_command(client: Client, args: dict) -> CommandResults: + response = client.document_createfolder(args['folder']) + return CommandResults( + outputs_prefix = 'AnythingLLM.document_createfolder', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def document_delete_command(client: Client, args: dict) -> CommandResults: + response = client.document_delete(args['folder'], args['document']) + return CommandResults( + outputs_prefix = 'AnythingLLM.document_delete', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def document_move_command(client: Client, args: dict) -> CommandResults: + response = client.document_move(args['srcfolder'], args['dstfolder'], args['document']) + return CommandResults( + outputs_prefix = 'AnythingLLM.document_move', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def document_get_command(client: Client, args: dict) -> CommandResults: + response = client.document_get(args['folder'], args['document']) + return CommandResults( + outputs_prefix = 'AnythingLLM.document_move', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def document_upload_file_command(client: Client, args: dict) -> CommandResults: + response = client.document_upload_file(args['fileentry']) + return CommandResults( + outputs_prefix = 'AnythingLLM.upload_file', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def document_upload_link_command(client: Client, args: dict) -> CommandResults: + response = client.document_upload_text( + args['link'], + args['title'], + args['description'], + args['author'], + args['source'] + ) + return CommandResults( + outputs_prefix = 'AnythingLLM.upload_link', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def document_upload_text_command(client: Client, args: dict) -> CommandResults: + response = client.document_upload_text( + args['text'], + args['title'], + args['description'], + args['author'], + args['source'] + ) + return CommandResults( + outputs_prefix = 'AnythingLLM.upload_text', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def workspace_delete_command(client: Client, args: dict) -> CommandResults: + response = client.workspace_delete(args['workspace']) + return CommandResults( + outputs_prefix = 'AnythingLLM.workspace_delete', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def workspace_get_command(client: Client, args: dict) -> CommandResults: + response = client.workspace_get(args['workspace']) + return CommandResults( + outputs_prefix = 'AnythingLLM.workspace_get', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def workspace_list_command(client: Client, args: dict) -> CommandResults: + response = client.workspace_list() + return CommandResults( + outputs_prefix = 'AnythingLLM.workspace_list', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def workspace_new_command(client: Client, args: dict) -> CommandResults: + #if 'workspace' in args: + response = client.workspace_new(args['workspace']) + return CommandResults( + outputs_prefix ='AnythingLLM.workspace_new', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + #msg = f"AnythingLLM: workspace_new_command: missing command arguments [workspace]" + #demisto.debug(msg) + #raise Exception(msg) + + +def 
workspace_chat_command(client: Client, args: dict) -> CommandResults: + response = client.workspace_chat(args['workspace'], args['message'], args['mode']) + return CommandResults( + outputs_prefix ='AnythingLLM.workspace_chat', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def workspace_stream_chat_command(client: Client, args: dict) -> CommandResults: + response = client.workspace_stream_chat(args['workspace'], args['message'], args['mode']) + return CommandResults( + outputs_prefix = 'AnythingLLM.workspace_stream_chat', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def workspace_delete_embedding_command(client: Client, args: dict) -> CommandResults: + response = client.workspace_delete_embedding(args['workspace'], args['folder'], args['document']) + return CommandResults( + outputs_prefix = 'AnythingLLM.workspace_delete_embedding', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def workspace_add_embedding_command(client: Client, args: dict) -> CommandResults: + response = client.workspace_add_embedding(args['workspace'], args['folder'], args['document']) + return CommandResults( + outputs_prefix = 'AnythingLLM.workspace_add_embedding', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def workspace_pin_command(client: Client, args: dict) -> CommandResults: + response = client.workspace_pin(args['workspace'], args['folder'], args['document'], args['status']) + return CommandResults( + outputs_prefix = 'AnythingLLM.workspace_pin', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def workspace_settings_command(client: Client, args: dict) -> CommandResults: + response = client.workspace_settings(args['workspace'], json.loads(args['settings'])) + return CommandResults( + outputs_prefix = 'AnythingLLM.workspace_settings', + readable_output = DictMarkdown(response, ""), + outputs = response + ) + + +def main() -> None: # pragma: no cover + params = demisto.params() + args = demisto.args() + command = demisto.command() + + demisto.debug(f'Command being called is {command}') + + try: + headers = { + 'accept': "application/json", + 'Authorization': f"Bearer {params.get('apikey')['password']}", + 'Content-Type': "application/json" + } + client = Client( + base_url = params.get('url') + "/api", + verify = not params.get('insecure', False), + headers = headers, + proxy = params.get('proxy', False) + ) + + if command == 'test-module': + # This is the call made when pressing the integration Test button. 
+ result = test_module(client, params) + return_results(result) + + #elif command == "anyllm-list": + # return_results(list_command(client, args)) + #elif command == "anyllm-settings": + # return_results(settings_command(client, args)) + + elif command == "anyllm-document-list": + return_results(document_list_command(client, args)) + elif command == "anyllm-document-createfolder": + return_results(document_createfolder_command(client, args)) + elif command == "anyllm-document-get": + return_results(document_get_command(client, args)) + elif command == "anyllm-document-move": + return_results(document_move_command(client, args)) + elif command == "anyllm-document-delete": + return_results(document_delete_command(client, args)) + elif command == "anyllm-document-upload-file": + return_results(document_upload_file_command(client, args)) + elif command == "anyllm-document-upload-link": + return_results(document_upload_link_command(client, args)) + elif command == "anyllm-document-upload-text": + return_results(document_upload_text_command(client, args)) + + elif command == "anyllm-workspace-get": + return_results(workspace_get_command(client, args)) + elif command == "anyllm-workspace-list": + return_results(workspace_list_command(client, args)) + elif command == "anyllm-workspace-new": + return_results(workspace_new_command(client, args)) + elif command == "anyllm-workspace-chat": + return_results(workspace_chat_command(client, args)) + elif command == "anyllm-workspace-stream-chat": + return_results(workspace_stream_chat_command(client, args)) + elif command == "anyllm-workspace-delete-embedding": + return_results(workspace_delete_embedding_command(client, args)) + elif command == "anyllm-workspace-add-embedding": + return_results(workspace_add_embedding_command(client, args)) + elif command == "anyllm-workspace-pin": + return_results(workspace_pin_command(client, args)) + elif command == "anyllm-workspace-delete": + return_results(workspace_delete_command(client, args)) + elif command == "anyllm-workspace-settings": + return_results(workspace_settings_command(client, args)) + else: + raise NotImplementedError(f'Command {command} is not implemented') + except Exception as e: + return_error(f'Failed to execute {command} command.\nError: {str(e)}') + + +if __name__ in ('__main__', '__builtin__', 'builtins'): + main() diff --git a/Packs/AnythingLLM/Integrations/AnythingLLM/AnythingLLM.yml b/Packs/AnythingLLM/Integrations/AnythingLLM/AnythingLLM.yml new file mode 100644 index 00000000000..fff7a1c5fcc --- /dev/null +++ b/Packs/AnythingLLM/Integrations/AnythingLLM/AnythingLLM.yml @@ -0,0 +1,212 @@ +commonfields: + id: AnythingLLM + version: -1 +name: AnythingLLM +display: AnythingLLM +category: Utilities +description: "Retrieval Augmented Generation (RAG) with LLM and Vector DB that can be local for full data privacy or cloud-based for greater functionality.\n\nAPIs are documented at: /api/docs \n\nProduct documentation: https://docs.useanything.com/" +configuration: +- section: Connect + display: AnythingLLM URL (e.g., http://:3001) or https:// + name: url + defaultvalue: http://localhost:3001 + type: 0 + required: true +- section: Collect + display: AnythingLLM API Key + displaypassword: AnythingLLM API Key + name: apikey + type: 9 + required: true + hiddenusername: true +script: + script: '' + type: python + commands: + - name: anyllm-document-upload-file + arguments: + - name: fileentry + required: true + description: 'XSOAR file entry to upload - example: 181@24789.' 
+ description: Uploads an XSOAR file entry to the custom-documents folder. + - name: anyllm-document-upload-link + arguments: + - name: link + required: true + description: 'Web link to upload - example: https://unit42.paloaltonetworks.com/darkgate-malware-uses-excel-files".' + - name: title + required: true + description: Document title to use. + - name: description + required: true + description: Description of the content in the document. + - name: author + required: true + description: Who is the author of the document. + - name: source + required: true + description: What is the source of the document. + description: Uploads a web link to the custom-documents folder. + - name: anyllm-document-upload-text + arguments: + - name: text + required: true + description: Raw text content that is the document. + - name: title + required: true + description: Document title to use when uploading. + - name: description + description: Description of the content in the document. + - name: author + description: Author of the document. + - name: source + description: Source of the document. + description: Upload text content as a document to the custom-documents folder. + - name: anyllm-workspace-new + arguments: + - name: workspace + required: true + description: Name of the workspace to create. + description: Creates a new workspace in AnythingLLM. + - name: anyllm-workspace-delete + arguments: + - name: workspace + required: true + description: Name of the workspace to delete. + description: Deletes an AnythingLLM workspace. + - name: anyllm-workspace-list + arguments: [] + description: List all the workspaces in AnythingLLM. + - name: anyllm-workspace-get + arguments: + - name: workspace + required: true + description: Name of the workspace. + description: Get a specific workspace details. + - name: anyllm-workspace-settings + arguments: + - name: workspace + required: true + description: Name of the workspace. + - name: settings + required: true + description: JSON object for the settings. + description: 'Update workspace settings. Anything LLM APIs are documented at: /api/docs.' + - name: anyllm-workspace-add-embedding + arguments: + - name: workspace + required: true + description: Name of the workspace. + - name: folder + required: true + description: Folder name containing the document. + - name: document + required: true + description: Document name to add as an embedding. + description: Add a document to a workspace and create its vector embedding in the workspace. + - name: anyllm-workspace-delete-embedding + arguments: + - name: workspace + required: true + description: Name of the workspace. + - name: folder + required: true + description: Folder the document originated from. + - name: document + required: true + description: Name of the document to have its embedding deleted. + description: Delete a document embedding from the workspace. + - name: anyllm-document-createfolder + arguments: + - name: folder + required: true + description: Name of the folder to create. + description: Create a new document folder. + - name: anyllm-document-move + arguments: + - name: srcfolder + required: true + description: Name of the source folder. + - name: dstfolder + description: Name of the destination folder. + - name: document + description: Document name to move. + description: Move a document from a source folder to a destination folder. + - name: anyllm-document-delete + arguments: + - name: folder + required: true + description: Name of the folder. 
+ - name: document + required: true + description: Name of the document to delete. + description: Delete a document. + - name: anyllm-workspace-chat + arguments: + - name: workspace + required: true + description: Name of the workspace. + - name: message + required: true + description: Message to send. + - name: mode + required: true + auto: PREDEFINED + predefined: + - query + - chat + description: Mode to chat, query or chat. + description: Send a chat message to a workspace (default thread). Query mode is based on embedded documents in chat, whereas chat mode is more general. + - name: anyllm-workspace-stream-chat + arguments: + - name: workspace + required: true + description: Name of the workspace. + - name: message + required: true + description: Message to send. + - name: mode + required: true + auto: PREDEFINED + predefined: + - query + - chat + description: Chat mode, query or chat. + description: Send a stream chat message to a workspace (default thread). Query mode is based on embedded documents in chat, whereas chat mode is more general. + - name: anyllm-document-list + arguments: [] + description: List all document details. + - name: anyllm-document-get + arguments: + - name: folder + required: true + description: Folder containing the document. + - name: document + required: true + description: Document name. + description: Get a specific document details. + - name: anyllm-workspace-pin + arguments: + - name: workspace + required: true + description: Workspace name. + - name: folder + required: true + description: Folder the document originated from. + - name: document + required: true + description: Document name. + - name: status + required: true + auto: PREDEFINED + predefined: + - 'true' + - 'false' + description: Set pin status to true or false. + description: Set the pinned status of a document embedding. + dockerimage: demisto/python3:3.11.9.104657 + runonce: false + subtype: python3 +fromversion: 6.10.0 +tests: +- No tests (auto formatted) diff --git a/Packs/AnythingLLM/Integrations/AnythingLLM/AnythingLLM_description.md b/Packs/AnythingLLM/Integrations/AnythingLLM/AnythingLLM_description.md new file mode 100644 index 00000000000..beecd941af8 --- /dev/null +++ b/Packs/AnythingLLM/Integrations/AnythingLLM/AnythingLLM_description.md @@ -0,0 +1,5 @@ +## AnythingLLM +- Install AnythingLLM on a Windows/Linux/Mac host +- In AnythingLLM, generate an API key in AnythingLLM +- In the integration instance, configure the **url** parameter to the AnythingLLM host +- In the integration instance, configure the **apikey** parameter \ No newline at end of file diff --git a/Packs/AnythingLLM/Integrations/AnythingLLM/AnythingLLM_image.png b/Packs/AnythingLLM/Integrations/AnythingLLM/AnythingLLM_image.png new file mode 100644 index 00000000000..5c94b27bdfd Binary files /dev/null and b/Packs/AnythingLLM/Integrations/AnythingLLM/AnythingLLM_image.png differ diff --git a/Packs/AnythingLLM/Integrations/AnythingLLM/README.md b/Packs/AnythingLLM/Integrations/AnythingLLM/README.md new file mode 100644 index 00000000000..66d42a6c74a --- /dev/null +++ b/Packs/AnythingLLM/Integrations/AnythingLLM/README.md @@ -0,0 +1,365 @@ +Retrieval Augmented Generation (RAG) with LLM and Vector DB that can be local for full data privacy or cloud-based for greater functionality +## Configure AnythingLLM on Cortex XSOAR + +1. Navigate to **Settings** > **Integrations** > **Servers & Services**. +2. Search for AnythingLLM. +3. Click **Add instance** to create and configure a new integration instance. 
+ + | **Parameter** | **Required** | + | --- | --- | + | AnythingLLM URL (e.g., http://<url to AnythingLLM>:3001) | True | + | AnythingLLM API Key | True | + +4. Click **Test** to validate the URLs, token, and connection. + +## Commands + +You can execute these commands from the Cortex XSOAR CLI, as part of an automation, or in a playbook. +After you successfully execute a command, a DBot message appears in the War Room with the command details. + +### anyllm-document-upload-file + +*** +Uploads an XSOAR file entry to the custom-documents folder + +#### Base Command + +`anyllm-document-upload-file` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | +| fileentry | XSOAR file entry to upload - example: 181@24789. | Required | + +#### Context Output + +There is no context output for this command. +### anyllm-document-upload-link + +*** +Uploads a web link to the custom-documents folder + +#### Base Command + +`anyllm-document-upload-link` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | +| link | Web link to upload - example: https://unit42.paloaltonetworks.com/darkgate-malware-uses-excel-files". | Required | +| title | No description provided. | Required | +| description | No description provided. | Required | +| author | No description provided. | Required | +| source | No description provided. | Required | + +#### Context Output + +There is no context output for this command. +### anyllm-document-upload-text + +*** +Upload text content as a document to the custom-documents folder + +#### Base Command + +`anyllm-document-upload-text` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | +| text | Raw text content that is the document. | Required | +| title | Document title to use when uploading. | Required | +| description | Description of the document. | Optional | +| author | Author of the document. | Optional | +| source | Source of the document. | Optional | + +#### Context Output + +There is no context output for this command. +### anyllm-workspace-new + +*** +Creates a new workspace in AnythingLLM + +#### Base Command + +`anyllm-workspace-new` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | +| workspace | Name of the workspace to create. | Required | + +#### Context Output + +There is no context output for this command. +### anyllm-workspace-delete + +*** +Deletes an AnythingLLM workspace + +#### Base Command + +`anyllm-workspace-delete` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | +| workspace | Name of the workspace to delete. | Required | + +#### Context Output + +There is no context output for this command. +### anyllm-workspace-list + +*** +List all the workspaces in AnythingLLM + +#### Base Command + +`anyllm-workspace-list` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | + +#### Context Output + +There is no context output for this command. +### anyllm-workspace-get + +*** +Get a specific workspace details + +#### Base Command + +`anyllm-workspace-get` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | +| workspace | Name of the workspace. | Required | + +#### Context Output + +There is no context output for this command. 
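+
+Example command to get the details of a workspace (the workspace name below is illustrative only; substitute a workspace that exists in your AnythingLLM instance):
+
+```
+!anyllm-workspace-get workspace="Unit42 Reports"
+```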
+### anyllm-workspace-settings + +*** +Update workspace settings + +#### Base Command + +`anyllm-workspace-settings` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | +| workspace | Name of the workspace. | Required | +| settings | JSON object for the settings. | Required | + +#### Context Output + +There is no context output for this command. +### anyllm-workspace-add-embedding + +*** +Add a document to a workspace and create its vector embedding in the workspace + +#### Base Command + +`anyllm-workspace-add-embedding` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | +| workspace | Name of the workspace. | Required | +| folder | Folder name containing the document. | Required | +| document | Document name to add as an embedding. | Required | + +#### Context Output + +There is no context output for this command. +### anyllm-workspace-delete-embedding + +*** +Delete a document embedding from the workspace + +#### Base Command + +`anyllm-workspace-delete-embedding` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | +| workspace | Name of the workspace. | Required | +| folder | Folder the document originated from. | Required | +| document | Name of the document to have its embedding deleted. | Required | + +#### Context Output + +There is no context output for this command. +### anyllm-document-createfolder + +*** +Create a new document folder + +#### Base Command + +`anyllm-document-createfolder` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | +| folder | Name of the folder to create. | Required | + +#### Context Output + +There is no context output for this command. +### anyllm-document-move + +*** +Move a document from a source folder to a destination folder + +#### Base Command + +`anyllm-document-move` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | +| srcfolder | Name of the source folder. | Required | +| dstfolder | Name of the destination folder. | Optional | +| document | Document name to move. | Optional | + +#### Context Output + +There is no context output for this command. +### anyllm-document-delete + +*** +Delete a document + +#### Base Command + +`anyllm-document-delete` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | +| folder | Name of the folder. | Required | +| document | Name of the document to delete. | Required | + +#### Context Output + +There is no context output for this command. +### anyllm-workspace-chat + +*** +Send a chat message to a workspace (default thread). Query mode is based on embedded documents in chat, whereas chat mode is more general + +#### Base Command + +`anyllm-workspace-chat` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | +| workspace | Name of the workspace. | Required | +| message | Message to send. | Required | +| mode | Mode to chat, query or chat. Possible values are: query, chat. | Optional | + +#### Context Output + +There is no context output for this command. +### anyllm-workspace-stream-chat + +*** +Send a stream chat message to a workspace (default thread). Query mode is based on embedded documents in chat, whereas chat mode is more general + +#### Base Command + +`anyllm-workspace-stream-chat` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | +| workspace | Name of the workspace. 
| Required | +| message | Message to send. | Required | +| mode | Chat mode, query or chat. Possible values are: query, chat. | Optional | + +#### Context Output + +There is no context output for this command. +### anyllm-document-list + +*** +List all document details + +#### Base Command + +`anyllm-document-list` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | + +#### Context Output + +There is no context output for this command. +### anyllm-document-get + +*** +Get a specific document details + +#### Base Command + +`anyllm-document-get` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | +| folder | Folder containing the document. | Required | +| document | Document name. | Required | + +#### Context Output + +There is no context output for this command. +### anyllm-workspace-pin + +*** +Set the pinned status of a document embedding + +#### Base Command + +`anyllm-workspace-pin` + +#### Input + +| **Argument Name** | **Description** | **Required** | +| --- | --- | --- | +| workspace | Workspace name. | Required | +| folder | Folder the document originated from. | Required | +| document | Document name. | Required | +| status | Set pin status to true or false. Possible values are: true, false. | Required | + +#### Context Output + +There is no context output for this command. diff --git a/Packs/AnythingLLM/README.md b/Packs/AnythingLLM/README.md new file mode 100644 index 00000000000..81a2e3d5cc6 --- /dev/null +++ b/Packs/AnythingLLM/README.md @@ -0,0 +1,73 @@ +# Anything LLM +This content pack contains an integration for Anything LLM that supports using Retrieval Augmented Generation (RAG) with an LLM and user documents embedded in a vector DB. The LLM and vector DB can be fully local for maximum data privacy or configured to use cloud-based services such as OpenAI. A variety of LLMs and vector DBs are supported. Anything LLM itself can be installed on customer infrastructure or accessed as a cloud service. + +### Locally Hosted + +#### Example local LLM models: + +* Llama3 +* Llama2 +* Codellama +* Mistral +* Gemma +* Orca + +#### Example local vector DBs: + +* LanceDB +* Chroma +* Milvus + +### Cloud Hosted + +#### Example cloud LLM services: + +* OpenAI +* Google Gemini +* Anthropic +* Cohere +* Hugging Face +* Perplexity + +#### Example cloud vector DB services: + +* Pinecode +* QDrant +* Weaviate + +## Setup + +For local installation of Anything LLM, install it on a host running: + +* Linux +* Windows +* Mac + +Once Anything LLM is installed: + +* Generate an API key for the XSOAR integration +* Activate your selected LLM and vector DB +* Configure the XSOAR integration instance with **url** and **apikey** + +## Use + +For the most accurated results, **query** mode is recommended for chats. This preloads the chat context based on the initial query with similar results from documents embedded in a workspace and avoids most hallucinations. In a large document, **query** mode does not ensure a complete answer depending on the number of times the query topic is mentioned in the embedded documents and limits on the number of returned similar results and size of the context window in the selected LLM. Text splitting and chunking can be adjusted from the defaults to better support a specific use case. Adjusting the **similarityThreshold** and **topN** settings in a workspace are often beneficial to optimize the workspace for a use case. 
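+
+For example, a query-mode chat against a workspace could look like the following (the workspace name and message are illustrative only):
+
+```
+!anyllm-workspace-chat workspace="Unit42 Reports" message="Summarize the DarkGate malware delivery techniques described in the embedded reports" mode="query"
+```
+
+The **similarityThreshold** and **topN** values that control which embedded document chunks are pulled into the context for such a query can be tuned with the workspace settings command described below.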
+ +#### Update Workspace Settings + +The following JSON keys are currently supported for updating: + +* name - workspace name +* openAiTemp - LLM temperature (0 - 1) where 1 is more creative and 0 is more repeatable +* openAiHistory - chat history length to keep in context +* openAiPrompt - prompt +* similarityThreshold - vector DB similarity (None, 0.25, 0.50, 0.75) +* topN - top N similar results to return to chat context (1 - 12) +* chatMode - query mode focuses on using the embedded document data, chat mode is traditional LLM chatting (query, chat) +* queryRefusalResponse - message to respond with when similar results are not found in embedded documents + +Example command to update workspace settings: + +``` +!anyllm-workspace-settings workspace="Unit42 Reports" settings="{\"openAiTemp\": \"0.30\", \"similarityThreshold\": \"0.50\", \"openAiHistory\": \"35\", \"topN\": \"8\", \"chatMode\": \"query\"}" +``` diff --git a/Packs/AnythingLLM/pack_metadata.json b/Packs/AnythingLLM/pack_metadata.json new file mode 100644 index 00000000000..df9d841e157 --- /dev/null +++ b/Packs/AnythingLLM/pack_metadata.json @@ -0,0 +1,23 @@ +{ + "name": "Anything LLM", + "description": "This content pack contains an integration for Anything LLM that supports the use of Retrieval Augmented Generation (RAG) with an LLM and vector DB. The LLM and vector DB can be fully local for maximum data privacy or configured to use cloud-based services such as OpenAI. A large range of LLMs and vector DBs are supported. ", + "support": "community", + "currentVersion": "1.0.0", + "author": "Randy Uhrlaub", + "url": "", + "email": "", + "created": "2024-07-16T14:45:21Z", + "categories": [ + "Utilities" + ], + "tags": [], + "useCases": [], + "keywords": [], + "marketplaces": [ + "xsoar", + "marketplacev2" + ], + "githubUser": [ + "rurhrlaub" + ] +} \ No newline at end of file