diff --git a/.github/workflows/logs.yml b/.github/workflows/logs.yml new file mode 100644 index 0000000..22f0739 --- /dev/null +++ b/.github/workflows/logs.yml @@ -0,0 +1,26 @@ +name: Check for console.logs + +on: + pull_request: + branches: + - main + +jobs: + check-logs: + runs-on: ubuntu-latest + steps: + - name: get the sources + uses: actions/checkout@v2 + + - name: Set up Node.js + uses: actions/setup-node@v2 + with: + node-version: '20' + + - name: Install dependencies + run: npm ci + + - name: Run check script + run: node .github/workflows/create-check.js + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/manifest.json b/manifest.json index 07512a0..053c399 100644 --- a/manifest.json +++ b/manifest.json @@ -4,7 +4,7 @@ "description": "Interact with your privacy focused assistant, leveraging Ollama or OpenAI, making your second brain even smarter.", "author": "Leo310, nicobrauchtgit", "authorUrl": "https://github.com/nicobrauchtgit", - "version": "0.3.2", + "version": "0.4.2", "minAppVersion": "1.5.0", "isDesktopOnly": true } diff --git a/src/SmartSecondBrain.ts b/src/SmartSecondBrain.ts index f9e4d63..cd5a44c 100644 --- a/src/SmartSecondBrain.ts +++ b/src/SmartSecondBrain.ts @@ -6,6 +6,7 @@ import { isOllamaRunning, getOllamaModels } from './controller/Ollama'; import { isAPIKeyValid } from './controller/OpenAI'; import Log, { LogLvl } from './logging'; import { data, papaState, errorState, papaIndexingProgress, chatHistory, serializeChatHistory, runState, runContent } from './store'; +import { _ } from 'svelte-i18n'; export default class SmartSecondBrain { private papa: Papa; @@ -20,23 +21,29 @@ export default class SmartSecondBrain { async init() { const d = get(data); - if (get(papaState) === 'running') return new Notice('Smart Second Brain is still running.', 4000); + const t = get(_); + if (get(papaState) === 'running') return new Notice(t('notice.still_running'), 4000); else if (get(papaState) === 'indexing' || get(papaState) === 
'loading') { - return new Notice('Please wait for the indexing to finish', 4000); + return new Notice(t('notice.still_indexing'), 4000); } else if (d.isIncognitoMode && !(await isOllamaRunning())) { papaState.set('error'); errorState.set('ollama-not-running'); - return new Notice('Please make sure Ollama is running before initializing Smart Second Brain.', 4000); + return new Notice(t('notice.ollama_not_running'), 4000); } else if (d.isIncognitoMode) { const models = await getOllamaModels(); if (!models.includes(d.ollamaGenModel.model)) { papaState.set('error'); - errorState.set('ollama-model-not-installed'); - return new Notice('Ollama model not installed. Please install the model before initializing Smart Second Brain.', 4000); + errorState.set('ollama-gen-model-not-installed'); + return new Notice(t('notice.ollama_gen_model'), 4000); + } + if (!models.includes(d.ollamaEmbedModel.model)) { + papaState.set('error'); + errorState.set('ollama-embed-model-not-installed'); + return new Notice(t('notice.ollama_embed_model'), 4000); } } else if (!d.isIncognitoMode && !(await isAPIKeyValid(d.openAIGenModel.openAIApiKey))) { papaState.set('error'); - return new Notice('Please make sure OpenAI API Key is valid before initializing Smart Second Brain.', 4000); + return new Notice(t('notice.openai_key'), 4000); } if (get(papaState) !== 'indexing-pause') { papaState.set('loading'); @@ -58,7 +65,7 @@ export default class SmartSecondBrain { } catch (e) { Log.error(e); papaState.set('error'); - return new Notice('Failed to initialize Smart Second Brain (Error: ' + e + '). 
Please retry.', 4000); + return new Notice(t('notice.failed', { values: { error: e } }), 4000); } // check if vector store data exists if (await this.app.vault.adapter.exists(this.getVectorStorePath())) { @@ -75,7 +82,6 @@ export default class SmartSecondBrain { }) ); papaState.set('indexing'); - // const embedNotice = new Notice('Indexing notes into your smart second brain...', 0); let needsSave = false; try { for await (const result of this.papa.embedDocuments(docs)) { @@ -93,23 +99,24 @@ export default class SmartSecondBrain { Log.error(e); papaState.set('error'); // TODO add error state - new Notice('Failed to index notes into your smart second brain. Please retry.', 4000); + new Notice(t('notice.failed_indexing'), 4000); } this.needsToSaveVectorStoreData = needsSave; this.saveVectorStoreData(); if (get(papaIndexingProgress) === 100) { - new Notice('Smart Second Brain initialized.', 2000); + new Notice(t('notice.done'), 2000); papaIndexingProgress.set(0); papaState.set('idle'); } } canRunPapa() { - if (get(papaState) === 'running') return new Notice('Please wait for the current query to finish', 4000) && false; + const t = get(_); + if (get(papaState) === 'running') return new Notice(t('notice.still_running'), 4000) && false; else if (get(papaState) === 'indexing' || get(papaState) === 'indexing-pause' || get(papaState) === 'loading') - return new Notice('Please wait for the indexing to finish', 4000) && false; - else if (get(papaState) === 'error') return new Notice('Please wait for the error to resolve', 4000) && false; - else if (get(papaState) !== 'idle') return new Notice('Please initialize your Smart Second Brain first', 4000) && false; + return new Notice(t('notice.still_indexing'), 4000) && false; + else if (get(papaState) === 'error') return new Notice(t('notice.error'), 4000) && false; + else if (get(papaState) !== 'idle') return new Notice(t('notice.not_initialized'), 4000) && false; return true; } diff --git a/src/components/Chat/Chat.svelte 
b/src/components/Chat/Chat.svelte index 8f49d85..6fb51e0 100644 --- a/src/components/Chat/Chat.svelte +++ b/src/components/Chat/Chat.svelte @@ -5,6 +5,7 @@ import { afterUpdate } from 'svelte'; import DotAnimation from '../base/DotAnimation.svelte'; import MessageContainer from './MessageContainer.svelte'; + import { t } from 'svelte-i18n'; import { papaState, chatHistory, @@ -45,7 +46,7 @@ }); $: if ($runState === 'retrieving' && $runContent == '0') { - new Notice('No notes retrieved. Maybe lower the similarity threshold.'); + new Notice($t('notice.no_notes_retrieved')); } let editElem: HTMLSpanElement; @@ -90,11 +91,11 @@
{#if $isEditing && editMessageId === message.id} - + {:else} wrapperEditMessage(message, textarea)} use:icon={'pencil-line'} @@ -105,23 +106,17 @@ - +
{#if !$isEditingAssistantMessage} - toClipboard(message.content)} use:icon={'copy'} /> + toClipboard(message.content)} use:icon={'copy'} /> {#if $chatHistory.indexOf(message) !== 0} redoGeneration(message)} use:icon={'refresh-cw'} @@ -131,7 +126,7 @@ editInitialAssistantMessage(message.content, textarea)} use:icon={'pencil-line'} @@ -141,7 +136,7 @@ cancelEditingInitialAssistantMessage(initialAssistantMessageSpan)} use:icon={'x-circle'} @@ -149,7 +144,7 @@ resetInitialAssistantMessage(initialAssistantMessageSpan)} use:icon={'rotate-ccw'} @@ -164,9 +159,9 @@ {#if $runState === 'startup'} {:else if $runState === 'retrieving'} -

Retrieving

+

{$t('chat.retrieving')}

{:else if $runState === 'reducing'} -

Reducing {$runContent} Notes

+

{$t('chat.reducing', { values: { num: $runContent } })}

{:else if $runState === 'generating' && $runContent} diff --git a/src/components/Chat/Input.svelte b/src/components/Chat/Input.svelte index 24f9219..fd0a0fb 100644 --- a/src/components/Chat/Input.svelte +++ b/src/components/Chat/Input.svelte @@ -1,7 +1,6 @@
{#if $chatHistory.length > 1} -
$plugin.saveChat()} - hidden={$papaState === 'running'} - /> +
$plugin.saveChat()} hidden={$papaState === 'running'} /> {/if}
+ > + +
{#if $chatHistory.length > 1} {:else if $papaState === 'indexing-pause'} -

Indexing vault

+

{$t('quick_settings.indexing_vault')}

{:else if $papaState === 'error'} - {#if $errorState === 'ollama-model-not-installed'} -

Install {$data.ollamaGenModel.model} first.

- ($papaState = 'settings-change')} /> + {#if $errorState === 'ollama-gen-model-not-installed'} +

{$t('install_model', { values: { model: $data.ollamaGenModel.model } })}

+ ($papaState = 'settings-change')} /> + {:else if $errorState === 'ollama-embed-model-not-installed'} +

{$t('install_model', { values: { model: $data.ollamaEmbedModel.model } })}

+ ($papaState = 'settings-change')} /> {:else} -

An error occured.
Please retry initialization...

+

{$t('quick_settings.error.other')}

+ diff --git a/src/components/Onboarding/OllamaApp.svelte b/src/components/Onboarding/OllamaApp.svelte index dd3cffc..4d44dae 100644 --- a/src/components/Onboarding/OllamaApp.svelte +++ b/src/components/Onboarding/OllamaApp.svelte @@ -3,6 +3,8 @@ import { renderMarkdown, icon } from '../../controller/Messages'; import { isOllamaRunning } from '../../controller/Ollama'; import OllamaSetup from './OllamaSetup.svelte'; + import { t } from 'svelte-i18n'; + export let osType: string; let isRunning: boolean = false; @@ -11,17 +13,17 @@
  1. - Download the App - here + {$t('onboarding.ollama.app.download')} + {$t('onboarding.ollama.app.download_link')}
  2. {#if osType === 'Darwin'} -
  3. Extract the .zip and start Ollama
  4. +
  5. {$t('onboarding.ollama.app.extract')}
  6. {:else} -
  7. Run the setup.exe
  8. +
  9. {$t('onboarding.ollama.app.run')}
  10. {/if}
  11. - Test if Ollama is running: + {$t('onboarding.ollama.app.test_label')}
    {#if isOllamaTested} {#if isRunning} @@ -32,12 +34,12 @@ {/if} {#if !isRunning} {$t('onboarding.test')} {/if}
    @@ -45,14 +47,14 @@
  12. {#if isRunning} {#if osType === 'Darwin'} -
  13. Set Ollama origins to enable streaming responses
  14. +
  15. {$t('onboarding.ollama.app.set_origins')}
  16. - Restart the Ollama service + {$t('onboarding.ollama.app.restart')}
  17. {:else} -
  18. Quit Ollama
  19. -
  20. Start the Ollama service with origins
  21. +
  22. {$t('onboarding.ollama.app.quit')}
  23. +
  24. {$t('onboarding.ollama.app.start_origins')}
  25. {/if} diff --git a/src/components/Onboarding/OllamaDaemon.svelte b/src/components/Onboarding/OllamaDaemon.svelte index d8081e7..b3038cd 100644 --- a/src/components/Onboarding/OllamaDaemon.svelte +++ b/src/components/Onboarding/OllamaDaemon.svelte @@ -5,6 +5,7 @@ import { plugin, data } from '../../store'; import { changeOllamaBaseUrl } from '../../controller/Ollama'; import OllamaSetup from './OllamaSetup.svelte'; + import { t } from 'svelte-i18n'; export let osType: string; @@ -15,7 +16,7 @@
      -
    1. Install Ollama
    2. +
    3. {$t('onboarding.ollama.deamon.install')}
    4. {#if osType === 'Darwin'}
      {:else if osType === 'Linux'} @@ -27,11 +28,11 @@
    5. - Set the BaseUrl + {$t('onboarding.ollama.deamon.set_baseurl')}
    6. -
    7. Start the Ollama service with origins
    8. +
    9. {$t('onboarding.ollama.deamon.start')}
    diff --git a/src/components/Onboarding/OllamaSetup.svelte b/src/components/Onboarding/OllamaSetup.svelte index d8e0522..f4cdeaf 100644 --- a/src/components/Onboarding/OllamaSetup.svelte +++ b/src/components/Onboarding/OllamaSetup.svelte @@ -6,25 +6,27 @@ import { plugin, data } from '../../store'; import DropdownComponent from '../base/Dropdown.svelte'; import { isOllamaOriginsSet } from '../../controller/Ollama'; + import { t } from 'svelte-i18n'; import PullOllamaModel from './PullOllamaModel.svelte'; - let model: string = ''; let ollamaModels: string[] = []; + let model: string = ''; let ollamaModelComponent: DropdownComponent; let pullModel = 'nomic-embed-text'; let isOriginsTested: boolean = false; let isOrigin: boolean = false; - onMount(() => { + onMount(async () => { $data.isIncognitoMode = true; $plugin.saveSettings(); }); + $: if (ollamaModelComponent && ollamaModels.some((model) => model === $data.ollamaEmbedModel.model)) model = $data.ollamaEmbedModel.model;
  26. - Test if the origins are set correctly: + {$t('onboarding.ollama.test_origins')}
    {#if isOriginsTested} {#if isOrigin} @@ -40,7 +42,7 @@ isOrigin = await isOllamaOriginsSet(); isOriginsTested = true; ollamaModels = await getOllamaModels(); - }}>Test{$t('onboarding.test')} {/if}
    @@ -48,24 +50,24 @@
  27. {#if isOrigin}
  28. - Install an Ollama Embedding Model.
    + {$t('onboarding.ollama.install_model')}
    - Recomended: + {$t('onboarding.ollama.recommended_models')}
    - + (ollamaModels = await getOllamaModels())} />
  29. - {#if ollamaModels.length} + {#if ollamaModels.length > 0}
  30. - Set your embed Model: + {$t('onboarding.ollama.set_model')}
    diff --git a/src/components/Onboarding/Onboarding.svelte b/src/components/Onboarding/Onboarding.svelte index ebbf6e2..c1e6bdd 100644 --- a/src/components/Onboarding/Onboarding.svelte +++ b/src/components/Onboarding/Onboarding.svelte @@ -5,8 +5,9 @@ import AppComponent from './OllamaApp.svelte'; import OpenAiComponent from './OpenAI.svelte'; import DaemonComponent from './OllamaDaemon.svelte'; - import { icon } from '../../controller/Messages'; import IncognitoToggle from '../Settings/IncognitoToggle.svelte'; + import { t } from 'svelte-i18n'; + import Logo from '../base/Logo.svelte'; const osType = os.type(); @@ -15,12 +16,15 @@
    -
    -

    Setup

    +
    + +
    +

    {$t('onboarding.setup')}

    + {#if $data.isIncognitoMode}

    - Your assistant is running in privacy mode. That means it is not connected to the internet and is running fully locally by leveraging Ollama. + {$t('onboarding.privacy_mode_note')}

    {#if osType === 'Darwin'} @@ -32,8 +36,7 @@ {/if} {:else}

    - Your assistant is using third party services to run. That means you will have to share all your personal information with these services and your - Smart Second Brain needs to be connected to the internet to leverage OpenAIs large language models like ChatGPT. + {$t('onboarding.openai_mode_note')}

    {/if} diff --git a/src/components/Onboarding/OpenAI.svelte b/src/components/Onboarding/OpenAI.svelte index 53574a8..7ea1bd9 100644 --- a/src/components/Onboarding/OpenAI.svelte +++ b/src/components/Onboarding/OpenAI.svelte @@ -5,6 +5,7 @@ import { isAPIKeyValid } from '../../controller/OpenAI'; import { plugin, data } from '../../store'; import InitButtonComponent from './InitButton.svelte'; + import { t } from 'svelte-i18n'; let openAIApiKey: string = $data.openAIGenModel.openAIApiKey; let isValid: boolean = false; @@ -20,44 +21,40 @@
    1. - Create an OpenAI - account + {$t('onboarding.openai.create_account')} + {$t('onboarding.openai.create_account_link')}
    2. - Create an - API Key + {$t('onboarding.openai.create_api_key')} + {$t('onboarding.openai.create_api_key_link')}
    3. -
      [!Warning] Activate API-Key \n> For the API-Key to work you might have to upgrade to an OpenAI paid account. This means depositing at least $5 onto your OpenAI account. This might change in the future.')} - /> +
    4. - Paste the Key here: + {$t('onboarding.openai.paste_api_key')}
    5. - Test your API Key: + {$t('onboarding.openai.test_api_key')}
      {#if isKeyTested} {#if isValid}
      -

      Api Key is valid!

      +

      {$t('onboarding.openai.api_key_valid')}

      {:else}
      {/if} {/if} {#if !isValid} {$t('onboarding.test')} {/if}
      diff --git a/src/components/Onboarding/PullOllamaModel.svelte b/src/components/Onboarding/PullOllamaModel.svelte index 2d0696f..6321a9a 100644 --- a/src/components/Onboarding/PullOllamaModel.svelte +++ b/src/components/Onboarding/PullOllamaModel.svelte @@ -1,10 +1,13 @@ + + + + diff --git a/src/components/Settings/ConfirmModal.ts b/src/components/Settings/ConfirmModal.ts index 678afe2..9ed911b 100644 --- a/src/components/Settings/ConfirmModal.ts +++ b/src/components/Settings/ConfirmModal.ts @@ -1,41 +1,45 @@ -import { App, ButtonComponent, Modal } from 'obsidian'; +import { App, Modal } from 'obsidian'; +import ConfirmComponent from './ConfirmModal.svelte'; +import { get } from 'svelte/store'; +import { data } from '../../store'; +import type { PluginDataKey } from '../../main'; export class ConfirmModal extends Modal { result: string; + title: string; + content: string; + hideModalOption: PluginDataKey | ''; + component: ConfirmComponent; onSubmit: (result: string) => void; - constructor(app: App, onSubmit: (result: string) => void) { + constructor(app: App, title: string, content: string, onSubmit: (result: string) => void, hideModalOption: PluginDataKey | '' = '') { super(app); + this.title = title; + this.content = content; this.onSubmit = onSubmit; + this.hideModalOption = hideModalOption; + } + + activate() { + if (this.hideModalOption !== '' && get(data)[this.hideModalOption]) { + this.onSubmit('Yes'); + return; + } + this.open(); } onOpen() { this.modalEl.parentElement.addClass('mod-confirmation'); - - this.setTitle('Clear Plugin Data'); - - this.setContent( - 'Are you sure you want to delete the plugin data? Note that only the plugin data and the vector store data will be removed. All chat files inside your vault will not be affected.' 
- ); - - const test = this.modalEl.createDiv({ cls: 'modal-button-container' }); - - new ButtonComponent(test) - .setButtonText('Yes') - .setWarning() - .onClick(() => { - this.close(); - this.onSubmit('Yes'); - }); - - new ButtonComponent(test).setButtonText('No').onClick(() => { - this.close(); - this.onSubmit('No'); + this.component = new ConfirmComponent({ + target: this.contentEl, + props: { + modal: this, + }, }); } onClose() { - let { contentEl } = this; + const { contentEl } = this; contentEl.empty(); } } diff --git a/src/components/Settings/FFExclude.svelte b/src/components/Settings/FFExclude.svelte index 22f04a5..4e3abde 100644 --- a/src/components/Settings/FFExclude.svelte +++ b/src/components/Settings/FFExclude.svelte @@ -4,6 +4,7 @@ import { FileSelectModal } from './FuzzyModal'; import ButtonComponent from '../base/Button.svelte'; import type { FuzzySuggestModal, TAbstractFile } from 'obsidian'; + import { t } from 'svelte-i18n'; let buttonComp: ButtonComponent; @@ -14,4 +15,4 @@ }); - fileSelectModal.open()} buttonText="Add" /> + fileSelectModal.open()} buttonText={$t('settings.excludeff_add')} /> diff --git a/src/components/Settings/IncognitoToggle.svelte b/src/components/Settings/IncognitoToggle.svelte index 652a3c1..18a7ee2 100644 --- a/src/components/Settings/IncognitoToggle.svelte +++ b/src/components/Settings/IncognitoToggle.svelte @@ -2,15 +2,15 @@ import { Notice } from 'obsidian'; import { plugin, data, papaState, type PapaState } from '../../store'; import { crossfade } from 'svelte/transition'; + import { t } from 'svelte-i18n'; const [send, recieve] = crossfade({ duration: 500 }); let oldPapaState: PapaState; function setIncognitoMode(incognito: boolean) { if (incognito === $data.isIncognitoMode) return; - if ($papaState === 'running') return new Notice('Please wait for the current query to finish', 4000); - else if ($papaState === 'indexing' || $papaState === 'indexing-pause' || $papaState === 'loading') - return new Notice('Please wait 
for the indexing to finish', 4000); + if ($papaState === 'running') return new Notice($t('notice.still_running'), 4000); + else if ($papaState === 'indexing' || $papaState === 'indexing-pause' || $papaState === 'loading') return new Notice($t('notice.still_indexing'), 4000); $data.isIncognitoMode = incognito; $plugin.saveSettings(); if ($papaState === 'mode-change') { @@ -30,7 +30,7 @@ class="flex h-full flex-col items-center justify-center rounded-md p-2 text-center font-bold hover:bg-[--background-modifier-hover]" on:click={() => setIncognitoMode(true)} > - Run on your machine + {$t('incognito_toggle.enable')} {#if $data.isIncognitoMode}
      {/if} @@ -41,7 +41,7 @@ class="flex h-full flex-col items-center justify-center rounded-md p-2 text-center font-bold hover:bg-[--background-modifier-hover]" on:click={() => setIncognitoMode(false)} > - Run via Third-Parties + {$t('incognito_toggle.disable')} {#if !$data.isIncognitoMode}
      {/if} diff --git a/src/components/Settings/Ollama.svelte b/src/components/Settings/Ollama.svelte index eb98ddb..e137191 100644 --- a/src/components/Settings/Ollama.svelte +++ b/src/components/Settings/Ollama.svelte @@ -1,16 +1,17 @@ - + { isRunning = await isOllamaRunning(); - if (!isRunning) return new Notice('Ollama is not running', 4000); + if (!isRunning) return new Notice($t('notice.ollama_not_running'), 4000); installedOllamaModels = await getOllamaModels(); ollamaModels = [...new Set(installedOllamaModels.concat(OllamaGenModelNames).concat(OllamaEmbedModelNames))]; }} /> - - + { await changeOllamaBaseUrl(newBaseUrl); isRunning = await isOllamaRunning(); + styleOllamaBaseUrl = isRunning ? '' : '!bg-[--background-modifier-error]'; }} /> -{#if isRunning} - - - - - - - - -{/if} + + + + + + + + diff --git a/src/components/Settings/OpenAI.svelte b/src/components/Settings/OpenAI.svelte index c500801..cd16d4f 100644 --- a/src/components/Settings/OpenAI.svelte +++ b/src/components/Settings/OpenAI.svelte @@ -4,16 +4,19 @@ import SettingContainer from './SettingContainer.svelte'; import DropdownComponent from '../base/Dropdown.svelte'; import { isAPIKeyValid } from '../../controller/OpenAI'; - import { OpenAIEmbedModels, OpenAIGenModels, OpenAIGenModelNames, OpenAIEmbedModelNames } from './models'; + import { OpenAIGenModels, OpenAIGenModelNames, OpenAIEmbedModelNames } from './models'; import { onMount } from 'svelte'; + import { t } from 'svelte-i18n'; let openAIApiKey: string; let isOpenAIAPIKeyValid = false; + let apiKeyStyles: string = ''; onMount(async () => { isOpenAIAPIKeyValid = await isAPIKeyValid($data.openAIGenModel.openAIApiKey); openAIApiKey = $data.openAIGenModel.openAIApiKey; hideApiKey(); + apiKeyStyles = openAIApiKey && !isOpenAIAPIKeyValid ? 
'!bg-[--background-modifier-error]' : ''; }); const changeApiKey = async (newApiKey: string) => { @@ -24,6 +27,7 @@ $data.openAIEmbedModel.openAIApiKey = newApiKey; $plugin.saveSettings(); $papaState = 'settings-change'; + apiKeyStyles = openAIApiKey && !isOpenAIAPIKeyValid ? '!bg-[--background-modifier-error]' : ''; }; const hideApiKey = () => { @@ -48,34 +52,32 @@ }; - + - - - + + + + + + ({ display: model, value: model }))} + changeFunc={openAIGenChange} + /> + + + + ({ display: model, value: model }))} + changeFunc={openAIEmbedChange} + /> - -{#if isOpenAIAPIKeyValid} - - - ({ display: model, value: model }))} - changeFunc={openAIGenChange} - /> - - - - ({ display: model, value: model }))} - changeFunc={openAIEmbedChange} - /> - -{/if} diff --git a/src/components/Settings/SettingContainer.svelte b/src/components/Settings/SettingContainer.svelte index 5e27991..a842335 100644 --- a/src/components/Settings/SettingContainer.svelte +++ b/src/components/Settings/SettingContainer.svelte @@ -1,13 +1,14 @@ -
      +
      -
      {settingName}
      -
      {settingDesc}
      +
      {name}
      +
      {desc}
      diff --git a/src/components/Settings/Settings.svelte b/src/components/Settings/Settings.svelte index f080dd8..83eceda 100644 --- a/src/components/Settings/Settings.svelte +++ b/src/components/Settings/Settings.svelte @@ -1,8 +1,8 @@ - + {#if $data.excludeFF.length !== 0}
      @@ -67,10 +73,10 @@ deleteFolder(ff)} + on:click={() => deleteExcludeFF(ff)} />
      @@ -101,22 +107,24 @@
      - Advanced Settings + {$t('settings.advanced')} - + + + + - - - $plugin.clearPluginData()} /> + + $plugin.clearPluginData()} /> - - + + - +
      diff --git a/src/components/Settings/models.ts b/src/components/Settings/models.ts index ebbdef6..ac9f5f7 100644 --- a/src/components/Settings/models.ts +++ b/src/components/Settings/models.ts @@ -1,72 +1,56 @@ -// TODO translate model descriptions export const OpenAIGenModels = { 'gpt-3.5-turbo': { contextWindow: 4096, - description: 'GPT-3.5 Turbo (4096 Tokens)', }, 'gpt-3.5-turbo-1106': { contextWindow: 16385, - description: 'Latest GPT-3.5 Turbo (16385 Tokens)', }, 'gpt-4': { contextWindow: 8192, - description: 'GPT-4 (8192 Tokens)', }, 'gpt-4-32k': { contextWindow: 32768, - description: 'GPT-4 (32768 Tokens)', }, 'gpt-4-1106-preview': { contextWindow: 128000, - description: 'Latest GPT-4 (128000 Tokens)', }, }; export const OllamaGenModels = { llama2: { contextWindow: 4096, - description: 'Llama 2 is a collection of foundation language models ranging from 7B to 70B parameters.', }, 'llama2-uncensored': { contextWindow: 4096, - description: 'Uncensored Llama 2 model by George Sung and Jarrad Hope.', }, mistral: { contextWindow: 8000, - description: 'The 7B model released by Mistral AI, updated to version 0.2.', }, 'mistral-openorca': { contextWindow: 8000, - description: 'Mistral OpenOrca is a 7 billion parameter model, fine-tuned on top of the Mistral 7B model using the OpenOrca dataset.', }, gemma: { contextWindow: 8000, - description: 'Gemma is a family of lightweight, state-of-the-art open models built by Google DeepMind.', }, mixtral: { contextWindow: 32000, - description: 'A high-quality Mixture of Experts (MoE) model with open weights by Mistral AI.', }, 'dolphin-mixtral': { contextWindow: 32000, - description: 'Dolphin Mixtral (32000 Tokens)', }, phi: { contextWindow: 2048, - description: 'Phi-2: a 2.7B language model by Microsoft Research that demonstrates outstanding reasoning and language understanding capabilities.', }, }; export const OpenAIEmbedModels = { 'text-embedding-ada-002': { contextWindow: 8191, - description: 'Text Embedding ADA 
002', }, }; export const OllamaEmbedModels = { 'nomic-embed-text': { - description: 'A high-performing open embedding model with a large token context window.', contextWindow: 8192, }, }; diff --git a/src/components/base/Logo.svelte b/src/components/base/Logo.svelte new file mode 100644 index 0000000..394edb5 --- /dev/null +++ b/src/components/base/Logo.svelte @@ -0,0 +1,114 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/components/base/Text.svelte b/src/components/base/Text.svelte index 158a947..eefc81a 100644 --- a/src/components/base/Text.svelte +++ b/src/components/base/Text.svelte @@ -10,7 +10,7 @@ {#if inputType === 'text'} { export const toClipboard = (messageText: string) => { navigator.clipboard.writeText(messageText); - new Notice(`Copied to clipboard:\n${messageText}`); + new Notice(get(t)('notice.copied_to_clipboard', { values: { text: messageText } }), 4000); }; export const addMessage = (role: 'Assistant' | 'User', content: string) => { diff --git a/src/controller/Ollama.ts b/src/controller/Ollama.ts index 3461540..b6e48a1 100644 --- a/src/controller/Ollama.ts +++ b/src/controller/Ollama.ts @@ -1,15 +1,17 @@ -import { requestUrl } from 'obsidian'; -import { plugin as p, data, papaState } from '../store'; +import { Notice, requestUrl } from 'obsidian'; +import { plugin as p, data, papaState, cancelPullModel } from '../store'; import { get } from 'svelte/store'; import Log from '../logging'; export async function isOllamaRunning() { const d = get(data); try { - const response = await requestUrl(d.ollamaGenModel.baseUrl + '/api/tags'); + const url = new URL(d.ollamaGenModel.baseUrl); + const response = await requestUrl(url + '/api/tags'); if (response.status === 200) { return true; } else { + console.log(d.ollamaGenModel.baseUrl); Log.debug(`IsOllamaRunning, Unexpected status code: ${response.status}`); return false; } @@ -53,20 +55,30 @@ export async function getOllamaModels(): Promise { } } +export async 
function deleteOllamaModels(): Promise { + const d = get(data); + try { + const modelsRes = await requestUrl({ + url: d.ollamaGenModel.baseUrl + '/api/delete', + method: 'DELETE', + headers: { + 'Content-Type': 'application/json', + }, + }); + //TODO langugage + modelsRes.status === 404 ? new Notice('No models to delete') : new Notice('Models deleted'); + } catch (error) { + Log.debug('Ollama is not running', error); + } +} + export const changeOllamaBaseUrl = async (newBaseUrl: string) => { const d = get(data); const plugin = get(p); newBaseUrl.trim(); if (newBaseUrl.endsWith('/')) newBaseUrl = newBaseUrl.slice(0, -1); - try { - // check if url is valid - new URL(newBaseUrl); - d.ollamaGenModel.baseUrl = newBaseUrl; - d.ollamaEmbedModel.baseUrl = newBaseUrl; - //styleOllamaBaseUrl = 'bg-[--background-modifier-form-field]'; - } catch (_) { - //styleOllamaBaseUrl = 'bg-[--background-modifier-error]'; - } + d.ollamaGenModel.baseUrl = newBaseUrl; + d.ollamaEmbedModel.baseUrl = newBaseUrl; await plugin.saveSettings(); papaState.set('settings-change'); }; @@ -91,8 +103,16 @@ export async function* pullOllamaModel(model: string) { const decoder = new TextDecoder(); let buffer = ''; + cancelPullModel.subscribe((value: boolean) => { + if (value) { + reader.cancel(); + new Notice('Model pull cancelled', 1000); + } + }); + while (true) { const { done, value } = await reader.read(); + if (done) break; // Exit the loop when no more data const chunkText = decoder.decode(value, { stream: true }); diff --git a/src/lang/en.json b/src/lang/en.json index 00d44b5..68d2ba6 100644 --- a/src/lang/en.json +++ b/src/lang/en.json @@ -1,6 +1,165 @@ { - "assistant_language": "Assistant language", - "excludeff": "Exclude files and folders", - "incognito_mode": "Incognito Mode", - "init_s2b": "Initialize Smart Second Brain" + "modal": { + "dont_show": "Don't show this again", + "cancel": "Cancel", + "confirm": "Confirm" + }, + "init_third_party_modal": { + "title": "Run via Third-Party 
Services", + "description": "Are you sure you want to run via third-parties? Your personal data will be shared with third-party services like OpenAI." + }, + "notice": { + "still_running": "Smart Second Brain is still running. Please wait.", + "still_indexing": "Smart Second Brain is still indexing. Please wait.", + "ollama_not_running": "Please make sure Ollama is running before initializing Smart Second Brain.", + "ollama_gen_model": "Ollama chat model not installed. Please install the model before initializing Smart Second Brain.", + "ollama_embed_model": "Ollama embedding model not installed. Please install the model before initializing Smart Second Brain.", + "openai_key": "Please make sure your OpenAI API Key is valid before initializing Smart Second Brain.", + "failed": "Failed to initialize Smart Second Brain (Error: {error}). Please retry.", + "failed_indexing": "Failed to index notes into your smart second brain. Please retry.", + "done": "Smart Second Brain initialized.", + "error": "An error occurred. Please wait for the error to resolve.", + "not_initialized": "Smart Second Brain not initialized. Please initialize first.", + "plugin_data_cleared": "Plugin data cleared. Please reload the plugin.", + "api_key_invalid": "API Key is invalid!", + "error_pulling_model": "Failed to pull model from Ollama (Error: {error}). Please retry.", + "copied_to_clipboard": "Copied to clipboard:\n{text}", + "no_notes_retrieved": "No notes retrieved. Maybe lower the similarity threshold."
+ }, + "chat": { + "edit": "Edit query and regenerate answer", + "cancel_edit": "Cancel editing", + "copy": "Copy Text", + "regenerate": "Deletes all following Messages and regenerates the answer to the current query", + "change_assistant_prompt": "Change the initial assistant message", + "reset_assistant_prompt": "Reset the initial assistant message", + "retrieving": "Retrieving", + "reducing": "Reducing {num} Notes", + "user": "You", + "assistant": "Assistant", + "save": "Save the Chat to a Note", + "delete": "Delete the Chat History", + "toggle_papa": "Chatting with your Notes", + "toggle_llm": "Chatting with plain LLM", + "stop": "Stop your Smart Second Brain", + "send": "Run your Smart Second Brain", + "retry_error": "An error occured. Retry initialization", + "reinitialize": "Reinitialize, Settings changed", + "input_placeholder": "Chat with your Smart Second Brain..." + }, + "onboarding": { + "setup": "Setup", + "test": "Test", + "privacy_mode_note": "Your assistant is running in privacy mode. That means it is not connected to the internet and is running fully locally by leveraging Ollama.", + "openai_mode_note": "Your assistant is using third party services to run. 
That means you will have to share all your personal information with these services and your Smart Second Brain needs to be connected to the internet to leverage OpenAI's large language models like ChatGPT.", + "init": "Initialize your Smart Second Brain", + "init_label": "Click to initialize", + "ollama": { + "deamon": { + "install": "Install Ollama", + "set_baseurl": "Set the Ollama Base URL", + "start": "Start Ollama with origins" + }, + "app": { + "download": "Download the App", + "download_link": "here", + "extract": "Extract the .zip and start Ollama", + "run": "Run the setup.exe", + "test_label": "Test if Ollama is running", + "set_origins": "Set Ollama origins to enable streaming responses", + "restart": "Restart the Ollama service ", + "restart_label": "Click menu bar icon and then quit", + "quit": "Quit the Ollama service ", + "quit_label": "Click menu bar icon and then quit", + "start_origins": "Start the Ollama service with origins" + }, + "test_origins": "Test if the origins are set correctly", + "install_model": "Install an Ollama Embedding Model.", + "recommended_models": "Recommended:", + "set_model": "Set your Embedding Model:" + }, + "openai": { + "create_account": "Create an OpenAI ", + "create_account_link": "Account", + "create_api_key": "Create an ", + "create_api_key_link": "API Key", + "api_key_warning": "> [!Warning] Activate API-Key \n> For the API-Key to work you might have to upgrade to an OpenAI paid account. This means depositing at least $5 onto your OpenAI account. This might change in the future.", + "paste_api_key": "Paste your key here:", + "test_api_key": "Test your API Key", + "api_key_valid": "API Key is valid!"
+ } + }, + "incognito_toggle": { + "enable": "Run on your machine", + "disable": "Run via Third-Party Services" + }, + "settings": { + "excludeff": "Exclude files and folders", + "excludeff_add": "Add", + "excludeff_delete": "Delete from exclude list", + "advanced": "Advanced Settings", + "clear": "Clear Plugin Data", + "clear_modal": { + "title": "Clear Plugin Data", + "description": "Are you sure you want to delete the plugin data? Note that only the plugin data and the vector store data will be removed. All chat files inside your vault will not be affected." + }, + "clear_label": "Clear", + "autostart": "Autostart", + "debugging": "Debugging", + "langsmith_key": "Langsmith API Key", + "verbose": "Developer Console logging", + "openai": { + "description": " ", + "api_key": "API Key", + "gen_model": "Chat Model", + "embed_model": "Embedding Model", + "model_descritions": { + "gpt-3.5-turbo": "GPT-3.5 Turbo (4096 Tokens)", + "gpt-3.5-turbo-1106": "Latest GPT-3.5 Turbo (16385 Tokens)", + "gpt-4": "GPT-4 (8192 Tokens)", + "gpt-4-32k": "GPT-4 (32768 Tokens)", + "gpt-4-1106-preview": "Latest GPT-4 (128000 Tokens)", + "text-embedding-ada-002": "" + } + }, + "ollama": { + "description": "Refresh if your started Ollama.", + "gen_model": "Chat Model", + "embed_model": "Embedding Model", + "recommended": "Recommended", + "other": "Other", + "model_descriptions": { + "llama2": "Llama 2 is a collection of foundation language models ranging from 7B to 70B parameters.", + "llama2-uncensored": "Uncensored Llama 2 model by George Sung and Jarrad Hope.", + "mistral": "The 7B model released by Mistral AI, updated to version 0.2.", + "mistral-openorca": "Mistral OpenOrca is a 7 billion parameter model, fine-tuned on top of the Mistral 7B model using the OpenOrca dataset.", + "gemma": "Gemma is a family of lightweight, state-of-the-art open models built by Google DeepMind.", + "mixtral": "A high-quality Mixture of Experts (MoE) model with open weights by Mistral AI.", + 
"dolphin-mixtral": "Dolphin Mixtral (32000 Tokens)", + "phi": "Phi-2: a 2.7B language model by Microsoft Research that demonstrates outstanding reasoning and language understanding capabilities.", + "nomic-embed-text": "A high-performing open embedding model with a large token context window." + } + } + }, + "quick_settings": { + "initialize": "Initialize your Smart Second Brain", + "indexing_vault": "Indexing Vault", + "assistant_language": "Assistant language", + "pause_indexing": "Pause Indexing", + "resume_indexing": "Resume Indexing", + "retry_initialization": "Retry Initialization", + "mode_changed": "Reinitialize Smart Second Brain \nwith ", + "reinitialize": "Reinitialize Smart Second Brain", + "chat_via": "Chat via {model}", + "chatview": "Comfy Chatview", + "similarity_threshold": "Relevancy", + "creativity": "Creativity", + "settings_changed": "Settings changed.\nReinitialize Smart Second Brain.", + "open": "Open quick settings", + "close": "Close quick settings", + "error": { + "install_model": "Install {model} model first", + "other": "An error occurred.\nPlease retry initialization..."
+ } + } } diff --git a/src/lang/i18n.ts b/src/lang/i18n.ts index e62ef69..0bf96e6 100644 --- a/src/lang/i18n.ts +++ b/src/lang/i18n.ts @@ -1,10 +1,10 @@ import { init, addMessages } from 'svelte-i18n'; import en from './en.json'; -import de from './de.json'; +// import de from './de.json'; addMessages('en', en); -addMessages('de', de); +// addMessages('de', de); init({ fallbackLocale: 'en', diff --git a/src/logging.ts b/src/logging.ts index 1316a5a..d6fbfd3 100644 --- a/src/logging.ts +++ b/src/logging.ts @@ -1,5 +1,3 @@ -// TODO eslint rule to remove all console.logs before production - export enum LogLvl { DEBUG = 1, INFO = 2, diff --git a/src/main.ts b/src/main.ts index 4a43438..6d6e1c4 100644 --- a/src/main.ts +++ b/src/main.ts @@ -2,6 +2,7 @@ import { around } from 'monkey-around'; import { Notice, Plugin, TFile, WorkspaceLeaf, WorkspaceSidedock, normalizePath, type ViewState } from 'obsidian'; import { LogLvl, Prompts, type Language, type OllamaEmbedModel, type OllamaGenModel, type OpenAIEmbedModel, type OpenAIGenModel } from 'papa-ts'; import { get } from 'svelte/store'; +import { _ } from 'svelte-i18n'; import SmartSecondBrain from './SmartSecondBrain'; import { ConfirmModal } from './components/Settings/ConfirmModal'; @@ -31,8 +32,12 @@ export interface PluginData { isQuickSettingsOpen: boolean; isVerbose: boolean; isOnboarded: boolean; + hideIncognitoWarning: boolean; + isAutostart: boolean; } +export type PluginDataKey = keyof PluginData; + export const DEFAULT_SETTINGS: Partial = { isChatComfy: true, isUsingRag: true, @@ -63,6 +68,8 @@ export const DEFAULT_SETTINGS: Partial = { isQuickSettingsOpen: true, isVerbose: false, isOnboarded: false, + hideIncognitoWarning: false, + isAutostart: false, }; export default class SecondBrainPlugin extends Plugin { @@ -93,7 +100,7 @@ export default class SecondBrainPlugin extends Plugin { this.leaf = leaves[0]; this.activateView(); } - if (get(data).isOnboarded) this.s2b.init(); + if (get(data).isOnboarded && 
get(data).isAutostart) this.s2b.init(); }); this.registerEvent( this.app.workspace.on('layout-change', () => { @@ -204,18 +211,23 @@ export default class SecondBrainPlugin extends Plugin { } async clearPluginData() { - new ConfirmModal(get(plugin).app, async (result) => { - if (result === 'Yes') { - await this.saveData({}); - const files = (await this.app.vault.adapter.list(normalizePath(this.manifest.dir))).files; - for (const file of files) { - if (file.endsWith('vector-store.bin')) await this.app.vault.adapter.remove(file); + const t = get(_); + new ConfirmModal( + get(plugin).app, + t('settings.clear_modal.title'), + t('settings.clear_modal.description'), + async (result) => { + if (result === 'Yes') { + await this.saveData({}); + const files = (await this.app.vault.adapter.list(normalizePath(this.manifest.dir))).files; + for (const file of files) { + if (file.endsWith('vector-store.bin')) await this.app.vault.adapter.remove(file); + } + new Notice(t('notice.plugin_data_cleared'), 4000); } - new Notice('Plugin data cleared. 
Please reload the plugin.', 4000); - } else { - new Notice('Plugin data not cleared.', 4000); - } - }).open(); + }, + '' + ).activate(); } registerMonkeyPatches() { diff --git a/src/store.ts b/src/store.ts index f0f3bfd..90a1255 100644 --- a/src/store.ts +++ b/src/store.ts @@ -10,14 +10,13 @@ export type ChatMessage = { id: string; }; export const plugin = writable(); -export const data = writable(); export const isEditing = writable(false); export const isEditingAssistantMessage = writable(); export const chatInput = writable(''); export const isChatInSidebar = writable(true); -export type ErrorState = 'ollama-model-not-installed' | 'ollama-not-running' | 'ollama-origins-not-set'; +export type ErrorState = 'ollama-gen-model-not-installed' | 'ollama-embed-model-not-installed' | 'ollama-not-running' | 'ollama-origins-not-set'; export const errorState = writable(); export const runState = writable('startup'); @@ -27,6 +26,8 @@ export type PapaState = 'idle' | 'loading' | 'indexing' | 'indexing-pause' | 'ru export const papaState = writable('uninitialized'); export const papaIndexingProgress = writable(0); +export const cancelPullModel = writable(false); + // Does this work? / refactoring export const serializeChatHistory = (cH: ChatMessage[]) => cH @@ -58,3 +59,22 @@ function createChatHistory() { } export const chatHistory = createChatHistory(); + +function createData() { + const { subscribe, set, update } = writable(); + + return { + subscribe, + set, + update, + warningOff: (value) => { + update((d) => { + d[value] = true; + return d; + }); + get(plugin).saveSettings(); + }, + }; +} + +export const data = createData(); diff --git a/versions.json b/versions.json index 84818ce..2985ffa 100644 --- a/versions.json +++ b/versions.json @@ -10,5 +10,6 @@ "0.2.5": "1.5.0" "0.2.6": "1.5.0" "0.2.7": "1.5.0" - "0.2.8": "1.5.8" + "0.2.8": "1.5.0" + "0.4.0": "1.5.0" }