main.ts
// Obsidian AI Chat as Markdown copyright 2024 by Charl P. Botha <[email protected]>
import {
type App,
type Editor,
type EditorPosition,
type EmbedCache,
type MarkdownView,
type MetadataCache,
Notice,
Plugin,
PluginSettingTab,
Setting,
type TFile,
type TextAreaComponent,
type Vault,
parseLinktext,
resolveSubpath,
type HeadingCache,
} from "obsidian";
import OpenAI from "openai";
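// Plugin settings persisted via loadData()/saveData(): the OpenAI-style API host and key,
// the default model and system prompt (inline text or a file path), plus display/debug toggles.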
interface AIChatAsMDSettings {
apiHost: string;
openAIAPIKey: string;
model: string;
systemPrompt: string;
systemPromptFile: string;
showUsedModel: boolean;
debug: boolean;
}
const DEFAULT_SETTINGS: AIChatAsMDSettings = {
// openai: https://api.openai.com
// openrouter: https://openrouter.ai/api
apiHost: "https://api.openai.com",
openAIAPIKey: "",
// openai: gpt-4o
// openrouter: anthropic/claude-3.5-sonnet
model: "gpt-4o",
systemPrompt: `You are an AI assistant, outputting into an Obsidian markdown document. You have access to fenced codeblocks and MathJax notation. When responding:
1. Prioritize brevity and information density. Aim for concise, high-impact answers.
2. Use markdown formatting for text styling and organization, but avoid using # headings as your output could be streaming into a deeply nested part of the markdown document.
3. Use fenced codeblocks with language specification for any code snippets.
4. Use MathJax for math: inline $ Ax = b $ or block-level $$ E = mc^2 $$
5. Avoid unnecessary elaboration or examples unless specifically requested.
6. Use technical language and jargon appropriate to the topic, assuming user familiarity.
7. Provide direct answers without preamble or excessive context-setting.
Maintain a precise, informative tone. Focus on delivering maximum relevant information in minimum space.`,
systemPromptFile: "",
showUsedModel: false,
debug: false,
};
/// Convert image resource URL to data URL
/// If the passed resource URL can't be drawn to a canvas, the returned promise rejects (so awaiting it will throw)
// based on https://github.com/sissilab/obsidian-image-toolkit/issues/4#issuecomment-908898483
function imageToDataURL(imgSrc: string, maxEdge = 512, debug = false) {
return new Promise<{ dataURL: string; x: number; y: number }>(
(resolve, reject) => {
const image = new Image();
image.crossOrigin = "anonymous";
image.onload = () => {
const dims = [image.width, image.height];
const longestEdgeIndex = dims[0] > dims[1] ? 0 : 1;
if (dims[longestEdgeIndex] > maxEdge) {
const downscaleFactor = maxEdge / dims[longestEdgeIndex];
for (let i = 0; i < 2; i++) {
dims[i] = Math.round(dims[i] * downscaleFactor);
}
if (debug)
console.log(`resizing to ${dims[0]} x ${dims[1]}`);
}
const canvas = document.createElement("canvas");
canvas.width = dims[0];
canvas.height = dims[1];
const ctx = canvas.getContext("2d");
if (ctx === null) {
reject("Could not get 2d context from canvas");
return;
}
ctx.drawImage(image, 0, 0, dims[0], dims[1]);
// toDataURL() returns e.g. data:image/png;base64,....
// https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/toDataURL
// we use webp, which should give smaller files for the same quality
// https://developers.google.com/speed/webp/docs/webp_study
const dataURL = canvas.toDataURL("image/webp");
resolve({ dataURL, x: dims[0], y: dims[1] });
};
image.onerror = (e) => {
// important to signal the error to the caller via our promise
reject(`Error loading image: ${e}`);
};
image.src = imgSrc;
}
);
}
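// Start a fresh OpenAI message list, seeded with the system prompt as its single system-role message.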
function initMessages(
systemMessage: string
): OpenAI.ChatCompletionMessageParam[] {
const messages: OpenAI.ChatCompletionMessageParam[] = [
{ role: "system", content: systemMessage },
];
return messages;
}
/**
* Convert a subsection of a markdown file into a list of OpenAI.ChatCompletionContentPart
*
* This will take care of the main text, as well as text (whole file, heading, block) and image embeds.
*
* @param startOffset Starting 0-based offset of the range to convert. Pass null / undefined for start of file.
* @param endOffset Ending non-inclusive 0-based offset of the range to convert. Pass null / undefined for end of file.
* @param markdownFile
* @param vault Used to read main file and embedded files
* @param metadataCache Access file caches (for parsing) and link resolution
* @param debug If True, will print debug output to console
* @returns List of content parts, ready for concatenation into OpenAI-style request
* @raises Error if conversion could not take place. This should not happen in normal operation.
*/
async function convertRangeToContentParts(
startOffset: number | null,
endOffset: number | null,
markdownFile: TFile,
vault: Vault,
metadataCache: MetadataCache,
debug: boolean
): Promise<OpenAI.Chat.Completions.ChatCompletionContentPart[]> {
const cache = metadataCache.getFileCache(markdownFile);
if (!cache) {
const errMsg = `convertRangeToContentParts() could not find cache for ${markdownFile.path}`;
console.error(errMsg);
// if we can't find the cache, there is something seriously wrong, so we interrupt processing completely
throw new Error(errMsg);
}
const embeds = cache?.embeds || [];
// get the contents so we can extract the text we need
const markdown = await vault.cachedRead(markdownFile);
const _startOffset = startOffset ?? 0;
const _endOffset = endOffset ?? markdown.length;
if (debug) {
console.log(
"convertRangeToContentParts()",
_startOffset,
_endOffset,
"EMBEDS:",
embeds
);
}
// track end of previous embed+1, or start of the whole block, so we can add text before / between embeds
let currentStart = _startOffset;
// intermediate list of text + embeds
const parts = [];
// experimentally: embedded external image links e.g. ![](https://some.url/image.jpg) do not get parsed as embeds
// docs at https://help.obsidian.md/Linking+notes+and+files/Embed+file do call this an embed though
let embed: EmbedCache | null = null;
for (embed of embeds) {
if (
embed.position.start.offset >= _startOffset &&
embed.position.end.offset <= _endOffset
) {
if (embed.position.start.offset > currentStart) {
// this means there's text before the embed, let's add it
// EditorPosition has ch and line
// however, note that CM6 prefers offsets to the old CM5 line-ch pairs: https://codemirror.net/docs/migration/#positions
// fortunately, Obsidian's Editor abstraction offers posToOffset and offsetToPos
parts.push({
type: "text",
// previously getRange() was used here; AFAICS from the CM6 docs on sliceDoc it excludes the end position
// now: slice() likewise excludes the end position
text: markdown
.slice(currentStart, embed.position.start.offset)
.trim(),
});
}
// TODO: check that the embed is an image / other processable type
parts.push({
type: "embed",
embed,
});
currentStart = embed.position.end.offset + 1;
}
}
// take care of last bit of text
if (_endOffset > currentStart) {
parts.push({
type: "text",
text: markdown.slice(currentStart, _endOffset).trim(),
});
}
const contentParts: Array<OpenAI.ChatCompletionContentPart> = [];
for (const part of parts) {
if (part.type === "text" && part.text) {
contentParts.push(part as OpenAI.ChatCompletionContentPart);
} else if (part.type === "embed" && part.embed?.link) {
// obsidian can link to an embed without subdir; could be more than one file with that name
// getFirstLinkpathDest() with the correct sourcePath (second arg) should return the correct file
// note also that you HAVE to strip off #subpath from the link, else it returns null
const parsedLink = parseLinktext(part.embed.link);
const embeddedFile = metadataCache.getFirstLinkpathDest(
parsedLink.path,
markdownFile.path
);
if (embeddedFile) {
if (embeddedFile.extension === "md") {
let embeddedMarkdown = await vault.cachedRead(embeddedFile);
if (parsedLink.subpath) {
const embeddedCache =
metadataCache.getFileCache(embeddedFile);
if (embeddedCache) {
const subpath = resolveSubpath(
embeddedCache,
parsedLink.subpath
);
if (subpath) {
if (subpath.type === "heading") {
// when subpath.next (the next heading) is null, replace with undefined so substring goes to end of file
embeddedMarkdown =
embeddedMarkdown.substring(
subpath.current.position.start
.offset,
subpath.next?.position.start
.offset ?? undefined
);
} else {
// must be block
embeddedMarkdown =
embeddedMarkdown.substring(
subpath.block.position.start.offset,
subpath.block.position.end.offset
);
}
}
}
}
contentParts.push({ type: "text", text: embeddedMarkdown });
} else {
// if it's not markdown, it could be an image, so we try to load it as one
try {
// claude sonnet 3.5 image sizes: https://docs.anthropic.com/en/docs/build-with-claude/vision#evaluate-image-size
// longest edge should be < 1568
// openai gpt-4o
// we need either < 512x512 or < 2000x768 (low or high fidelity)
const { dataURL, x, y } = await imageToDataURL(
vault.getResourcePath(embeddedFile),
1568,
debug
);
// DEBUG: show image in the console -- working on 2024-06-27
if (debug) {
console.log(
"%c ",
`font-size:1px; padding: ${x}px ${y}px; background:url(${dataURL}) no-repeat; background-size: contain;`
);
console.log(dataURL);
console.log(
`Adding image "${part.embed.link}" at size ${x}x${y} to messages.`
);
}
contentParts.push({
type: "image_url",
image_url: {
url: dataURL,
},
});
} catch (e) {
console.error("Error copying image", embeddedFile, e);
}
}
}
}
}
return contentParts;
}
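// Result of converting the heading thread under the cursor: the OpenAI messages,
// the deepest heading containing the cursor, and the editor position at the end
// of that heading's section (where the AI reply will be inserted).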
interface IThreadMessages {
messages: OpenAI.ChatCompletionMessageParam[];
heading: HeadingCache;
rangeEnd: EditorPosition;
}
// find current cursor position, determine its heading path, then convert that path into messages
// app needed for: metadataCache, vault
// editor needed for: getCursor, getLine, lastLine, getRange, etc.
async function convertCurrentThreadToMessages(
markdownFile: TFile,
systemMessage: string,
app: App,
editor: Editor,
debug = false
): Promise<IThreadMessages> {
const cache = app.metadataCache.getFileCache(markdownFile);
if (!cache)
throw new Error(`Could not find cache for ${markdownFile.path}`);
const headings = cache.headings || [];
// find heading containing the cursor, and then the path of containing headings up the tree
const headingPath = [];
let currentHeading = null;
for (let i = headings.length - 1; i >= 0; i--) {
const heading = headings[i];
if (currentHeading) {
// we've already found currentHeading, containing the cursor
// so here we're tracing the path from the cursor up to the topmost heading
if (
heading.position.start.line <
currentHeading.position.start.line &&
heading.level < currentHeading.level
) {
headingPath.unshift(i);
currentHeading = heading;
}
} else {
// we are still searching for the currentHeading (containing the cursor)
if (heading.position.start.line <= editor.getCursor().line) {
// ok we found the heading containing the cursor, start from here
headingPath.unshift(i);
currentHeading = heading;
}
}
}
if (!currentHeading)
throw new Error(`No headings to work with in ${markdownFile.path}`);
const messages = initMessages(systemMessage);
// we want to return the last rangeEnd, so that the calling code can move the cursor there
let rangeEnd: EditorPosition = { line: 0, ch: 0 };
let heading = null;
// we want to find embeds in the range
let rangeEndOffset = -1;
for (const i of headingPath) {
// determine current heading to next heading / end of file block
heading = headings[i];
const nextHeading = headings[i + 1];
if (nextHeading) {
const line = nextHeading.position.start.line - 1;
rangeEnd = {
line: line,
ch: editor.getLine(line).length,
};
rangeEndOffset = nextHeading.position.start.offset - 1;
} else {
// this is the last heading, so we have to use end of file
const lastLine = editor.lastLine();
rangeEnd = {
line: lastLine,
ch: editor.getLine(lastLine).length,
};
rangeEndOffset = editor.getValue().length;
}
const uHeading = heading.heading.toUpperCase();
const role =
uHeading.startsWith("AI") || uHeading.startsWith("ASSISTANT")
? "assistant"
: "user";
if (role === "assistant") {
// assistant can only have content as a single string!
const m = editor.getRange(
{ line: heading.position.end.line + 1, ch: 0 },
rangeEnd
);
messages.push({ role: role, content: m });
} else {
// this is a user message, so we do multi-part / ContentPart[]
const startOffset = heading.position.end.offset + 1;
const endOffset = rangeEndOffset;
// raised exceptions will propagate to convertCurrentThreadToMessages()'s caller and be shown as a notice
const contentParts = await convertRangeToContentParts(
startOffset,
endOffset,
markdownFile,
app.vault,
app.metadataCache,
debug
);
messages.push({
role: role,
content: contentParts,
});
}
}
if (!heading) {
const errMsg = "Really unexpected that we have no last heading here.";
console.error(errMsg);
throw new Error(errMsg);
}
return { messages, heading, rangeEnd };
}
// replace range, but also move cursor ahead to be located right after the inserted multi-line text
function replaceRangeMoveCursor(editor: Editor, text: string) {
const cursor = editor.getCursor();
editor.replaceRange(text, cursor);
const lines = text.split("\n");
editor.setCursor({
line: cursor.line + lines.length - 1,
// if only one line, we have to add the new text length to existing
// if more than one line, then the final line determines the ch position
ch:
(lines.length === 1 ? cursor.ch : 0) +
lines[lines.length - 1].length,
});
}
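// Main plugin class: registers the thread/selection commands, streams completions
// into the editor as they arrive, and wires up the settings tab.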
export default class AIChatAsMDPlugin extends Plugin {
settings: AIChatAsMDSettings;
async onload() {
await this.loadSettings();
// This adds a status bar item to the bottom of the app. Does not work on mobile apps.
// const statusBarItemEl = this.addStatusBarItem();
// statusBarItemEl.setText("AICM loaded");
this.addCommand({
id: "complete-thread",
name: "Send current thread to AI",
icon: "bot-message-square",
// https://docs.obsidian.md/Plugins/User+interface/Commands#Editor+commands
editorCallback: async (editor: Editor, view: MarkdownView) => {
// await view.save();
const markdownFile = view.file;
if (!markdownFile) {
new Notice("No markdown file open");
return;
}
const systemPrompt = await this.getSystemPrompt(markdownFile);
if (!systemPrompt) {
return;
}
let mhe: IThreadMessages;
try {
mhe = await convertCurrentThreadToMessages(
markdownFile,
systemPrompt,
this.app,
editor,
this.settings.debug
);
} catch (e) {
new Notice(`Error converting thread to messages: ${e}`);
return;
}
editor.setCursor(mhe.rangeEnd);
const model = this.getRequestedModel(markdownFile);
// create heading that's one level deeper than the one we are replying to
const aiLevel = mhe.heading.level + 1;
let aiHeading = `\n\n${"#".repeat(aiLevel)} AI`;
// if the user configured it, show the used model in the heading
if (this.settings.showUsedModel) {
aiHeading += ` (model:: ${model})`;
}
aiHeading += "\n";
replaceRangeMoveCursor(editor, aiHeading);
if (this.settings.debug) {
console.log("About to send to AI:", mhe.messages);
}
// DEBUG bypass OpenAI
// return null;
try {
const stream = await this.getOpenAIStream(
mhe.messages,
model
);
// statusBarItemEl.setText("AICM streaming...");
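// stream the response into the document: each chunk carries a partial content delta
// which is inserted at the cursor as it arrives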
for await (const chunk of stream) {
const content = chunk.choices[0]?.delta?.content || "";
replaceRangeMoveCursor(editor, content);
if (chunk.usage) {
console.log("OpenAI API usage:", chunk.usage);
}
}
//statusBarItemEl.setText("AICM done.");
} catch (e) {
this.handleOpenAIError(e);
}
// BUG: on iPhone, this sometimes starts before the last 2 or 3 characters of AI message
const userHeading = `\n\n${"#".repeat(aiLevel + 1)} User\n`;
replaceRangeMoveCursor(editor, userHeading);
},
});
this.addCommand({
id: "complete-selection",
name: "Send selected text to AI and append the response",
icon: "bot-message-square",
// https://docs.obsidian.md/Plugins/User+interface/Commands#Editor+commands
editorCallback: async (editor: Editor, view: MarkdownView) => {
await this.completeSelection(editor, view);
},
});
this.addCommand({
id: "complete-selection-and-replace",
name: "Send selected text to AI and REPLACE it with the response",
icon: "bot-message-square",
// https://docs.obsidian.md/Plugins/User+interface/Commands#Editor+commands
editorCallback: async (editor: Editor, view: MarkdownView) => {
await this.completeSelection(editor, view, "replace");
},
});
// This adds a settings tab so the user can configure various aspects of the plugin
this.addSettingTab(new AIChatAsMDSettingsTab(this.app, this));
}
onunload() {}
async loadSettings() {
this.settings = Object.assign(
{},
DEFAULT_SETTINGS,
await this.loadData()
);
}
async saveSettings() {
await this.saveData(this.settings);
}
/**
* Determine which model to use for the current markdown file
*
* If the file has a frontmatter key aicmd-model, that will be used, else the default configured model.
*
* @param markdownFile the markdown file from which the frontmatter is to be read
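* @example A note with frontmatter `aicmd-model: anthropic/claude-3.5-sonnet` will use that model instead of the configured default.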
*/
getRequestedModel(markdownFile: TFile) {
const cache = this.app.metadataCache.getFileCache(markdownFile);
const model =
cache?.frontmatter?.["aicmd-model"] ?? this.settings.model;
if (this.settings.debug) {
console.log(`Using model ${model} for "${markdownFile.path}"`);
}
return model;
}
/**
* Determine whether user wants to use a system prompt file and if so which one.
*
* If the file has a frontmatter key aicmd-system-prompt-file, that will be used, else the default configured system prompt file, else the default configured system prompt text.
*
* @param markdownFile File that the user is working on
* @returns Path to the system prompt file to use or empty string "" if the user wants to use the default system prompt text.
*/
getRequestedSystemPromptFile(markdownFile: TFile): string {
const cache = this.app.metadataCache.getFileCache(markdownFile);
const systemPromptFile =
cache?.frontmatter?.["aicmd-system-prompt-file"] ??
this.settings.systemPromptFile;
if (this.settings.debug) {
console.log(
`Using system prompt file ${systemPromptFile} for "${markdownFile.path}"`
);
}
return systemPromptFile;
}
handleOpenAIError(e: Error) {
// this will give a nice traceback in the console
console.error("Error while streaming from OpenAI:", e);
// delay=0 so that the notice stays up until the user dismisses it
new Notice(
`An error occurred while communicating with the OpenAI-style service. Details: ${e}`,
0
);
}
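// Construct an OpenAI client against the configured host's /v1 endpoint;
// the extra headers are only meaningful when the host is OpenRouter.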
async getOpenAI() {
const openai = new OpenAI({
// "https://openrouter.ai/api/v1" or "https://api.openai.com/v1"
baseURL: `${this.settings.apiHost}/v1`,
apiKey: this.settings.openAIAPIKey,
defaultHeaders: {
"HTTP-Referer":
"https://github.com/cpbotha/obsidian-ai-chat-as-md", // Optional, for including your app on openrouter.ai rankings.
"X-Title": "Obsidian AI Chat as Markdown", // Optional. Shows in rankings on openrouter.ai.
},
// we are running in a browser environment, but we are using obsidian settings to get keys, so we can enable this
dangerouslyAllowBrowser: true,
});
return openai;
}
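// Start a streaming chat completion request for the given messages and model.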
async getOpenAIStream(
messages: OpenAI.ChatCompletionMessageParam[],
model: string
) {
const openai = await this.getOpenAI();
return openai.chat.completions.create({
model: model,
messages: messages,
stream: true,
});
}
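/**
* Resolve the system prompt for the given file.
*
* If a system prompt file is configured (via frontmatter or the plugin settings), read it and
* concatenate its text parts; otherwise fall back to the system prompt text from the settings.
*
* @returns The system prompt string, or null (after showing a Notice) if a configured prompt file could not be read or parsed.
*/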
async getSystemPrompt(markdownFile: TFile) {
const systemPromptFilename =
this.getRequestedSystemPromptFile(markdownFile);
if (systemPromptFilename) {
// we expect the user to specify the actual file path relative to the vault, and to include the extension,
// e.g. "prompts/productivity coach.md". We considered using the same path resolution as for embedded files
// (see metadataCache.getFirstLinkpathDest()), but that only makes sense for locally specified prompts, not
// those specified in the global plugin configuration.
const systemPromptFile =
this.app.vault.getFileByPath(systemPromptFilename);
if (!systemPromptFile) {
new Notice(
`AI Chat as MD could not read system prompt file "${systemPromptFilename}". Please check its path in the plugin settings or in this file's frontmatter.`
);
return null;
}
let sysContentParts: OpenAI.Chat.Completions.ChatCompletionContentPart[];
try {
sysContentParts = await convertRangeToContentParts(
null,
null,
systemPromptFile,
this.app.vault,
this.app.metadataCache,
this.settings.debug
);
} catch (e) {
new Notice(
`Error parsing system prompt file "${systemPromptFilename}": ${e}`
);
return null;
}
// concatenate all of the "text" members
// effectively throwing out type == "image_url" parts
// until there are models that can take image as part of their system prompts
const systemPrompt = sysContentParts
.filter((part) => part.type === "text")
.map((part: OpenAI.ChatCompletionContentPartText) => part.text)
.join("\n");
return systemPrompt;
}
return this.settings.systemPrompt;
}
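/**
* Send the current selection (including any embeds it contains) to the AI.
*
* In "append" mode the streamed response is inserted after the selection;
* in "replace" mode the selection is deleted and the response takes its place.
*/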
async completeSelection(
editor: Editor,
view: MarkdownView,
mode: "replace" | "append" = "append"
) {
if (!editor.somethingSelected()) {
return;
}
const markdownFile = view.file;
if (!markdownFile) {
new Notice("No markdown file open");
return;
}
// await view.save();
const cache = this.app.metadataCache.getFileCache(markdownFile);
if (!cache) return null;
// from..to could be flipped if user selected from the back to the front
// we make sure that it's from lowest to highest offset
// BTW: wow javascript, making me supply a compareFn to sort numbers, sheesh!
const [selStartOffset, selEndOffset] = [
editor.getCursor("from"),
editor.getCursor("to"),
]
.map((pos) => editor.posToOffset(pos))
.sort((a, b) => a - b);
const systemPrompt = await this.getSystemPrompt(markdownFile);
if (!systemPrompt) {
return;
}
const messages = initMessages(systemPrompt);
try {
messages.push({
role: "user",
content: await convertRangeToContentParts(
selStartOffset,
selEndOffset,
markdownFile,
this.app.vault,
this.app.metadataCache,
this.settings.debug
),
});
} catch (e) {
new Notice(
`Error converting selection to OpenAI-style messages: ${e}`
);
return;
}
if (this.settings.debug) {
console.log("About to send to AI:", messages);
}
const model = this.getRequestedModel(markdownFile);
try {
const stream = await this.getOpenAIStream(messages, model);
//statusBarItemEl.setText("AICM streaming...");
if (mode === "append") {
// in case the user selected from back to front, we move the cursor to the end
editor.setCursor(editor.offsetToPos(selEndOffset));
replaceRangeMoveCursor(editor, "\n\n");
} else {
editor.replaceSelection("");
}
for await (const chunk of stream) {
const content = chunk.choices[0]?.delta?.content || "";
replaceRangeMoveCursor(editor, content);
if (chunk.usage) {
console.log("OpenAI API usage:", chunk.usage);
}
}
} catch (e) {
this.handleOpenAIError(e);
}
replaceRangeMoveCursor(editor, "\n");
//statusBarItemEl.setText("AICM done.");
}
}
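// Settings tab: API host and key, model name, system prompt (inline text or file path),
// and the "show used model" / debug toggles.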
class AIChatAsMDSettingsTab extends PluginSettingTab {
plugin: AIChatAsMDPlugin;
constructor(app: App, plugin: AIChatAsMDPlugin) {
super(app, plugin);
this.plugin = plugin;
}
display(): void {
const { containerEl } = this;
containerEl.empty();
new Setting(containerEl)
.setName("API host")
.setDesc(
"OpenAI-style API host, e.g. https://api.openai.com or https://openrouter.ai/api"
)
.addText((text) =>
text
.setPlaceholder(
"Enter the API host, e.g. https://api.openai.com"
)
.setValue(this.plugin.settings.apiHost)
.onChange(async (value) => {
this.plugin.settings.apiHost = value;
await this.plugin.saveSettings();
})
);
new Setting(containerEl)
.setName("API key")
.setDesc("Usually of the form sk-xxxx")
.addText((text) =>
text
.setPlaceholder("Enter your secret")
.setValue(this.plugin.settings.openAIAPIKey)
.onChange(async (value) => {
this.plugin.settings.openAIAPIKey = value;
await this.plugin.saveSettings();
})
);
new Setting(containerEl)
.setName("Model name")
.setDesc(
"E.g. gpt-4o for OpenAI or anthropic/claude-3.5-sonnet for OpenRouter"
)
.addText((text) =>
text
.setPlaceholder("gpt-4o")
.setValue(this.plugin.settings.model)
.onChange(async (value) => {
this.plugin.settings.model = value;
await this.plugin.saveSettings();
})
);
let systemPromptTextArea: TextAreaComponent;
const systemPromptSetting = new Setting(containerEl)
.setName("System prompt")
.addTextArea((textArea) => {
systemPromptTextArea = textArea;
return textArea
.setPlaceholder("Enter the system prompt")
.setValue(this.plugin.settings.systemPrompt)
.onChange(async (value) => {
this.plugin.settings.systemPrompt = value;
await this.plugin.saveSettings();
});
})
.addExtraButton((button) => {
button
.setIcon("lucide-rotate-ccw")
.setTooltip("Reset to default")
.onClick(() => {
systemPromptTextArea.setValue(
DEFAULT_SETTINGS.systemPrompt
);
// setValue() above does not trigger the onChange() handler,
// so we invoke it manually here
systemPromptTextArea.onChanged();
});
});
new Setting(containerEl).setName("Advanced").setHeading();
// this.app.vault.getMarkdownFiles()
// the path in each of these files is relative to the vault, which is exactly what I want for this.app.vault.getFileByPath()
new Setting(containerEl)
.setName("Use (markdown) file as system prompt")
.setDesc(
"Enter the path, relative to your vault, of any file that the plugin should use as the system prompt, " +
"instead of the text above. " +
"Examples: `sysprompt-swdev.md`, `top-folder/system prompt 1.md`"
)
.addText((text) => {
text.setValue(this.plugin.settings.systemPromptFile).onChange(
async (value) => {
this.plugin.settings.systemPromptFile = value.trim();
await this.plugin.saveSettings();
//systemPromptSetting.setDisabled(value !== "");
}
);
});
new Setting(containerEl)
.setName("Show used model")
.setDesc("Add used model to the end of each AI heading")
.addToggle((toggle) =>
toggle
.setValue(this.plugin.settings.showUsedModel)
.onChange(async (value) => {
this.plugin.settings.showUsedModel = value;
await this.plugin.saveSettings();
})
);
new Setting(containerEl)
.setName("Debug mode")
.setDesc("Debug output in developer console")
.addToggle((toggle) =>
toggle
.setValue(this.plugin.settings.debug)
.onChange(async (value) => {
this.plugin.settings.debug = value;
await this.plugin.saveSettings();
})
);
}
}