Skip to content

Commit

Permalink
refactor: Use coroutines for LangChain4j interactions
Browse files Browse the repository at this point in the history
  • Loading branch information
fmueller committed Sep 10, 2024
1 parent bf333d3 commit 55d9825
Showing 1 changed file with 17 additions and 10 deletions.
27 changes: 17 additions & 10 deletions src/main/kotlin/com/github/fmueller/jarvis/ai/OllamaService.kt
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@ import dev.langchain4j.model.ollama.OllamaStreamingChatModel
import dev.langchain4j.service.AiServices
import dev.langchain4j.service.TokenStream
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.ExperimentalCoroutinesApi
import kotlinx.coroutines.suspendCancellableCoroutine
import kotlinx.coroutines.withContext
import kotlinx.serialization.Serializable
import kotlinx.serialization.decodeFromString
Expand All @@ -15,7 +17,7 @@ import java.net.http.HttpClient
import java.net.http.HttpRequest
import java.net.http.HttpResponse
import java.time.Duration
import java.util.concurrent.CompletableFuture
import kotlin.coroutines.resumeWithException

// Wire-format message for the Ollama REST chat API; `role` is presumably
// "system"/"user"/"assistant" per the Ollama API — confirm against the request builder.
@Serializable
private data class ChatMessage(val role: String, val content: String)
Expand Down Expand Up @@ -63,22 +65,27 @@ object OllamaService {
.systemMessageProvider { chatMemoryId -> systemMessage }
.build()

/**
 * Sends the last user message of [conversation] to the model via LangChain4j
 * and suspends until the streamed response has completed.
 *
 * Partial tokens are appended to the conversation as they arrive via
 * [Conversation.addToMessageBeingGenerated]; the function returns the full
 * response text once the stream finishes. Streaming errors are rethrown as
 * [Exception] with the original throwable chained as the cause.
 */
@OptIn(ExperimentalCoroutinesApi::class)
suspend fun chatLangChain4J(conversation: Conversation): String = withContext(Dispatchers.IO) {
    // TODO check if model is available
    // TODO if not, download model

    // TODO migration to LangChain4J: add timeout handling
    // TODO migration to LangChain4J: research how token limits work and context windows
    // TODO migration to LangChain4J: research how chat memory is configured

    // Bridge LangChain4j's callback-based TokenStream into a suspending call.
    // The type argument is inferred from the resume call below, so no explicit
    // <String> is needed.
    suspendCancellableCoroutine { continuation ->
        assistant
            .chat(conversation.getLastUserMessage()?.content ?: "Tell me that there was no message provided.")
            .onNext { update -> conversation.addToMessageBeingGenerated(update) }
            // onCancellation lambda: nothing to release — the value is plain text.
            .onComplete { response -> continuation.resume(response.content().text()) { _ -> /* noop */ } }
            // Chain `error` as the cause so the original stack trace is preserved.
            .onError { error -> continuation.resumeWithException(Exception("Error: ${error.message}", error)) }
            .start()

        continuation.invokeOnCancellation {
            // TODO when LangChain4j implements AbortController, call it here
        }
    }
}

suspend fun chat(conversation: Conversation): String = withContext(Dispatchers.IO) {
Expand Down

0 comments on commit 55d9825

Please sign in to comment.