diff --git a/.changeset/real-tips-serve.md b/.changeset/real-tips-serve.md
new file mode 100644
index 0000000..b60d077
--- /dev/null
+++ b/.changeset/real-tips-serve.md
@@ -0,0 +1,5 @@
+---
+"inform-ai": patch
+---
+
+Better README
diff --git a/.changeset/sharp-lies-build.md b/.changeset/sharp-lies-build.md
new file mode 100644
index 0000000..9cfd697
--- /dev/null
+++ b/.changeset/sharp-lies-build.md
@@ -0,0 +1,5 @@
+---
+"inform-ai": minor
+---
+
+Adopt MIT license
diff --git a/README.md b/README.md
index a519c0b..d1cf358 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,14 @@
-# InformAI - Easy & Useful AI for React apps
+# InformAI - Context-Aware AI Integration for React Apps

-InformAI allows you to easily retrofit context-aware AI into any React application. It works really well alongside the excellent [Vercel AI SDK](https://sdk.vercel.ai/docs/introduction). It shines when used in combination with Next JS and especially React Server Components, but these are not mandatory.
+**InformAI** is a tool that enables seamless integration of context-aware AI into any React application. It's designed to work effortlessly with the [Vercel AI SDK](https://sdk.vercel.ai/docs/introduction), but is also compatible with other AI SDK providers.

-InformAI provides a simple way to expose the state of your React components to any LLM. It also lets your components publish events, like user clicks or other interactions. It makes it easy to send this information in an LLM-optimized format to calls to your LLM. It doesn't send stuff to the LLM, but it makes it easy to integrate with libraries that do (like the Vercel AI SDK).
+### Key Features

-It works with both traditional client side React components as well as React Server Components.
+- **Contextual AI Integration**: Easily expose the state of your React components to an LLM (Large Language Model) or other AI, providing valuable context with minimal effort.
+- **Event Publishing**: Allow your components to publish events, like user interactions, in an LLM-optimized format.
+- **Flexible Usage**: Works well with both client-side React components and React Server Components. Though it excels with Next.js and React Server Components, these are not required.
+
+InformAI doesn't directly send data to your LLM but simplifies integration with tools like the Vercel AI SDK, making it easy to incorporate AI into your app.

## Installation

@@ -14,160 +18,51 @@ Install the NPM package:
npm install inform-ai
```

-Include the stylesheet if you plan to use the included UI components:
+Include the stylesheet if you plan to use the included UI components (skip it if you want to use them but restyle them yourself):

```tsx
import "inform-ai/dist/main.css";
```

-## Conceptual Overview
-
-InformAI allows you to expose the state of any number of your React components to an AI - usually an LLM. It also allows components to notify the LLM of events that have occurred, such as the user clicking something, entering text, or some other interaction with the Component.
-
-Under the covers, InformAI creates an array of component-generated messages that will be sent to the LLM along with whatever message the user types in to your chat box (a simple ChatBox component is included in InformAI). When you are ready to execute the LLM (usually in response to the user entering some text into a chat box), these component-generated messages are processed, deduped, made LLM-friendly and passed along with your user's message.
-
-InformAI contains the following:
-
-- A React Context Provider, created by `createInformAI()` - analogous to Vercel AI SDK's [createAI](https://sdk.vercel.ai/docs/reference/ai-sdk-rsc/create-ai) function
-- A React Component called `<InformAI />`, which takes a name, a prompt to pass to the LLM, and arbitrary props
-- A React hook called `useInformAI`, which allows you to do the same as `<InformAI />` but programmatically
-- A handful of pre-built UI components like a `ChatBox`, `CurrentState` (to debug what InformAI currently knows about) and simple message wrappers
+## Installing the Provider

-## Usage
-
-InformAI doesn't integrate directly with an LLM itself - there are several libraries out there already that do that. One of the best of the bunch is the Vercel AI SDK.
-This assumes that you have both the Vercel AI SDK and inform-ai npm packages installed.
-
-To use InformAI, first we need to define our Vercel AI Provider and our InformAI Provider (we will come back to the definition of `submitUserMessage` later):
+InformAI can be used via either the `<InformAI />` Component or the `useInformAI` hook. Either way, you need to wrap any components using InformAI inside an `InformAIProvider`:

```tsx
-"use server";
-
-import { CoreMessage, generateId } from "ai";
-import { createAI } from "ai/rsc";
-
import { InformAIProvider } from "inform-ai";
-import { submitUserMessage } from "../actions/AI";
-
-export type ClientMessage = CoreMessage & {
-  id: string;
-};
-
-export type AIState = {
-  chatId: string;
-  messages: ClientMessage[];
-};
-
-export type UIState = {
-  id: string;
-  role?: string;
-  content: React.ReactNode;
-}[];
-
-export const AIProvider = createAI({
-  actions: {
-    submitUserMessage,
-  },
-  initialUIState: [] as UIState,
-  initialAIState: { chatId: generateId(), messages: [] } as AIState,
-});
-
-export default async function AIProviders({ children }: { children: React.ReactNode }) {
-  return (
-    <AIProvider>
-      <InformAIProvider>{children}</InformAIProvider>
-    </AIProvider>
-  );
-}
+
+//somewhere in your layout.tsx or similar:
+<InformAIProvider>{children}</InformAIProvider>;
```

-Most of that was just setting up Vercel AI SDK and defining a couple of TypeScript types for the messages sent between the user and the LLM. As a convenience, we created and exported a React component called `<AIProviders>`, which just injects both the Vercel AI Provider and the InformAI Provider into your React component tree.
-
-Inside your `layout.tsx`, we'll add that `<AIProviders>` component, along with the `<CurrentState />` component that is bundled with inform-ai. `<CurrentState />` is not intended to be used in production, but helps you keep track of what information you have surfaced to the LLM as you develop your code:
-
-```tsx
-import "inform-ai/dist/main.css";
-import "./globals.css";
-
-//the 2 Providers you just created
-import { AIProviders } from "./providers/AI";
-//optionally include the CurrentState component for easier InformAI debugging
-import { CurrentState } from "inform-ai";
-
-export default function RootLayout({
-  children,
-}: Readonly<{
-  children: React.ReactNode;
-}>) {
-  return (
-    <html lang="en">
-      <body>
-        <AIProviders>
-          {children}
-          <CurrentState className="fixed right-3 top-3" />
-        </AIProviders>
-      </body>
-    </html>
-  );
-}
-```

-### Exposing Component state and events
-
-Now that we have set up the Providers, exposing Component state to InformAI is easy:
+## Exposing Component state

-#### Page-level integration
-
-The fastest way to add InformAI to your app is by doing so at the page level. Below is an example from the [lansaver application](https://github.com/edspencer/lansaver), which is a nextjs app that backs up configurations for network devices like firewalls and managed switches ([see the full SchedulePage component here](https://github.com/edspencer/lansaver/blob/main/app/schedules/%5Bid%5D/page.tsx)).
-
-This is a React Server Component, rendered on the server. It imports the `<InformAI />` React component, defines a `prompt` string to help the LLM understand what this component does, and then renders `<InformAI />` with a meaningful component `name`, the `prompt`, and an arbitrary `props` object, which is passed to the LLM in addition to the name and prompt:
+Now, within any React component that will be rendered inside that `InformAIProvider`, you can insert an `<InformAI />` node:

```tsx
import { InformAI } from "inform-ai";

-const prompt = `A page that shows the details of a schedule. It should show the schedule's configuration, the devices in the schedule, and recent jobs for the schedule. It should also have buttons to run the schedule, edit the schedule, and delete the schedule.`;
+const prompt = "Shows the life history of a person, including their name, title and age";

-export default async function SchedulePage({ params: { id } }: { params: { id: string } }) {
-  const schedule = await getSchedule(parseInt(id, 10));
-
-  if (!schedule) {
-    return notFound();
-  }
-
-  const devices = await getScheduleDevices(schedule.id);
-  const jobs = await recentJobs(schedule.id);
-
+export function Bio({ name, title, age }) {
  return (
-    <div className="flex flex-col gap-8">
-      <InformAI name="Schedule Detail Page" prompt={prompt} props={{ schedule, devices, jobs }} />
-      <Heading>Schedule Details</Heading>
-      {/* ...schedule configuration, run/edit/delete buttons and devices list markup... */}
-      <Subheading>Recent Jobs</Subheading>
-      {jobs.length ? <JobsTable jobs={jobs} /> : null}
-      {/* ... */}
-    </div>
+    <div className="bio">
+      <InformAI name="Bio" prompt={prompt} props={{ name, title, age }} />
+      {/* ...rest of the component here */}
+    </div>
  );
}
```

By adding the `<InformAI />` tag to our component, we were able to tell the LLM 3 things about it:

- **name** - a meaningful name for this specific component instance
- **props** - any props we want to pass to the LLM (must be JSON-serializable)
- **prompt** - a string to help the LLM understand what the component does
### useInformAI

An alternative to the `<InformAI />` component is to use the `useInformAI` hook. `useInformAI` is a little more versatile than `<InformAI />`. Here's a slightly simplified example taken from the [backups table from lansaver](https://github.com/edspencer/lansaver/blob/main/components/backup/table.tsx), showing how to use `useInformAI` instead of `<InformAI />`:

```tsx
import { useInformAI } from "inform-ai";

@@ -196,33 +91,15 @@ export function BackupsTable({
    return <CondensedBackupsTable backups={backups} />;
  }

-  return (
-    <Table>
-      <TableHead>
-        <TableRow>
-          <TableHeader>ID</TableHeader>
-          {showDevice && <TableHeader>Device</TableHeader>}
-          <TableHeader>Status</TableHeader>
-          <TableHeader>Date</TableHeader>
-          <TableHeader>Size</TableHeader>
-          <TableHeader>Actions</TableHeader>
-        </TableRow>
-      </TableHead>
-      <TableBody>
-        {backups.map((backup) => (
-          <BackupRow key={backup.id} backup={backup} showDevice={showDevice} />
-        ))}
-      </TableBody>
-    </Table>
-  );
+  return <table>{/* ...your table implementation */}</table>;
}
```

-Note that we could have used the `<InformAI />` React component again in this case instead of `useInformAI()`, but it was useful to use the hook in this case as we render a different table if `condensed` is set to true.
+It was useful to use the hook in this case as we render a different table if `condensed` is set to true, but we wanted to surface the same information either way to InformAI. By using `useInformAI`, we didn't need to maintain two duplicate copies of an `<InformAI />` tag in our two table components.

-#### Sending Component Events
+## Exposing Component events

-In addition to exposing Component state via `<InformAI />` and `useInformAI`, Components may expose events such as clicks and other user interactions.
+Another possibility that is unlocked by using `useInformAI` is telling the LLM about component events like clicks or other user interactions.

Here's an example of a different Table component, which can render arbitrary data and exposes `click` events when the user clicks on a table cell:

```tsx
@@ -236,7 +113,7 @@ keys of the first object in the data array as the column headers.

The component will render the table with the provided data and column headers.`;

export function Table({ data, colHeaders, name = "Table", informPrompt = defaultPrompt, header }: TableProps) {
-  const { addEvent, updateState } = useInformAI({
+  const { addEvent } = useInformAI({
    name,
    prompt: informPrompt,
    props: {
      data,
      colHeaders,
    },
  });

@@ -254,7 +131,7 @@ export function Table({ data, colHeaders, name = "Table", informPrompt = default
  };

  return (
-    <div className="table-container">
+    <div className="overflow-x-auto">
      {header}
@@ -281,28 +158,107 @@ export function Table({ data, colHeaders, name = "Table", informPrompt = default
}
```

-The `type` and `description` we pass can be any strings we like, and will be exposed to the LLM next time we trigger it.
+The `type` and `description` we pass can be any strings we like, and will be exposed to the LLM the next time we trigger it.

-### Processing messages from the user
+## Viewing Current State

-Now we just need an endpoint that will accept messages from our user, along with the `state` and `event` messages that our Components have created.
+Under the covers, InformAI collects together all of the component state and event messages that are published by `<InformAI />` and `useInformAI`. While in development, it's useful to be able to see what InformAI is aware of, and what will be sent with the next user message to the LLM.

-Inside a file like `actions/AI.tsx`, we can define a `submitUserMessage` function like this (it doesn't have to be called `submitUserMessage`, just has to match whatever you passed into your AI Provider in the first step, and call from the UI in the next step):
+InformAI ships with a small React component called `<CurrentState />` which can be rendered anywhere inside your component tree, and will show you all of the component states and events that InformAI has collected.
+
+Drop this into your layout.tsx like so:

```tsx
-"use server";
+import "inform-ai/dist/main.css";
+import "./globals.css";

-import { getMutableAIState, streamUI } from "ai/rsc";
-import { openai } from "@ai-sdk/openai";
-import { Spinner } from "@/components/common/spinner";
+//optionally include the CurrentState component for easier InformAI debugging
+import { InformAIProvider, CurrentState } from "inform-ai";
+
+export default function RootLayout({
+  children,
+}: Readonly<{
+  children: React.ReactNode;
+}>) {
+  return (
+    <html lang="en">
+      <body>
+        <InformAIProvider>
+          {children}
+          <CurrentState className="fixed right-3 top-3" />
+        </InformAIProvider>
+      </body>
+    </html>
+  );
+}
+```
+
+`<CurrentState />` accepts a `className` so you can style/position it however you like (this example has it pinned top right). It will collapse/expand when you click the component heading if it's getting in the way.
+
+![CurrentState component](/docs/current-state-example.png)
+
+`<CurrentState />` is intended to help understand/debug in development, and is not something you'd likely ship to your users. Each time a component registers a state or event update, a row is added to CurrentState with the ability to dig down into a JSON view of all of the information.
+
+## Adding a Chatbot
+
+How you add your Chatbot UI is completely up to you. InformAI works well alongside the Vercel AI SDK (`npm install ai`), and provides a couple of rudimentary chatbot UI components out of the box that use the Vercel AI SDK.
+
+Here's how you can use those to create your own simple `ChatBot` component using the Vercel AI SDK and InformAI:
+
+```tsx
+"use client";
+
+import { ChatWrapper } from "inform-ai";
+import { useActions, useUIState } from "ai/rsc";
+
+export function ChatBot() {
+  const { submitUserMessage } = useActions();
+  const [messages, setMessages] = useUIState();
+
+  return <ChatWrapper submitUserMessage={submitUserMessage} messages={messages} setMessages={setMessages} />;
+}
+```
+
+InformAI exposes `ChatBox` and `Messages` components, along with a `ChatWrapper` that just combines them both into an easy package. `ChatBox` is a fairly simple form with a text input and a button to submit the user's message, and `Messages` just renders the conversation between the user and the LLM assistant.
+
+Because the Vercel AI SDK is awesome, `Messages` can handle streaming LLM responses as well as streaming React Server Components (if you're using Next.js or similar).
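+If you'd rather lay the pieces out yourself instead of using `ChatWrapper`, a rough sketch might look like this (prop names follow the `ChatWrapper` example above - check the `ChatBox` and `Messages` sources for the exact APIs):
+
+```tsx
+"use client";
+
+import { ChatBox, Messages } from "inform-ai";
+
+export function MyChat({ messages, onMessage }) {
+  return (
+    <div className="my-chat">
+      {/* render the conversation so far */}
+      <Messages messages={messages} />
+      {/* ChatBox calls onMessage with the user's text; returning true clears the input */}
+      <ChatBox onMessage={onMessage} />
+    </div>
+  );
+}
+```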
+Here's an example of a conversation using `ChatWrapper`:
+
+![Example Chat on Schedules page](/docs/inform-ai-chat-example.png)
+
+You're highly encouraged to check out the [ChatWrapper source](/src/ui/ChatWrapper.tsx) as well as that for [ChatBox](/src/ui/ChatBox.tsx) and [Messages](/src/ui/Messages.tsx) - they're all pretty straightforward components, so you can use all, some or none of them in your app.
+
+### Vercel AI backend for this example
+
+To get that `ChatBot` component to work, we actually need 2 more things:
+
+- A Vercel AI SDK `<AIProvider>` in our React tree
+- A `submitUserMessage` function
+
+We can define those both in a single file, something like this:
+
+```tsx
+"use server";
+
+import { CoreMessage, generateId } from "ai";
+import { createAI, getMutableAIState, streamUI } from "ai/rsc";
+import { openai } from "@ai-sdk/openai";
 import { AssistantMessage } from "inform-ai";
-import { generateId } from "ai";
-import { AIState, ClientMessage } from "../providers/AI";

+export type ClientMessage = CoreMessage & {
+  id: string;
+};
+
+export type AIState = {
+  chatId: string;
+  messages: ClientMessage[];
+};

-import RedirectTool from "../tools/Redirect";
-import BackupsTableTool from "../tools/BackupsTable";
+export type UIState = {
+  id: string;
+  role?: string;
+  content: React.ReactNode;
+}[];

export async function submitUserMessage(messages: ClientMessage[]) {
  const aiState = getMutableAIState();

@@ -313,39 +269,23 @@ export async function submitUserMessage(messages: ClientMessage[]) {
    messages: [...aiState.get().messages, ...messages],
  });

-  //set up our streaming LLM response, with a couple of tools, a prompt and some onSegment logic
-  //to add any tools and text responses from the LLM to the AI State
+  //set up our streaming LLM response using the Vercel AI SDK
  const result = await streamUI({
    model: openai("gpt-4o-2024-08-06"),
-    initial: <Spinner />,
-    system: `\
-      You are a helpful assistant who can blah blah blah -
-      give your LLM detailed instructions about your app here`,
-    messages: [
-      ...aiState.get().messages.map((message: any) => ({
-        role: message.role,
-        content: message.content,
-        name: message.name,
-      })),
-    ],
+    system: "You are a helpful assistant who blah blah blah",
+    messages: aiState.get().messages,
    text: ({ content, done }) => {
      if (done) {
-        //store the LLM's response into AIState
-        aiState.update({
+        //save the LLM's response to our AIState
+        aiState.done({
          ...aiState.get(),
          messages: [...aiState.get().messages, { role: "assistant", content }],
        });
-
-        // console.log(aiState.get().messages); //if you want to see the message history
-        aiState.done(aiState.get());
      }

+      //AssistantMessage is a simple, styled component that supports streaming text/UI responses
      return <AssistantMessage content={content} />;
    },
-    tools: {
-      redirect: RedirectTool,
-      backupsTable: BackupsTableTool,
-    },
  });

  return {
@@ -353,19 +293,105 @@ export async function submitUserMessage(messages: ClientMessage[]) {
    content: result.value,
  };
}
+
+export const AIProvider = createAI({
+  actions: {
+    submitUserMessage,
+  },
+  initialUIState: [] as UIState,
+  initialAIState: { chatId: generateId(), messages: [] } as AIState,
+});
+```
+
+This gives us our `submitUserMessage` and `AIProvider` exports. All you need to do now is add the `<AIProvider>` into your React component tree, just like we did with `<InformAIProvider>`, and everything should Just Work. The `useActions()` hook we used in our `ChatBot.tsx` will be able to pull out our `submitUserMessage` function and pass it to `ChatWrapper`, which will then call it when the user enters and sends a message.
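+For example (assuming the file above lives at `app/providers/AI.tsx` - adjust the import path to taste):
+
+```tsx
+import { AIProvider } from "./providers/AI";
+import { InformAIProvider } from "inform-ai";
+
+//in your layout.tsx or similar, nest the two providers around your app
+export default function Providers({ children }: { children: React.ReactNode }) {
+  return (
+    <AIProvider>
+      <InformAIProvider>{children}</InformAIProvider>
+    </AIProvider>
+  );
+}
+```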
The AIState management we do there is to keep a running context of the conversation so far - see the [Vercel AI SDK AIState docs](https://sdk.vercel.ai/examples/next-app/state-management/ai-ui-states) if you're not familiar with that pattern.

The `text` prop we passed to `streamUI` is doing 2 things - rendering a pretty `<AssistantMessage />` bubble for the streaming LLM response, and saving the finished LLM response into the AIState history when the LLM has finished its answer. This allows the LLM to see the whole conversation when the user sends follow-up messages, without the client needing to send the entire conversation each time.

### Tips & Tricks

#### Page-level integration

The fastest way to add InformAI to your app is by doing so at the page level. Below is an example from the [lansaver application](https://github.com/edspencer/lansaver), which is a Next.js app that backs up configurations for network devices like firewalls and managed switches ([see the full SchedulePage component here](https://github.com/edspencer/lansaver/blob/main/app/schedules/%5Bid%5D/page.tsx)).

This is a React Server Component, rendered on the server. It imports the `<InformAI />` React component, defines a `prompt` string to help the LLM understand what this component does, and then renders `<InformAI />` with a meaningful component `name`, the `prompt`, and an arbitrary `props` object, which is passed to the LLM in addition to the name and prompt:

```tsx
import { InformAI } from "inform-ai";

const prompt = `A page that shows the details of a schedule. It should show the schedule's configuration, the devices in the schedule, and recent jobs for the schedule. It should also have buttons to run the schedule, edit the schedule, and delete the schedule.`;

export default async function SchedulePage({ params: { id } }: { params: { id: string } }) {
  const schedule = await getSchedule(parseInt(id, 10));

  if (!schedule) {
    return notFound();
  }

  const devices = await getScheduleDevices(schedule.id);
  const jobs = await recentJobs(schedule.id);

  return (
+    <div className="flex flex-col gap-8">
+      <InformAI name="Schedule Detail Page" prompt={prompt} props={{ schedule, devices, jobs }} />
+      <Heading>Schedule Details</Heading>
+      {/* ...schedule configuration, run/edit/delete buttons and devices list markup... */}
+      <Subheading>Recent Jobs</Subheading>
+      {jobs.length ? <JobsTable jobs={jobs} /> : null}
+      {/* ... */}
+    </div>
  );
}
```

In this case we passed the `schedule` (a row from the database), `devices` (an array of device database rows) and `jobs` (an array of recent backup jobs) to the LLM, but we could have passed anything into `props`, so long as it is serializable into JSON. Next time the user sends the LLM a message, it will also receive all of the context we just exposed to it about this page, so it can answer questions about what the user is looking at.

When possible, it is usually better to use InformAI at the component level rather than the page level, to take advantage of React's composability - but it's really up to you.
+ ); +} ``` -Here we're using the Vercel AI SDK's `streamUI` function, which allows us to easily send back streaming text like a basic chatbot, but also streaming UI in the shape of our React Server Components. +In this case we passed the `schedule` (a row from the database), `devices` (an array of device database rows) and `jobs` (an array of recent backup jobs) to the LLM, but we could have passed anything into `props`, so long as it is serializable into JSON. Next time the user sends the LLM a message, it will also receive all of the context we just exposed to it about this page, so can answer questions about what the user is looking at. -All of the InformAI messages along with whatever the user wants to ask for will be passed in via the `messages` argument to this function. We'll see how that happens in the final section below. +When possible, it is usually better to use InformAI at the component level rather than the page level to take advantage of React's composability, but it's really up to you. #### Streaming UI using Tools -In the code snippet above we defined 2 tools that the LLM can execute if it thinks it makes sense to do so. If the tool has a `generate` function, it can render arbitrary React components that will be streamed to the browser. +We can extend our use of streamUI and other functions like it by providing tools definitions for the LLM to choose from. The streamUI() function and its UI streaming capabilities are 100% Vercel AI SDK functionality and not InformAI itself, but InformAI plays well with it and supports streaming UI instead of/in addition to streaming text responses from the LLM: + +```tsx +//inside our submitUserMessage function +const result = await streamUI({ + model: openai("gpt-4o-2024-08-06"), + system: "You are a helpful assistant who blah blah blah", + messages: aiState.get().messages, + text: ({ content, done }) => { + if (done) { + //save the LLM's response to our AIState + aiState.done({ + ...aiState.get(), + messages: [...aiState.get().messages, { role: "assistant", content }], + }); + } + + return ; + }, + tools: { + redirect: RedirectTool, + backupsTable: BackupsTableTool, + }, +}); +``` -Here's a real-world example of a tool definition used in the [LANsaver](https://github.com/edspencer/lansaver) project ([see the full tool source](https://github.com/edspencer/lansaver/blob/main/app/tools/BackupsTable.tsx)). Most of this file is just textual description telling the LLM what the tool is and how to use it. +In the code snippet above we defined 2 tools that the LLM can execute if it thinks it makes sense to do so. If the tool has a `generate` function, it can render arbitrary React components that will be streamed to the browser. Tools can be defined inline but they're easier to read, test and swap in/out when extracted into their own files (tool-calling LLMs like those from OpenAI are still not great at picking the right tool when given too many options). -The important part of the tool is the `generate` function. Note that this is all just vanilla Vercel AI SDK functionality, and you can read more about it [in their docs](https://sdk.vercel.ai/examples/next-app/interface/route-components). 
-Here's a real-world example of a tool definition used in the [LANsaver](https://github.com/edspencer/lansaver) project ([see the full tool source](https://github.com/edspencer/lansaver/blob/main/app/tools/BackupsTable.tsx)). Most of this file is just textual description telling the LLM what the tool is and how to use it.
-
-The important part of the tool is the `generate` function. Note that this is all just vanilla Vercel AI SDK functionality, and you can read more about it [in their docs](https://sdk.vercel.ai/examples/next-app/interface/route-components). Basically, though, this function `yield`s a Spinner component while it is loading the data for the real component it will show, then does some basic fuzzy searching, then finally returns the `<BackupsTable>` component, which will be streamed to the UI:
+Here's a real-world example of a tool definition used in the [LANsaver](https://github.com/edspencer/lansaver) project ([see the full tool source](https://github.com/edspencer/lansaver/blob/main/app/tools/BackupsTable.tsx)). Most of this file is just textual description telling the LLM what the tool is and how to use it. The important part of the tool is the `generate` function:

```tsx
import { z } from "zod";
@@ -440,107 +466,9 @@ const BackupsTableTool = {
export default BackupsTableTool;
```

-### Sending message from the UI to the LLM
-
-The final piece of the puzzle is some piece of UI that allows the user to send messages to the LLM. InformAI comes bundled with a couple of simple UI components out of the box to make this a little faster, but you don't have to use them.
-
-Here's one way you might do that, with a React component called `ChatWrapper`. This pulls in the `Messages`, `UserMessage` and `ChatBox` React components from InformAI, which are super simple components that just either render messages or allow the user to send a new one.
-
-The meat of this `ChatWrapper` is the `onMessage` function, which does a few things:
-
-- Calls `popRecentMessages()` to pull all of the latest component-sent `state` and `event` messages out of `inform-ai`
-- Dedupes these messages (multiple React renders will cause repeated `state` messages)
-- Runs the messages through `mapComponentMessages`, which just turns the message objects into an LLM-friendly string (you can swap this out for your own implementation)
-- Creates a new message object with the text the user just entered
-- Adds the LLM-friendly InformAI messages plus the new user message to the message history
-- Sends the new messages to our `submitUserMessage` function
-- Once the LLM response is received, add it to the messages array so that the InformAI `<Messages>` component will render it
-- Returns true to tell `<ChatBox>` to clear the input
-
-A future version of InformAI may provide a `<ChatWrapper>` component out of the box so that you don't have to copy/paste this into your project, but it is likely that much of what the `onMessage` handler does will be at least somewhat specific to your application. This also gives you the option to provide your own `mapComponentMessages`, `dedupeMessages` and other functions if desired:
-```tsx
-"use client";
-
-import type { AIProvider } from "@/app/providers/AI";
-
-import { useActions, useUIState } from "ai/rsc";
-import { generateId } from "ai";
-import clsx from "clsx";
-
-import { Messages, UserMessage, ChatBox, useInformAIContext, dedupeMessages, mapComponentMessages } from "inform-ai";
-
-export function ChatWrapper({ className }: { className?: string }) {
-  const { submitUserMessage } = useActions();
-  const { popRecentMessages } = useInformAIContext();
-  const [messages, setMessages] = useUIState<typeof AIProvider>();
-
-  async function onMessage(message: string) {
-    const componentMessages = popRecentMessages();
-
-    //deduped set of component-generated messages like state updates and events, since the last user message
-    const newSystemMessages = mapComponentMessages(dedupeMessages(componentMessages));
-
-    //this is the new user message that will be sent to the AI
-    const newUserMessage = { id: generateId(), content: message, role: "user" };
-
-    //the new user message UI that will be added to the chat history
-    const newUserMessageUI = { ...newUserMessage, content: <UserMessage message={message} /> };
-    setMessages([...messages, ...newSystemMessages, newUserMessageUI]);
-
-    //send the new user message to the AI, along with all the recent messages from components
-    const responseMessage = await submitUserMessage([...newSystemMessages, newUserMessage]);
-
-    //update the UI with whatever the AI responded with
-    setMessages((currentMessages) => [...currentMessages, { ...responseMessage, role: "assistant" }]);
-
-    //return true to clear the chat box
-    return true;
-  }
-
-  return (
-    <div className={clsx("chat-wrapper", className)}>
-      <Messages messages={messages} />
-      <ChatBox onMessage={onMessage} />
-    </div>
-  );
-}
-```
-
-Drop this into your layout.tsx like so (here we're just using Tailwind to pin it to the bottom right corner):
-
-```tsx
-import "inform-ai/dist/main.css";
-import "./globals.css";
-
-//the 2 Providers you just created
-import { AIProviders } from "./providers/AI";
-
-//optionally include the CurrentState component for easier InformAI debugging
-import { CurrentState } from "inform-ai";
-
-import { ChatWrapper } from "@/components/ChatWrapper";
-
-export default function RootLayout({
-  children,
-}: Readonly<{
-  children: React.ReactNode;
-}>) {
-  return (
-    <html lang="en">
-      <body>
-        <AIProviders>
-          {children}
-          <CurrentState className="fixed right-3 top-3" />
-          <ChatWrapper className="fixed bottom-3 right-3" />
-        </AIProviders>
-      </body>
-    </html>
-  );
-}
-```
-
-And you're done. You should now see the `<CurrentState />` component rendering at the top right of the screen, showing all of the `state` and `event` messages your components have exposed to InformAI, and a simple chat box with message history at the bottom right. When you next send a message to the LLM via the chat input, all of the InformAI messages that have not previously been sent will be, along with your message, giving the LLM all the context you gave it to answer your question.
+Note that this is all just vanilla Vercel AI SDK functionality, and you can read more about it [in their docs](https://sdk.vercel.ai/examples/next-app/interface/route-components). Basically, though, this function `yield`s a Spinner component while it is loading the data for the real component it will show, then does some basic fuzzy searching, then finally returns the `<BackupsTable>` component, which will be streamed to the UI.
+
+The [Render Interface During Tool Call](https://sdk.vercel.ai/examples/next-app/tools/render-interface-during-tool-call) documentation in the Vercel AI SDK is a good thing to read if you're not familiar with what that can do already.

### What the LLM sees

Because `<BackupsTable>` also uses InformAI, via the `useInformAI` hook, the LLM

As our project was using the bundled `<CurrentState />` component while we had this conversation, we could easily see what the state sent to the LLM looks like directly in our UI:

-![CurrentState after this exchange](/docs/current-state-example.png)
+![CurrentState after this exchange](/docs/current-state-example-2.png)

Here you can see that 4 `state` messages were published to InformAI - the first 2 were for the `SchedulePage` (which has name=`Schedule Detail Page`), and 2 for the freshly-streamed `<BackupsTable>` that the LLM sent back. Expanding the last message there, we can see that the LLM gave the streamed component a sensible name based on the user's request, and also has the `prompt` and `props` that we supply it in `useInformAI`.

-The 'Last Sent' message at the bottom tells us that all of the messages above were already sent to the LLM, as they were popped off the stack using `popRecentMessages` in our `<ChatWrapper>` component. `ChatWrapper` also did some deduping and conversion of the messages into an LLM-friendly format (see the [Sending Messages to the LLM](#sending-message-from-the-ui-to-the-llm) section), so now if we modify our `actions/AI.tsx` file and `console.log(aiState.get().messages)` we will see this:
+The 'Last Sent' message at the bottom tells us that all of the messages above were already sent to the LLM, as they were popped off the stack using `popRecentMessages` in our `<ChatWrapper>` component. `ChatWrapper` also did some deduping and conversion of the messages into an LLM-friendly format.
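+For reference, the deduping/mapping pipeline that `ChatWrapper` runs is roughly the following - both helpers are exported by inform-ai, so you can reuse them in your own chat component (sketch based on the `ChatWrapper` source):
+
+```tsx
+import { useInformAIContext, dedupeMessages, mapComponentMessages } from "inform-ai";
+
+//inside a client component:
+const { popRecentMessages } = useInformAIContext();
+
+//pop the state/event messages accumulated since the last send,
+//drop duplicates from repeated renders, then convert to LLM-friendly messages
+const componentMessages = popRecentMessages();
+const newSystemMessages = mapComponentMessages(dedupeMessages(componentMessages));
+```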
+If we add `console.log(aiState.get().messages)` to our `submitUserMessage` function, we will see something like this:
+
+```
+[
diff --git a/docs/current-state-example-2.png b/docs/current-state-example-2.png
new file mode 100644
index 0000000..f63ca80
Binary files /dev/null and b/docs/current-state-example-2.png differ
diff --git a/docs/current-state-example.png b/docs/current-state-example.png
index 2054527..982ad76 100644
Binary files a/docs/current-state-example.png and b/docs/current-state-example.png differ
diff --git a/jest.config.ts b/jest.config.ts
index cacd815..800a467 100644
--- a/jest.config.ts
+++ b/jest.config.ts
@@ -1,12 +1,8 @@
-// jest.config.ts
import type { JestConfigWithTsJest } from "ts-jest";

const jestConfig: JestConfigWithTsJest = {
  preset: "ts-jest",
  testEnvironment: "jsdom",
-  transform: {
-    // "^.+\\.(t|j)sx?$": ["@swc/jest"],
-  },
  moduleNameMapper: {
    "^ai/rsc$": "<rootDir>/node_modules/ai/rsc/dist",
    "^@/(.*)$": "<rootDir>/$1",
diff --git a/package.json b/package.json
index b8a24ad..2299ed4 100644
--- a/package.json
+++ b/package.json
@@ -25,7 +25,7 @@
    "url": "git+https://github.com/edspencer/inform-ai.git"
  },
  "author": "Ed Spencer",
-  "license": "ISC",
+  "license": "MIT",
  "devDependencies": {
    "@changesets/cli": "^2.27.7",
    "@jest/globals": "^29.7.0",
diff --git a/src/ui/ChatWrapper.tsx b/src/ui/ChatWrapper.tsx
index e397e56..82b3d61 100644
--- a/src/ui/ChatWrapper.tsx
+++ b/src/ui/ChatWrapper.tsx
@@ -21,11 +21,10 @@ export interface ChatWrapperProps {
 *
 * import { ChatWrapper } from "inform-ai";
 * import { useActions, useUIState } from "ai/rsc";
- * import { AIProvider } from "./AI";
 *
 * export function MyCustomChatWrapper() {
 *   const { submitUserMessage } = useActions();
- *   const [messages, setMessages] = useUIState<typeof AIProvider>();
+ *   const [messages, setMessages] = useUIState();
 *
 *   return (
 *     <ChatWrapper