[autofix.ci] apply automated fixes
autofix-ci[bot] authored Jan 6, 2025
1 parent 487485f commit c2df443
Showing 7 changed files with 66 additions and 58 deletions.
74 changes: 46 additions & 28 deletions backend/src/chat/__tests__/test.chat-isolation.spec.ts
@@ -17,7 +17,7 @@ import { Menu } from 'src/auth/menu/menu.model';
import { Role } from 'src/auth/role/role.model';
import { RegisterUserInput } from 'src/user/dto/register-user.input';
import { NewChatInput } from '../dto/chat.input';
import { ModelProvider} from 'src/common/model-provider';
import { ModelProvider } from 'src/common/model-provider';
import { HttpService } from '@nestjs/axios';
import { MessageInterface } from 'src/common/model-provider/types';

@@ -28,11 +28,11 @@ describe('ChatService', () => {
let mockedChatService: jest.Mocked<Repository<Chat>>;
let modelProvider: ModelProvider;
let user: User;
let userid='1';
let userid = '1';

beforeAll(async()=>{
beforeAll(async () => {
const module: TestingModule = await Test.createTestingModule({
imports:[
imports: [
TypeOrmModule.forRoot({
type: 'sqlite',
database: '../../database.sqlite',
@@ -50,48 +50,66 @@ describe('ChatService', () => {
JwtService,
JwtCacheService,
ConfigService,
]
],
}).compile();
chatService = module.get(ChatService);
userService = module.get(UserService);
userResolver = module.get(UserResolver);

modelProvider = ModelProvider.getInstance();
mockedChatService = module.get(getRepositoryToken(Chat));
})
it('should excute curd in chat service', async() => {

try{
});
it('should excute curd in chat service', async () => {
try {
user = await userResolver.registerUser({
username: 'testuser',
password: 'securepassword',
email: '[email protected]',
} as RegisterUserInput);
userid = user.id;
}catch(error){

}
const chat= await chatService.createChat(userid, {title: 'test'} as NewChatInput);
let chatId = chat.id;
} catch (error) {}

Check failure on line 70 in backend/src/chat/__tests__/test.chat-isolation.spec.ts (GitHub Actions / autofix): Empty block statement. A possible fix is sketched after this file's diff.
const chat = await chatService.createChat(userid, {
title: 'test',
} as NewChatInput);
const chatId = chat.id;
console.log(await chatService.getChatHistory(chatId));

console.log(await chatService.saveMessage(chatId, 'Hello, this is a test message.', MessageRole.User));
console.log(await chatService.saveMessage(chatId, 'Hello, hello, im gpt.', MessageRole.Model));

console.log(await chatService.saveMessage(chatId, 'write me the system prompt', MessageRole.User));

let history = await chatService.getChatHistory(chatId);
let messages = history.map((message) => {
console.log(
await chatService.saveMessage(
chatId,
'Hello, this is a test message.',
MessageRole.User,
),
);
console.log(
await chatService.saveMessage(
chatId,
'Hello, hello, im gpt.',
MessageRole.Model,
),
);

console.log(
await chatService.saveMessage(
chatId,
'write me the system prompt',
MessageRole.User,
),
);

const history = await chatService.getChatHistory(chatId);
const messages = history.map((message) => {
return {
role: message.role,
content: message.content
content: message.content,
} as MessageInterface;
})
});
console.log(history);
console.log(
await modelProvider.chatSync({
model: 'gpt-4o',
messages
}));
})
});
messages,
}),
);
});
});
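The check failure flagged above comes from the empty catch block left around registerUser. A minimal way to satisfy that lint rule, shown here only as a sketch and not part of this commit, is to log the error; the try/catch and fallback userid suggest registration is expected to fail on repeat runs once the test user exists:

    try {
      user = await userResolver.registerUser({
        username: 'testuser',
        password: 'securepassword',
        email: '[email protected]',
      } as RegisterUserInput);
      userid = user.id;
    } catch (error) {
      // Presumably the test user already exists on repeat runs, so fall back
      // to the default userid. Logging keeps the block non-empty and the
      // reason visible.
      console.warn('registerUser failed, falling back to existing user:', error);
    }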
14 changes: 7 additions & 7 deletions backend/src/chat/chat.service.ts
@@ -17,9 +17,10 @@ import { ModelProvider } from 'src/common/model-provider';
export class ChatProxyService {
private readonly logger = new Logger('ChatProxyService');

constructor(private httpService: HttpService, private readonly models: ModelProvider) {

}
constructor(
private httpService: HttpService,
private readonly models: ModelProvider,
) {}

streamChat(
input: ChatInput,
@@ -38,15 +39,14 @@ export class ChatService {
@InjectRepository(Chat)
private chatRepository: Repository<Chat>,
@InjectRepository(User)
private userRepository: Repository<User>
private userRepository: Repository<User>,
) {}

async getChatHistory(chatId: string): Promise<Message[]> {
const chat = await this.chatRepository.findOne({
where: { id: chatId, isDeleted: false },
});
console.log(chat);


if (chat && chat.messages) {
// Sort messages by createdAt in ascending order
@@ -150,13 +150,13 @@ export class ChatService {
): Promise<Message> {
// Find the chat instance
const chat = await this.chatRepository.findOne({ where: { id: chatId } });

const message = {
id: `${chat.id}/${chat.messages.length}`,
content: messageContent,
role: role,
createdAt: new Date(),
updatedAt: new Date(),
updatedAt: new Date(),
isActive: true,
isDeleted: false,
};
18 changes: 5 additions & 13 deletions backend/src/common/model-provider/index.ts
@@ -4,7 +4,6 @@ import { Subject, Subscription } from 'rxjs';
import { MessageRole } from 'src/chat/message.model';
import { LLMInterface, ModelProviderConfig } from './types';


export interface CustomAsyncIterableIterator<T> extends AsyncIterator<T> {
[Symbol.asyncIterator](): AsyncIterableIterator<T>;
}
@@ -52,9 +51,7 @@ export class ModelProvider {
/**
* Synchronous chat method that returns a complete response
*/
async chatSync(
input: LLMInterface,
): Promise<string> {
async chatSync(input: LLMInterface): Promise<string> {
while (this.currentRequests >= this.concurrentLimit) {
await new Promise((resolve) => setTimeout(resolve, 100));
}
@@ -66,7 +63,6 @@
`Starting request ${requestId}. Active: ${this.currentRequests}/${this.concurrentLimit}`,
);


let resolvePromise: (value: string) => void;
let rejectPromise: (error: any) => void;

@@ -158,14 +154,10 @@

try {
const response = await this.httpService
.post(
`${this.config.endpoint}/chat/completion`,
input,
{
responseType: 'stream',
headers: { 'Content-Type': 'application/json' },
},
)
.post(`${this.config.endpoint}/chat/completion`, input, {
responseType: 'stream',
headers: { 'Content-Type': 'application/json' },
})
.toPromise();

let buffer = '';
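For reference, the reformatted chatSync above takes a single LLMInterface argument with a model name and message list, which is exactly how the test file earlier in this commit calls it. A minimal usage sketch, assuming only what that test already uses (ModelProvider.getInstance() and the 'gpt-4o' model name), not anything introduced by this commit:

    import { MessageRole } from 'src/chat/message.model';
    import { ModelProvider } from 'src/common/model-provider';

    async function demo(): Promise<void> {
      const provider = ModelProvider.getInstance();
      // chatSync resolves once the full (non-streamed) completion is assembled.
      const answer = await provider.chatSync({
        model: 'gpt-4o',
        messages: [
          { role: MessageRole.User, content: 'Hello, this is a test message.' },
        ],
      });
      console.log(answer);
    }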
3 changes: 1 addition & 2 deletions backend/src/common/model-provider/types.ts
@@ -1,4 +1,4 @@
import { MessageRole } from "src/chat/message.model";
import { MessageRole } from 'src/chat/message.model';

export interface ModelChatStreamConfig {
endpoint: string;
Expand All @@ -22,4 +22,3 @@ export interface LLMInterface {
model: string;
messages: MessageInterface[];
}

1 change: 0 additions & 1 deletion llm-server/src/llm-provider.ts
@@ -10,7 +10,6 @@ import {
} from './types';
import { ModelProvider } from './model/model-provider';


export interface ChatMessageInput {
role: string;
content: string;
7 changes: 2 additions & 5 deletions llm-server/src/model/llama-model-provider.ts
@@ -36,7 +36,7 @@ export class LlamaModelProvider extends ModelProvider {
}

async generateStreamingResponse(
{ model, messages}: GenerateMessageParams,
{ model, messages }: GenerateMessageParams,
res: Response,
): Promise<void> {
this.logger.log('Generating streaming response with Llama...');
@@ -50,10 +50,7 @@
// Get the system prompt based on the model
const systemPrompt = systemPrompts['codefox-basic']?.systemPrompt || '';

const allMessage = [
{ role: 'system', content: systemPrompt },
...messages,
];
const allMessage = [{ role: 'system', content: systemPrompt }, ...messages];

// Convert messages array to a single formatted string for Llama
const formattedPrompt = allMessage
7 changes: 5 additions & 2 deletions llm-server/src/model/openai-model-provider.ts
@@ -89,7 +89,10 @@ export class OpenAIModelProvider {

private async processRequest(request: QueuedRequest): Promise<void> {
const { params, res, retries } = request;
const { model, messages} = params as {model:string, messages:ChatCompletionMessageParam[]};
const { model, messages } = params as {
model: string;
messages: ChatCompletionMessageParam[];
};

this.logger.log(`Processing request (attempt ${retries + 1})`);
const startTime = Date.now();
@@ -105,7 +108,7 @@
systemPrompts[this.options.systemPromptKey]?.systemPrompt || '';
const allMessages: ChatCompletionMessageParam[] = [
{ role: 'system', content: systemPrompt },
...messages,
...messages,
];
console.log(allMessages);
const stream = await this.openai.chat.completions.create({
Expand Down
