// app/lib/ai/rag.ts
import { openrouter } from '@/lib/ai/openrouter';
import { createEmbedding } from '@/lib/ai/embeddings';
import { searchDocuments } from '@/lib/ai/vector-store';
/**
 * Answers a user question with retrieval-augmented generation (RAG):
 * embeds the query, retrieves relevant documents from the vector store,
 * and asks the chat model to answer using only that retrieved context.
 *
 * @param query - The user's natural-language question. Must be non-empty.
 * @returns The assistant message from the chat completion
 *          (shape determined by the `openrouter` client).
 * @throws {Error} If `query` is empty or whitespace-only — fails fast
 *                 before spending an embedding/search round trip.
 */
export async function ragQuery(query: string) {
  if (query.trim() === '') {
    throw new Error('ragQuery: query must be a non-empty string');
  }

  // 1. Embed the query so it can be compared against stored document vectors.
  const queryEmbedding = await createEmbedding(query);

  // 2. Retrieve documents similar to the query embedding.
  //    NOTE(review): result count/threshold are whatever searchDocuments
  //    defaults to — confirm they suit this use case.
  const relevantDocs = await searchDocuments(queryEmbedding);

  // 3. Build the context block. If retrieval came back empty, say so
  //    explicitly rather than leaving a blank "Context:" — an empty string
  //    here reads as a malformed prompt and invites hallucination.
  const context =
    relevantDocs.length > 0
      ? relevantDocs.map((doc) => doc.content).join('\n\n')
      : '(no relevant documents were found)';

  // 4. Ask the model, grounding it in the retrieved context.
  //    NOTE(review): OpenRouter model IDs are usually provider-prefixed
  //    (e.g. 'openai/gpt-4') — confirm 'gpt-4' is valid for this client.
  const response = await openrouter.chat({
    model: 'gpt-4',
    messages: [
      {
        role: 'system',
        content: `Use the following context to answer the question. If you don't know the answer, say so.\n\nContext: ${context}`,
      },
      { role: 'user', content: query },
    ],
  });

  return response.message;
}