Overview
This tutorial shows you how to build a Retrieval-Augmented Generation (RAG) chatbot that answers user questions by searching your knowledge base and using the results to generate accurate, context-aware responses.
How It Works
The chatbot follows a simple pipeline: you ingest documents into a Rayrift folder, search that folder for passages relevant to each user question, and pass the top results to an LLM as context for generating a grounded answer.
Step 1: Set Up Your Knowledge Base
First, create a folder for your documentation and ingest your files.
Create a Folder
const response = await fetch('https://api.Rayrift.com/v1/folders', {
  method: 'POST',
  headers: {
    'Authorization': 'sk_live_...',
    'Content-Type': 'application/json'
  },
  body: JSON.stringify({
    name: 'Product Manuals',
    description: 'User guides and documentation'
  })
});

const { id: folderId } = await response.json();
console.log('Folder ID:', folderId);
Ingest Documents
Upload your PDF manuals, FAQs, or other documentation:
// Upload a PDF file
const formData = new FormData();
formData.append('file', pdfFile);
formData.append('payload', JSON.stringify({
  folder_id: folderId
}));

const uploadResponse = await fetch('https://api.Rayrift.com/v1/documents', {
  method: 'POST',
  headers: {
    'Authorization': 'sk_live_...'
  },
  body: formData
});

const document = await uploadResponse.json();
console.log('Document uploaded:', document.id);
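Uploads can fail (wrong folder ID, unsupported file type, auth issues), so it's worth checking the HTTP status before parsing the body. A minimal guard, placed between the fetch call and the .json() call above; this uses only the standard fetch response, nothing Rayrift-specific:

// Bail out early on a non-2xx response instead of parsing it as a document
if (!uploadResponse.ok) {
  const errorBody = await uploadResponse.text();
  throw new Error(`Upload failed (${uploadResponse.status}): ${errorBody}`);
}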
Step 2: Implement the Search Function
Create a function that searches your knowledge base:
async function searchKnowledgeBase(query, folderId) {
  const response = await fetch('https://api.Rayrift.com/v1/search', {
    method: 'POST',
    headers: {
      'Authorization': 'sk_live_...',
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      query: query,
      limit: 5,
      folder_id: folderId
    })
  });

  const { results } = await response.json();
  return results;
}
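To sanity-check the function, call it with a sample question and inspect the hits. Each result carries at least a text and a score field (the two fields used throughout this tutorial); the query below is just a placeholder:

const hits = await searchKnowledgeBase('How do I reset the device?', folderId);

hits.forEach((hit, i) => {
  // Print each match's relevance score and the first 80 characters of its text
  console.log(`${i + 1}. [${hit.score}] ${hit.text.substring(0, 80)}...`);
});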
Step 3: Build the Chatbot Logic
Combine search results with an LLM to generate answers:
async function answerQuestion(userQuestion, folderId) {
  // Step 1: Search the knowledge base
  const searchResults = await searchKnowledgeBase(userQuestion, folderId);

  // Step 2: Build context from search results
  const context = searchResults
    .map((hit, index) => `[${index + 1}] ${hit.text}`)
    .join('\n\n');

  // Step 3: Create prompt for LLM
  const prompt = `You are a helpful assistant. Answer the user's question using only the information provided in the context below. If the context doesn't contain enough information, say so.

Context:
${context}

User Question: ${userQuestion}

Answer:`;

  // Step 4: Call your LLM (OpenAI, Anthropic, etc.)
  const llmResponse = await callLLM(prompt);

  return {
    answer: llmResponse,
    sources: searchResults.map(hit => ({
      text: hit.text.substring(0, 100) + '...',
      score: hit.score
    }))
  };
}
// Example LLM integration (using OpenAI)
async function callLLM(prompt) {
  const response = await fetch('https://api.openai.com/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${process.env.OPENAI_API_KEY}`,
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      model: 'gpt-4',
      messages: [{ role: 'user', content: prompt }],
      temperature: 0.7
    })
  });

  const data = await response.json();
  return data.choices[0].message.content;
}
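With both pieces in place, answering a question is a single call. A quick usage sketch; the question is a placeholder, and folderId is the ID returned in Step 1:

const result = await answerQuestion('What is the warranty period?', folderId);

console.log(result.answer);
// Each source carries a truncated snippet and its relevance score
result.sources.forEach(source => console.log(`- (${source.score}) ${source.text}`));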
Step 4: Create the Chat Interface
Build a simple chat interface:
// Express.js example
app.post('/api/chat', async (req, res) => {
  try {
    const { message, folderId } = req.body;
    const result = await answerQuestion(message, folderId);

    res.json({
      answer: result.answer,
      sources: result.sources
    });
  } catch (error) {
    res.status(500).json({ error: error.message });
  }
});
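You can exercise the endpoint from any HTTP client. For example, from another Node script, assuming the server is running locally on port 3000 as in the complete example below:

const response = await fetch('http://localhost:3000/api/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    message: 'How do I reset the device?',
    folderId: 'your-folder-id'  // placeholder: use the ID returned in Step 1
  })
});

const { answer, sources } = await response.json();
console.log(answer, sources);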
Complete Example
Here’s a complete Node.js/Express implementation:
const express = require('express');
// Node 18+ ships a global fetch; on older versions, install node-fetch@2
// (node-fetch v3 is ESM-only and cannot be loaded with require)
const fetch = globalThis.fetch || require('node-fetch');

const app = express();
app.use(express.json());

const Rayrift_API_KEY = process.env.Rayrift_API_KEY;
const Rayrift_API_URL = 'https://api.Rayrift.com/v1';
const OPENAI_API_KEY = process.env.OPENAI_API_KEY;
async function searchRayrift(query, folderId) {
  const response = await fetch(`${Rayrift_API_URL}/search`, {
    method: 'POST',
    headers: {
      'Authorization': Rayrift_API_KEY,
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      query,
      limit: 5,
      folder_id: folderId
    })
  });

  const data = await response.json();
  return data.results || [];
}
async function generateAnswer(question, context) {
  const prompt = `Answer the following question using the provided context. If the context doesn't contain enough information, say so.

Context:
${context}

Question: ${question}

Answer:`;

  const response = await fetch('https://api.openai.com/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${OPENAI_API_KEY}`,
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      model: 'gpt-4',
      messages: [{ role: 'user', content: prompt }]
    })
  });

  const data = await response.json();
  return data.choices[0].message.content;
}
app.post('/chat', async (req, res) => {
  const { message, folderId } = req.body;

  try {
    // Search knowledge base
    const results = await searchRayrift(message, folderId);

    // Build context
    const context = results.map(r => r.text).join('\n\n');

    // Generate answer
    const answer = await generateAnswer(message, context);

    res.json({
      answer,
      sources: results.map(r => ({
        snippet: r.text.substring(0, 150),
        score: r.score
      }))
    });
  } catch (error) {
    res.status(500).json({ error: error.message });
  }
});

app.listen(3000, () => {
  console.log('Chatbot server running on port 3000');
});
Best Practices
Improve Search Quality: Use specific folder IDs to scope searches to relevant documentation.
Handle Edge Cases: Check whether the search returned any results and provide a sensible fallback response when it didn't (see the first sketch after this list).
Show Sources: Display source snippets to users so they can verify information.
Rate Limiting: Implement rate limiting on your chat endpoint to prevent abuse (see the second sketch after this list).
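A minimal empty-results fallback for the /chat handler in the complete example, skipping the LLM call entirely when the knowledge base has nothing relevant:

const results = await searchRayrift(message, folderId);

// With no matching passages there is no context to ground the answer,
// so respond directly rather than letting the LLM guess
if (results.length === 0) {
  return res.json({
    answer: "I couldn't find anything about that in the knowledge base. Try rephrasing your question.",
    sources: []
  });
}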
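For rate limiting, the express-rate-limit middleware is one common option. The window and cap below are illustrative choices, not prescriptive values:

const rateLimit = require('express-rate-limit');

// Allow each IP at most 20 chat requests per minute
app.use('/chat', rateLimit({
  windowMs: 60 * 1000,
  max: 20,
  message: { error: 'Too many requests, please slow down.' }
}));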
Next Steps