Examples
Complete, runnable examples demonstrating ThinkHive SDK features.
RAG Agent
A complete RAG (Retrieval-Augmented Generation) agent with full tracing.
- package.json
- index.ts
- .env
index.ts
import { init, traceLLM, traceRetrieval, traceChain } from '@thinkhive/sdk';
import OpenAI from 'openai';
import { Pinecone } from '@pinecone-database/pinecone';
// Initialize
// Tracing must be set up before any traced calls; clients are created once at
// module load and reused by every request.
init({ serviceName: 'rag-agent' });
const openai = new OpenAI();
const pinecone = new Pinecone();
// Pinecone index holding the embedded knowledge-base documents used for retrieval.
const index = pinecone.index('knowledge-base');
/**
 * Answer a question with retrieval-augmented generation (RAG):
 * embed the query, fetch the most relevant documents from Pinecone,
 * then ask GPT-4 to answer using those documents as context.
 * The whole pipeline is traced as one ThinkHive chain.
 *
 * @param userQuestion - The question to answer.
 * @param topK - How many documents to retrieve (default 5). Previously this
 *   constant was hard-coded in two places and could silently drift apart.
 * @returns The model's answer, or an empty string if no content was returned.
 */
async function ragChat(userQuestion: string, topK = 5): Promise<string> {
  return traceChain({
    name: 'rag-pipeline',
    input: { question: userQuestion },
  }, async () => {
    // Step 1: Embed the query
    const embedding = await openai.embeddings.create({
      model: 'text-embedding-3-small',
      input: userQuestion,
    });

    // Step 2: Retrieve relevant documents
    const documents = await traceRetrieval({
      name: 'pinecone-search',
      query: userQuestion,
      topK,
    }, async () => {
      const results = await index.query({
        vector: embedding.data[0].embedding,
        topK,
        includeMetadata: true,
      });
      // A match with missing metadata degrades to '' rather than crashing.
      return results.matches.map(m => m.metadata?.text || '');
    });

    // Step 3: Generate a response grounded in the retrieved context
    const response = await traceLLM({
      name: 'answer-generation',
      modelName: 'gpt-4',
      provider: 'openai',
      input: userQuestion,
    }, async () => {
      return openai.chat.completions.create({
        model: 'gpt-4',
        messages: [
          {
            role: 'system',
            content: `Answer based on this context:\n\n${documents.join('\n\n')}`,
          },
          { role: 'user', content: userQuestion },
        ],
      });
    });

    return response.choices[0].message.content || '';
  });
}
// Usage
// Top-level await: this example assumes an ES-module execution context.
const answer = await ragChat('What is ThinkHive?');
console.log(answer);
LangChain Agent
A LangChain agent with tool use and full tracing.
langchain-agent.ts
import { init } from '@thinkhive/sdk';
import { setupLangChainCallback } from '@thinkhive/sdk/instrumentation/langchain';
import { ChatOpenAI } from '@langchain/openai';
import { AgentExecutor, createOpenAIFunctionsAgent } from 'langchain/agents';
import { DynamicTool } from '@langchain/core/tools';
import { pull } from 'langchain/hub';
// Initialize ThinkHive tracing before any traced agent work.
init({ serviceName: 'langchain-agent' });
// Define tools
// Tool the agent can invoke to look information up on the web.
const searchTool = new DynamicTool({
  name: 'web_search',
  description: 'Search the web for information',
  func: async (query: string) => {
    // Your search implementation
    // (stubbed here — returns a placeholder string for the example)
    return `Results for: ${query}`;
  },
});
// Calculator tool the agent can call for arithmetic.
// SECURITY: eval() executes arbitrary JavaScript, and the expression here is
// produced by the LLM (i.e. influenced by untrusted user input). We therefore
// whitelist plain-arithmetic characters before evaluating; anything else is
// rejected. For production, prefer a real expression parser (e.g. mathjs).
const calculatorTool = new DynamicTool({
  name: 'calculator',
  description: 'Perform mathematical calculations',
  func: async (expression: string) => {
    // Only digits, whitespace, and arithmetic operators/parentheses are allowed.
    if (!/^[\d\s+\-*/%().]+$/.test(expression)) {
      throw new Error(`Unsupported expression: ${expression}`);
    }
    return String(eval(expression));
  },
});
/**
 * Run the tool-using OpenAI-functions agent on a single question and
 * return its final answer, with the full run traced by ThinkHive.
 */
async function runAgent(question: string) {
  const tools = [searchTool, calculatorTool];
  const llm = new ChatOpenAI({ modelName: 'gpt-4' });

  // Standard OpenAI-functions agent prompt published on LangChain Hub.
  const prompt = await pull('hwchase17/openai-functions-agent');

  const agent = await createOpenAIFunctionsAgent({ llm, tools, prompt });
  const executor = AgentExecutor.fromAgentAndTools({ agent, tools });

  // Create ThinkHive callback — captures every LLM call and tool invocation.
  const thinkhiveCallback = setupLangChainCallback({
    runName: 'customer-query',
    metadata: { channel: 'chat' },
  });

  const result = await executor.invoke(
    { input: question },
    { callbacks: [thinkhiveCallback] }
  );
  return result.output;
}
// Usage
// Top-level await: this example assumes an ES-module execution context.
const answer = await runAgent('What is 42 * 17?');
console.log(answer);
Multi-Turn Conversation
Track complete conversations with context.
conversation.ts
import { init, traceChain, traceLLM, runs } from '@thinkhive/sdk';
import OpenAI from 'openai';
// Initialize ThinkHive tracing before any traced calls.
init({ serviceName: 'conversation-agent' });
const openai = new OpenAI();
// Shape of one chat message; roles mirror the OpenAI chat-completions roles.
interface Message {
  role: 'user' | 'assistant' | 'system';
  content: string;
}
/**
 * Stateful multi-turn chat agent. Each turn is traced as a ThinkHive chain
 * wrapping the LLM call, and additionally recorded as a run carrying
 * conversation-level metadata so turns can be grouped by conversationId.
 */
class ConversationAgent {
  private history: Message[] = [];
  private readonly conversationId: string;

  constructor(systemPrompt: string) {
    // Timestamp-based id ties all turns of this conversation together.
    this.conversationId = `conv_${Date.now()}`;
    this.history.push({ role: 'system', content: systemPrompt });
  }

  async chat(userMessage: string): Promise<string> {
    this.history.push({ role: 'user', content: userMessage });

    const reply = await traceChain({
      name: 'conversation-turn',
      input: { message: userMessage },
      metadata: {
        conversationId: this.conversationId,
        turnNumber: this.history.length,
      },
    }, async () => {
      const completion = await traceLLM({
        name: 'chat-response',
        modelName: 'gpt-4',
        provider: 'openai',
        input: userMessage,
      }, async () =>
        openai.chat.completions.create({
          model: 'gpt-4',
          // Full history (system prompt + all prior turns) gives the model context.
          messages: this.history,
        })
      );
      return completion.choices[0].message.content || '';
    });

    this.history.push({ role: 'assistant', content: reply });

    // Create a run for the conversation
    await runs.create({
      name: 'conversation-turn',
      input: userMessage,
      output: reply,
      outcome: 'success',
      metadata: {
        conversationId: this.conversationId,
        messageCount: this.history.length,
      },
    });

    return reply;
  }
}
// Usage
// Three sequential turns sharing one conversationId (top-level await; ES module).
const agent = new ConversationAgent('You are a helpful customer support agent.');
await agent.chat('Hi, I need help with my order');
await agent.chat('My order number is 12345');
await agent.chat('I want to return it');
Business Impact Tracking
Link AI performance to business metrics.
business-tracking.ts
import { init, traceLLM, runs, captureCustomerContext } from '@thinkhive/sdk';
import OpenAI from 'openai';
// Initialize ThinkHive tracing before any traced calls.
init({ serviceName: 'support-agent' });
const openai = new OpenAI();
/**
 * Answer a customer support query and record the interaction with business
 * context, so AI quality can be correlated with revenue metrics.
 *
 * @param customerId - CRM/ThinkHive customer identifier.
 * @param query - The customer's question.
 * @returns The generated response plus a (simulated) satisfaction score.
 */
async function handleSupportQuery(
  customerId: string,
  query: string
): Promise<{ response: string; satisfaction: number }> {
  // Single source of truth for the customer's business metrics. Previously the
  // tier and ARR were hard-coded twice (context capture AND run metadata) and
  // could silently drift apart.
  // NOTE(review): hard-coded for the example — in production, load from your CRM.
  const subscriptionTier = 'premium';
  const annualRecurringRevenue = 5000;

  // Capture customer context
  await captureCustomerContext({
    customerId,
    metrics: {
      subscription_tier: subscriptionTier,
      arr: annualRecurringRevenue,
      health_score: 0.85,
    },
  });

  // Handle the query
  const response = await traceLLM({
    name: 'support-response',
    modelName: 'gpt-4',
    provider: 'openai',
    input: query,
    metadata: {
      customerId,
      channel: 'support',
    },
  }, async () => {
    return openai.chat.completions.create({
      model: 'gpt-4',
      messages: [
        { role: 'system', content: 'You are a helpful support agent.' },
        { role: 'user', content: query },
      ],
    });
  });

  const responseText = response.choices[0].message.content || '';

  // Simulate satisfaction score (in production, collect from user)
  const satisfaction = Math.random() > 0.2 ? 5 : 3;

  // Create run with business context
  await runs.create({
    name: 'support-interaction',
    input: query,
    output: responseText,
    outcome: satisfaction >= 4 ? 'success' : 'failure',
    metadata: {
      customerId,
      satisfaction,
      arr: annualRecurringRevenue,
      tier: subscriptionTier,
    },
  });

  return { response: responseText, satisfaction };
}
// Usage
// Top-level await: this example assumes an ES-module execution context.
const result = await handleSupportQuery(
  'cust_123',
  'How do I upgrade my plan?'
);
console.log(result);
Error Handling
Robust error handling with tracing.
error-handling.ts
import { init, traceLLM, runs } from '@thinkhive/sdk';
import OpenAI from 'openai';
// Initialize ThinkHive tracing before any traced calls.
init({ serviceName: 'robust-agent' });
const openai = new OpenAI();
/**
 * Call the LLM with automatic retries and exponential backoff.
 * Every failed attempt is recorded as a ThinkHive run so failures remain
 * visible even when a later retry succeeds.
 *
 * @param prompt - The user prompt to send.
 * @param maxRetries - Maximum number of attempts (default 3).
 * @returns The model's response text ('' if the model returned no content).
 * @throws The last attempt's error once all retries are exhausted.
 */
async function robustLLMCall(prompt: string, maxRetries = 3): Promise<string> {
  let lastError: Error | null = null;

  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      const response = await traceLLM({
        name: 'llm-call',
        modelName: 'gpt-4',
        provider: 'openai',
        input: prompt,
        metadata: {
          attempt,
          maxRetries,
        },
      }, async () => {
        return openai.chat.completions.create({
          model: 'gpt-4',
          messages: [{ role: 'user', content: prompt }],
        });
      });
      return response.choices[0].message.content || '';
    } catch (error) {
      // Normalize non-Error throwables (the previous `error as Error` cast
      // would yield undefined .message/.name for thrown strings/objects).
      lastError = error instanceof Error ? error : new Error(String(error));

      // Log the failure
      await runs.create({
        name: 'llm-call-failure',
        input: prompt,
        output: '',
        outcome: 'failure',
        metadata: {
          attempt,
          error: lastError.message,
          errorType: lastError.name,
        },
      });

      if (attempt < maxRetries) {
        // Exponential backoff: 2s, 4s, 8s, ...
        await new Promise(r => setTimeout(r, Math.pow(2, attempt) * 1000));
      }
    }
  }

  // Guard: with maxRetries < 1 the loop never runs and lastError is still
  // null — the previous code would `throw null` here.
  throw lastError ?? new Error('robustLLMCall: no attempts were made (maxRetries < 1)');
}
// Usage
// Top-level await: this example assumes an ES-module execution context.
try {
const response = await robustLLMCall('What is ThinkHive?');
console.log(response);
} catch (error) {
console.error('All retries failed:', error);
}
Running Examples
Clone and run any example:
git clone https://github.com/thinkhive/examples
cd examples/rag-agent
npm install
cp .env.example .env # Add your API keys
npm start
View Your Traces: After running examples, visit app.thinkhive.ai/traces to see the captured data.
Next Steps
- Python SDK - Python examples and usage
- API Reference - REST API for advanced integrations
- Guides - In-depth feature guides