Decorators
ThinkHive provides Python decorators for transparent tracing of AI operations.
@trace_llm
Trace calls to language models.
import thinkhive
from openai import OpenAI
client = OpenAI()
@thinkhive.trace_llm(model_name="gpt-4", provider="openai")
def generate_response(prompt: str) -> str:
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": prompt}],
)
return response.choices[0].message.content
# Traces are automatically created
result = generate_response("Explain AI observability")

Parameters
| Parameter | Type | Required | Description |
|---|---|---|---|
| model_name | str | Yes | Model identifier (e.g., 'gpt-4') |
| provider | str | Yes | Provider name (e.g., 'openai', 'anthropic') |
| name | str | No | Custom span name (defaults to function name) |
| metadata | dict | No | Additional attributes to record |
Automatic Capture
The decorator automatically captures from OpenAI-compatible responses:
- Token counts (if `response.usage` is available)
- Model name (if different from requested)
- Finish reason
@thinkhive.trace_llm(model_name="gpt-4", provider="openai")
def chat_with_tokens(message: str):
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": message}],
)
# Token counts are automatically extracted from response.usage
    return response.choices[0].message.content

@trace_retrieval
Trace retrieval operations like vector searches.
import thinkhive
from pinecone import Pinecone
pc = Pinecone()
index = pc.Index("knowledge-base")
@thinkhive.trace_retrieval(query="dynamic")
def search_documents(query: str, top_k: int = 5) -> list:
# The query parameter can reference function arguments
results = index.query(
vector=get_embedding(query),
top_k=top_k,
include_metadata=True,
)
return [match.metadata["text"] for match in results.matches]
# Usage
docs = search_documents("What is ThinkHive?", top_k=10)

Parameters
| Parameter | Type | Required | Description |
|---|---|---|---|
| query | str | No | Query string or arg reference |
| top_k | int | No | Number of results to retrieve |
| name | str | No | Custom span name |
| metadata | dict | No | Additional attributes |
Capturing Document Details
@thinkhive.trace_retrieval(query="dynamic")
def search_with_scores(query: str) -> list:
results = index.query(
vector=get_embedding(query),
top_k=5,
include_metadata=True,
)
# Return structured data for better tracing
return [
{
"id": match.id,
"content": match.metadata["text"],
"score": match.score,
}
for match in results.matches
    ]

@trace_tool
Trace tool or function calls.
import thinkhive
import requests
@thinkhive.trace_tool(tool_name="web_search")
def search_web(query: str) -> str:
response = requests.get(
"https://api.example.com/search",
params={"q": query},
)
return response.json()
@thinkhive.trace_tool(tool_name="calculator")
def calculate(expression: str) -> float:
return eval(expression) # Use safely in production!
# Usage
results = search_web("ThinkHive AI")
answer = calculate("42 * 17")

Parameters
| Parameter | Type | Required | Description |
|---|---|---|---|
| tool_name | str | Yes | Name of the tool |
| name | str | No | Custom span name |
| metadata | dict | No | Additional attributes |
Nested Tracing
Decorators automatically create nested spans:
@thinkhive.trace_llm(model_name="gpt-4", provider="openai")
def generate_answer(question: str, context: str) -> str:
response = client.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": f"Context: {context}"},
{"role": "user", "content": question},
],
)
return response.choices[0].message.content
@thinkhive.trace_retrieval()
def retrieve_context(question: str) -> str:
docs = search_documents(question)
return "\n\n".join(docs)
def rag_pipeline(question: str) -> str:
"""RAG pipeline with automatic nested tracing."""
tracer = thinkhive.get_tracer()
with tracer.start_as_current_span("rag_pipeline") as span:
span.set_attribute("question", question)
# These calls become child spans
context = retrieve_context(question)
answer = generate_answer(question, context)
return answer
# Result: rag_pipeline -> retrieve_context -> generate_answer

Error Handling
Errors are automatically captured in traces:
@thinkhive.trace_llm(model_name="gpt-4", provider="openai")
def risky_call(prompt: str) -> str:
try:
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": prompt}],
)
return response.choices[0].message.content
except Exception as e:
# Error is automatically recorded in the span
raise
# The trace will include error details:
# - error.type: Exception class name
# - error.message: Error message
# - error.stack: Stack trace

Custom Attributes
Add custom attributes within decorated functions:
from opentelemetry import trace
@thinkhive.trace_llm(model_name="gpt-4", provider="openai")
def chat_with_context(message: str, customer_id: str) -> str:
# Get current span
span = trace.get_current_span()
# Add custom attributes
span.set_attribute("customer.id", customer_id)
span.set_attribute("customer.tier", "premium")
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": message}],
)
# Add more attributes after the call
span.set_attribute("response.length", len(response.choices[0].message.content))
    return response.choices[0].message.content

Async Support
Decorators work with async functions:
import asyncio
import thinkhive
from openai import AsyncOpenAI
client = AsyncOpenAI()
@thinkhive.trace_llm(model_name="gpt-4", provider="openai")
async def async_chat(message: str) -> str:
response = await client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": message}],
)
return response.choices[0].message.content
@thinkhive.trace_retrieval()
async def async_search(query: str) -> list:
# Async retrieval
return await vector_db.async_search(query)
# Usage
async def main():
result = await async_chat("Hello!")
print(result)
asyncio.run(main())

Class Methods
Decorators work with class methods:
class SupportAgent:
def __init__(self, system_prompt: str):
self.system_prompt = system_prompt
self.client = OpenAI()
@thinkhive.trace_llm(model_name="gpt-4", provider="openai")
def respond(self, message: str) -> str:
response = self.client.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": self.system_prompt},
{"role": "user", "content": message},
],
)
return response.choices[0].message.content
# Usage
agent = SupportAgent("You are a helpful support agent.")
answer = agent.respond("How do I reset my password?")

Best Practice: Use descriptive `name` parameters for clearer traces:
@thinkhive.trace_llm(
model_name="gpt-4",
provider="openai",
name="support_response_generation"
)
def generate_support_response(query: str) -> str:
    # ...

Next Steps
- Examples - Complete working examples
- JavaScript SDK - TypeScript/JavaScript documentation
- API Reference - REST API documentation