OpenAI & Anthropic

Guard OpenAI and Anthropic agent loops with budget control and trustworthy tool execution.

OpenAI Function Calling

agentguard wraps your tool functions so they work seamlessly with OpenAI's function calling API. Define your tools normally, guard them, then pass them to the model. Pair that with response-based spend tracking on the client side and you get one runtime for both budget control and tool-call reliability.

python
import os
from openai import OpenAI
from agentguard import guard
from agentguard.integrations import OpenAIToolExecutor

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

@guard(validate_input=True, verify_response=True, max_retries=2)
def get_weather(city: str) -> dict:
    """Return the current weather for *city* (canned data for the example)."""
    report = {"city": city, "temperature": 72, "conditions": "sunny"}
    return report

@guard(validate_input=True)
def search_web(query: str) -> str:
    """Return a placeholder web-search result string for *query*."""
    return "Search results for: " + query

# Register each guarded tool with the executor, which generates the
# JSON schemas the model needs from type hints and docstrings.
executor = OpenAIToolExecutor()
for tool in (get_weather, search_web):
    executor.register(tool)

messages = [{"role": "user", "content": "What's the weather in Tokyo?"}]

# Let the model decide whether and which tool to call.
response = client.chat.completions.create(
    model="gpt-4o",
    messages=messages,
    tools=executor.tools,
    tool_choice="auto",
)

# Run any requested tool calls through their guarded wrappers.
tool_calls = response.choices[0].message.tool_calls
if tool_calls:
    results = executor.execute_all(tool_calls)
💡 Auto schema generation

OpenAIToolExecutor automatically generates JSON schemas from your function's type hints and docstring. No manual schema writing needed.

Real Cost Tracking for OpenAI

Wrap the OpenAI client with guard_openai_client to record provider-reported usage and real spend alongside guarded tool execution. This is the core production story: keep model calls inside budget while validating and verifying the tools the model depends on.

python
from openai import OpenAI
from agentguard import InMemoryCostLedger, TokenBudget
from agentguard.integrations import OpenAIToolExecutor, guard_openai_client

# Cap real spend for the session and record provider-reported usage
# in an in-memory ledger.
budget = TokenBudget(max_cost_per_session=5.00)
budget.config.cost_ledger = InMemoryCostLedger()

# Wrap the OpenAI client so every model call is tracked against the budget.
client = guard_openai_client(OpenAI(), budget=budget)

executor = OpenAIToolExecutor()
executor.register(get_weather)

response = client.chat.completions.create(
    model="gpt-4o",
    messages=messages,
    tools=executor.tools,
)

Pricing resolves in this order: override pricing, LiteLLM pricing, explicit cost_per_call fallback, then unknown cost with usage still preserved.

Anthropic Claude

Similar integration for Claude's tool use API:

python
from agentguard.integrations.anthropic import to_anthropic_tool
import anthropic

@guard(validate_input=True, verify_response=True)
def search_database(query: str, limit: int = 10) -> list:
    """Search the product database."""
    matches = db.search(query, limit=limit)
    return matches

client = anthropic.Anthropic()

# Ask Claude with the guarded tool exposed via the generated schema.
response = client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    tools=[to_anthropic_tool(search_database)],
    # Fixed: the doubled braces here were a doc-templating artifact; in
    # Python they formed a set literal containing a dict, which raises
    # TypeError because dicts are unhashable.
    messages=[{"role": "user", "content": "Find laptops under $1000"}],
)

# Execute tool calls with guards: each tool_use block invokes the guarded
# function directly, so input validation and retries still apply.
for block in response.content:
    if block.type == "tool_use":
        result = search_database(**block.input)

Real Cost Tracking for Anthropic

Use guard_anthropic_client to capture usage from Claude responses and record spend once per model call, including streaming responses.

Multi-Tool Agent Loop

A complete agent loop with multiple guarded tools:

python
from agentguard import guard, GuardConfig
from agentguard.integrations import OpenAIToolExecutor

# Shared guard settings: validate inputs, retry failed calls twice, 30s timeout.
config = GuardConfig(validate_input=True, max_retries=2, timeout=30.0)

@guard(config=config)
def search_web(query: str) -> list:
    """Search the web for information."""
    hits = search_api.search(query)
    return hits

@guard(config=config)
def get_page(url: str) -> str:
    """Fetch a web page and return at most the first 5000 characters.

    A network timeout is passed explicitly: the guard's timeout=30.0 cannot
    interrupt a blocking socket read, so requests must enforce its own.
    """
    return requests.get(url, timeout=30).text[:5000]

@guard(config=config)
def send_email(to: str, subject: str, body: str) -> bool:
    """Send an email message."""
    delivered = email_api.send(to=to, subject=subject, body=body)
    return delivered

# Build the tool registry: the executor exposes schemas to the model,
# while tools_map routes tool-call names back to the guarded callables.
executor = OpenAIToolExecutor()
executor.register(search_web)
executor.register(get_page)
executor.register(send_email)
tools_map = {
    "search_web": search_web,
    "get_page": get_page,
    "send_email": send_email,
}

# Agent loop: seed the conversation with the user's request.
# Fixed: the doubled braces were a doc-templating artifact; in Python they
# formed a set literal containing a dict, which raises TypeError (dicts are
# unhashable).
messages = [{"role": "user", "content": "Research AI safety and email me a summary"}]

import json  # tool-call arguments arrive as a JSON string; the snippet used json without importing it

while True:
    response = client.chat.completions.create(
        model="gpt-4o", messages=messages, tools=executor.tools,
    )
    msg = response.choices[0].message
    messages.append(msg)

    # No tool calls means the model produced its final answer.
    if not msg.tool_calls:
        print(msg.content)
        break

    # Dispatch each requested tool through its guarded wrapper and feed
    # the result back to the model as a "tool" role message.
    for tc in msg.tool_calls:
        fn = tools_map[tc.function.name]
        result = fn(**json.loads(tc.function.arguments))
        # Fixed: the doubled braces here were a doc-templating artifact
        # and invalid Python.
        messages.append({
            "role": "tool",
            "tool_call_id": tc.id,
            "content": json.dumps(result),
        })
Edit this page on GitHub