PydanticAI - Agent Framework ❤️‍🔥

1 Like

Gm gm, i have build my agent and now want to make it compatible with studio attomator, I get getting an error
Error generating AI response: ‘type’
INFO: 127.0.0.1:45034 - “POST /api/coincashew-agent HTTP/1.1” 200 OK

async def coincashew_agent(
request: AgentRequest,
authenticated: bool = Depends(verify_token)
):
try:
# Fetch conversation history from the DB
conversation_history = await fetch_conversation_history(request.session_id)

    # Convert conversation history to format expected by agent
    # This will be different depending on your framework (Pydantic AI, LangChain, etc.)
    messages = []
    for msg in conversation_history:
        msg_data = msg["message"]
        msg_type = msg_data["type"]
        msg_content = msg_data["content"]
        msg = {"role": msg_type, "content": msg_content}
        messages.append(msg)

    # Store user's query
    await store_message(
        session_id=request.session_id,
        message_type="human",
        content=request.query
    )            

    
    agent_response = await get_ai_response(request.query, messages)


    # Store agent's response
    await store_message(
        session_id=request.session_id,
        message_type="assistant",
        content=agent_response,
        data={"request_id": request.request_id}
    )

    return AgentResponse(success=True)

except Exception as e:
    print(f"Error processing request: {str(e)}")
    # Store error message in conversation
    await store_message(
        session_id=request.session_id,
        message_type="ai",
        content="I apologize, but I encountered an error processing your request.",
        data={"error": str(e), "request_id": request.request_id}
    )
    return AgentResponse(success=False)

async def get_ai_response(query: str, history: List[dict]) → str:
“”"
Generates an AI response based on the user’s query and conversation history.
“”"
try:
# Generate query embedding
query_embedding = await embedding_client.embeddings.create(
model=“text-embedding-3-small”,
input=query
)

    # Perform a search on the Coincashew documentation
    results = supabase.rpc(
        'match_site_pages',
        {
            'query_embedding': query_embedding.data[0].embedding,
            'match_count': 5
        }
    ).execute()

    # Build the context from search results
    context = "\n\n".join([
        f"From {doc['url']} (Section: {doc['title']}):\n{doc['content']}"
        for doc in results.data
    ])

    # Create system and user prompts
    system_prompt = """
    You are an expert on Coincashew documentation. Answer questions accurately based on the provided context.
    """
    messages = [{"role": "system", "content": system_prompt}]
    
    # Add conversation history to the message list
    for msg in history:
        messages.append({"role": msg["type"], "content": msg["content"]})
    
    # Add user query
    messages.append({"role": "user", "content": query + "\n\n" + context})

    # Call the AI model
    agent_response = await agent.run(messages)
    return agent_response.result.answer

except Exception as e:
    print(f"Error generating AI response: {str(e)}")
    return "I encountered an issue while processing your request. Please try again later."

Did you build your agent with Pydantic AI? When you define the format for the messages, where I have the comments:

Convert conversation history to format expected by agent

This will be different depending on your framework (Pydantic AI, LangChain, etc.)

Make sure you are doing it correctly for Pydantic AI! I have an example here: