Connect agno to Anthropic

Trying to connect an Agno team agent in collaborate mode to Anthropic — I keep hitting this error message:

raise self._make_status_error_from_response(err.response) from None
anthropic.BadRequestError: Error code: 400 - {'type': 'error', 'error': {'type': 'invalid_request_error', 'message': 'messages: Unexpected role "system". The Messages API accepts a top-level system parameter, not "system" as an input message role.'}}

using version 0.50.0 for Anthropic and Agno 1.4.3

Has anyone faced this error before?

Hi @kishorkukreja, thanks for reaching out and supporting Agno. I've shared this with the team; we're working through all requests one by one and will get back to you soon. If it's urgent, please let us know. We appreciate your patience!

Wednesday, 9 April

Hey @kishorkukreja, sorry about that — I just tried it out and it seems to be working for me.

Do you mind sharing your Team Configuration so I could help you better?

Hi team

Code for orchestrator:

"""
Orchestrator Agent

This module implements the Orchestrator Agent, which coordinates all other agents
and manages the overall workflow.
"""

import os
from textwrap import dedent
from dotenv import load_dotenv
import mlflow

from agno.agent import Agent
from agno.models.anthropic import Claude
from agno.team.team import Team
from agno.tools.reasoning import ReasoningTools
from agno.tools.thinking import ThinkingTools

# Load environment variables
load_dotenv()


class OrchestratorAgent:
    """
    Agent that coordinates all other agents and manages the overall workflow.
    """

    def __init__(self, prompt_name="orchestrator", prompt_version=1):
        """
        Initialize the Orchestrator Agent.

        Args:
            prompt_name (str): Name of the prompt in MLflow registry
            prompt_version (int): Version of the prompt to use
        """
        self.prompt_name = prompt_name
        self.prompt_version = prompt_version
        self.agent_team = None

        # Load the prompt from MLflow
        try:
            self.prompt_template = mlflow.load_prompt(
                f"prompts:/{prompt_name}/{prompt_version}"
            ).template
            print(self.prompt_template)
        except Exception as e:
            print(f"Error loading prompt from MLflow: {e}")
            self.prompt_template = None
            # Fallback to local file
            with open("prompts/orchestrator.txt", "r") as f:
                self.prompt_template = f.read()

    def initialize_team(self, agents):
        """
        Initialize the agent team with the provided agents.

        Args:
            agents (list): List of agent instances to include in the team
        """
        self.agent_team = Team(
            name="Logistics Shipping Agent Team",
            description="A team of specialized agents for optimizing logistics operations",
            mode="route",
            model=Claude(id="claude-3-7-sonnet-latest"),
            members=agents,
            instructions=self.prompt_template,
            tools=[ReasoningTools(add_instructions=True)],
            show_tool_calls=True,
            markdown=True,
            add_datetime_to_instructions=True,
            enable_agentic_context=True,  # Allow the agent to maintain a shared context
            share_member_interactions=True,  # Share all member responses with subsequent member requests
            show_members_responses=True,
        )

    def process_request(self, request):
        """
        Process a user request by coordinating the appropriate agents.

        Args:
            request (str): User request

        Returns:
            str: Final response from the agent team
        """
        if self.agent_team is None:
            raise ValueError("Agent team not initialized. Call initialize_team() first.")

        # Format the prompt with the request
        formatted_prompt = self.prompt_template.replace("{{ request }}", request)
        print(formatted_prompt)
        print('HERE I AM ')
        # Update the agent's instructions
        self.agent_team.instructions = formatted_prompt
        print(self.agent_team.instructions)

        # Process the request through the agent team
        response = self.agent_team.run(request)

        return response.content

    def create_standalone_agent(self):
        """
        Create a standalone orchestrator agent (without team coordination).
        This is useful for initial testing or when other agents are not available.

        Returns:
            Agent: Standalone orchestrator agent
        """
        return Agent(
            name="Orchestrator",
            role="Coordinate logistics operations and analyze requests",
            model=Claude(id="claude-3-7-sonnet-20250219"),
            instructions=self.prompt_template,
            tools=[ReasoningTools(think=True, add_instructions=True, analyze=True)],
            show_tool_calls=True,
            markdown=True,
        )

    def process_standalone_request(self, request):
        """
        Process a request using a standalone orchestrator agent.

        Args:
            request (str): User request

        Returns:
            str: Response from the standalone agent
        """
        agent = self.create_standalone_agent()

        # Format the prompt with the request
        formatted_prompt = self.prompt_template.replace("{{ request }}", request)
        agent.instructions = formatted_prompt
        print('2')
        print(agent.instructions)

        # Process the request
        response = agent.run(request)

        return response.content


# Example usage
if __name__ == "__main__":
    # Initialize the Orchestrator Agent
    orchestrator = OrchestratorAgent()

    # Process a request using the standalone agent
    response = orchestrator.process_standalone_request(
        "I need to optimize the shipping route from New York to Los Angeles for a 2-ton shipment."
    )

    print(response)

Error trace attached. Libraries versions here:

absl-py==2.2.2
adagio==0.2.6
agno==1.4.3
alembic==1.15.2
annotated-types==0.7.0
anthropic==0.50.0
anyio==4.9.0
appdirs==1.4.4
blinker==1.9.0
boto3==1.38.8
botocore==1.38.8
cachetools==5.5.2
certifi==2025.4.26
chardet==5.2.0
charset-normalizer==3.4.2
click==8.1.8
cloudpickle==3.1.1
colorama==0.4.6
colorlog==6.9.0
contourpy==1.3.2
coreforecast==0.0.16
cycler==0.12.1
databricks-sdk==0.52.0
Deprecated==1.2.18
distro==1.9.0
docker==7.1.0
docstring_parser==0.16
duckduckgo_search==8.0.1
fastapi==0.115.12
filelock==3.18.0
Flask==3.1.0
fonttools==4.57.0
fs==2.4.16
fsspec==2025.3.2
fugue==0.9.1
geopandas==1.0.1
gitdb==4.0.12
GitPython==3.1.44
google-auth==2.39.0
googleapis-common-protos==1.70.0
graphene==3.4.3
graphql-core==3.2.6
graphql-relay==3.2.0
greenlet==3.2.1
grpcio==1.71.0
h11==0.16.0
httpcore==1.0.9
httpx==0.28.1
huggingface-hub==0.30.2
idna==3.10
immutabledict==4.2.1
importlib_metadata==8.6.1
itsdangerous==2.2.0
Jinja2==3.1.6
jiter==0.9.0
jmespath==1.0.1
joblib==1.4.2
kiwisolver==1.4.8
langtrace-python-sdk==3.8.17
llvmlite==0.44.0
lxml==5.4.0
Mako==1.3.10
Markdown==3.8
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib==3.10.1
mdurl==0.1.2
mlflow==2.22.0
mlflow-skinny==2.22.0
mlforecast==1.0.2
networkx==3.4.2
nixtla==0.6.6
numba==0.61.2
numpy==2.2.5
openai==1.77.0
opentelemetry-api==1.32.1
opentelemetry-exporter-otlp-proto-common==1.32.1
opentelemetry-exporter-otlp-proto-grpc==1.32.1
opentelemetry-exporter-otlp-proto-http==1.32.1
opentelemetry-instrumentation==0.53b1
opentelemetry-instrumentation-sqlalchemy==0.53b1
opentelemetry-proto==1.32.1
opentelemetry-sdk==1.32.1
opentelemetry-semantic-conventions==0.53b1
optuna==4.3.0
orjson==3.10.18
ortools==9.12.4544
packaging==24.2
pandas==2.2.3
patsy==1.0.1
pdfkit==1.0.0
pillow==11.2.1
primp==0.15.0
protobuf==5.29.4
pyarrow==19.0.1
pyasn1==0.6.1
pyasn1_modules==0.4.2
pydantic==2.11.4
pydantic-settings==2.9.1
pydantic_core==2.33.2
Pygments==2.19.1
pyogrio==0.10.0
pyparsing==3.2.3
pyproj==3.7.1
python-dateutil==2.9.0.post0
python-dotenv==1.1.0
python-multipart==0.0.20
pytz==2025.2
pywin32==310
PyYAML==6.0.2
regex==2024.11.6
reportlab==4.4.0
requests==2.32.3
rich==14.0.0
rsa==4.9.1
s3transfer==0.12.0
safetensors==0.5.3
scikit-learn==1.6.1
scipy==1.15.2
sentry-sdk==2.27.0
setuptools==80.3.0
shapely==2.1.0
shellingham==1.5.4
six==1.17.0
smmap==5.0.2
sniffio==1.3.1
SQLAlchemy==2.0.40
sqlparse==0.5.3
starlette==0.46.2
statsforecast==2.0.1
statsmodels==0.14.4
tenacity==9.1.2
threadpoolctl==3.6.0
tiktoken==0.9.0
tokenizers==0.21.1
tomli==2.2.1
tqdm==4.67.1
trace-attributes==7.2.1
transformers==4.51.3
triad==0.9.8
typer==0.15.3
typing-inspection==0.4.0
typing_extensions==4.13.2
tzdata==2025.2
ujson==5.10.0
urllib3==2.4.0
utilsforecast==0.2.12
uvicorn==0.34.2
waitress==3.0.2
Werkzeug==3.1.3
wrapt==1.17.2
zipp==3.21.0
zstandard==0.23.0

Thanks,
Kishor

(Attachment log.txt is missing)


Hey @kishorkukreja I just tried your code and it worked well for me. Do you mind upgrading to the latest version for both and trying it out again? Also put debug_mode=True to know where exactly it’s failing

Can you share this code please, let me run it as is.

Thanks,
Kishor

Sure, sharing it here:

"""
Orchestrator Agent

This module implements the Orchestrator Agent, which coordinates all other agents
and manages the overall workflow.
"""

import os
from textwrap import dedent
from dotenv import load_dotenv
import mlflow

from agno.agent import Agent
from agno.models.anthropic import Claude
from agno.team.team import Team
from agno.tools.reasoning import ReasoningTools
from agno.tools.thinking import ThinkingTools

# Load environment variables
load_dotenv()

class OrchestratorAgent:
    """
    Agent that coordinates all other agents and manages the overall workflow.

    The orchestrator loads its system prompt from the MLflow prompt registry,
    falling back to a local file only when the registry lookup fails.
    """

    def __init__(self, prompt_name="orchestrator", prompt_version=1):
        """
        Initialize the Orchestrator Agent.

        Args:
            prompt_name (str): Name of the prompt in MLflow registry
            prompt_version (int): Version of the prompt to use
        """
        self.prompt_name = prompt_name
        self.prompt_version = prompt_version
        self.agent_team = None

        # Load the prompt from MLflow
        try:
            self.prompt_template = mlflow.load_prompt(
                f"prompts:/{prompt_name}/{prompt_version}"
            ).template
            print(self.prompt_template)
        except Exception as e:
            print(f"Error loading prompt from MLflow: {e}")
            self.prompt_template = None

        # Fallback to local file — but ONLY when MLflow did not yield a prompt.
        # (Previously this ran unconditionally, silently overwriting a
        # successfully loaded registry prompt and crashing if the local file
        # was missing even when MLflow had succeeded.)
        if self.prompt_template is None:
            with open(
                "cookbook/agent_concepts/multimodal/prompts/orchestrator.txt", "r"
            ) as f:
                self.prompt_template = f.read()

    def initialize_team(self, agents):
        """
        Initialize the agent team with the provided agents.

        Args:
            agents (list): List of agent instances to include in the team
        """
        self.agent_team = Team(
            name="Logistics Shipping Agent Team",
            description="A team of specialized agents for optimizing logistics operations",
            mode="route",
            model=Claude(id="claude-3-7-sonnet-latest"),
            members=agents,
            instructions=self.prompt_template,
            tools=[ReasoningTools(add_instructions=True)],
            show_tool_calls=True,
            markdown=True,
            add_datetime_to_instructions=True,
            enable_agentic_context=True,  # Allow the team to maintain a shared context
            share_member_interactions=True,  # Share member responses with subsequent requests
            show_members_responses=True,
            debug_mode=True,
        )

    def process_request(self, request):
        """
        Process a user request by coordinating the appropriate agents.

        Args:
            request (str): User request

        Returns:
            str: Final response from the agent team

        Raises:
            ValueError: If initialize_team() has not been called yet.
        """
        if self.agent_team is None:
            raise ValueError("Agent team not initialized. Call initialize_team() first.")

        # Format the prompt with the request
        formatted_prompt = self.prompt_template.replace("{{ request }}", request)
        self.agent_team.instructions = formatted_prompt

        # Process the request through the agent team
        response = self.agent_team.run(request)

        return response.content

    def create_standalone_agent(self):
        """
        Create a standalone orchestrator agent (without team coordination).

        Returns:
            Agent: Standalone orchestrator agent
        """
        return Agent(
            name="Orchestrator",
            role="Coordinate logistics operations and analyze requests",
            model=Claude(id="claude-3-7-sonnet-20250219"),
            debug_mode=True,
            instructions=self.prompt_template,
            tools=[ReasoningTools(think=True, add_instructions=True, analyze=True)],
            show_tool_calls=True,
            markdown=True,
        )

    def process_standalone_request(self, request):
        """
        Process a request using a standalone orchestrator agent.

        Args:
            request (str): User request

        Returns:
            str: Response from the standalone agent

        Raises:
            ValueError: If no prompt template could be loaded at init time.
        """
        agent = self.create_standalone_agent()

        # Ensure prompt_template is loaded
        if self.prompt_template is None:
            raise ValueError(
                "Prompt template not loaded. Ensure the prompt is available in MLflow or as a local file."
            )

        # Format the prompt with the request
        formatted_prompt = self.prompt_template.replace("{{ request }}", request)
        agent.instructions = formatted_prompt

        # Process the request
        response = agent.run(request)
        return response.content


# Example usage
if __name__ == "__main__":
    # Build the orchestrator and exercise the standalone (team-less) path.
    orchestrator = OrchestratorAgent()

    demo_request = (
        "I need to optimize the shipping route from New York to Los Angeles for a 2-ton shipment."
    )
    print(orchestrator.process_standalone_request(demo_request))

Let me know if it works for you- I had a local file as a fallback!