Skip to content

LLM - Tools

Install packages

1
2
3
4
!uv pip install -q \
    litellm==1.78.5 \
    python-dotenv==1.1.1 \
    pydantic==2.12.3

Import packages

1
2
3
4
5
6
7
# Standard library: used to decode tool-call argument strings.
import json

# Third-party: LiteLLM (unified LLM client), dotenv (env-file loader),
# pydantic (structured-output schema).
import litellm  # type: ignore
from dotenv import load_dotenv  # type: ignore
from pydantic import BaseModel, Field  # type: ignore

# Load provider credentials (e.g. GEMINI_API_KEY) from a local .env file
# into the process environment; returns True when a .env file was found.
load_dotenv()
True

Define Weather Tool

1
2
3
def get_weather(location: str, reasoning: str):
    """Demo weather tool: echo the model's stated reasoning, then report a fixed 20°C."""
    # Surface the model-supplied justification so the notebook reader can see it.
    print(reasoning)
    # Hard-coded reading — this stub does not query a real weather service.
    return "The temperature in " + location + " is 20°C."

Define tools specification

# Tool specification advertised to the model.
# NOTE(review): this is the flat (Responses-API-style) schema; the classic
# Chat Completions format nests name/description/parameters under a
# "function" key, with "strict" inside it. The captured output below shows
# LiteLLM accepted the flat form for Gemini — confirm against LiteLLM's
# function-calling docs before switching providers.
tools = [
    {
        "type": "function",
        "name": "get_weather",
        # Fixed: the description previously said "provided coordinates",
        # but the schema takes a free-form location string (plus the
        # model's reasoning) — a misleading hint to the model.
        "description": "Get current temperature for a provided location in celsius.",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {"type": "string"},
                "reasoning": {"type": "string"},
            },
            # Both fields are mandatory; no extra keys allowed.
            "required": ["location", "reasoning"],
            "additionalProperties": False,
        },
        "strict": True,
    }
]

Define messages

  • System message, defined once
  • User messages
1
2
3
4
5
6
7
# Conversation transcript: one system turn (set once) followed by the user turn.
_system_turn = {"role": "system", "content": "You're a helpful weather assistant."}
_user_turn = {
    "role": "user",
    "content": "What's the weather like in Paris today?",
}
messages = [_system_turn, _user_turn]

Get chat completion

1
2
3
4
5
6
7
# First pass: let Gemini decide whether to invoke the weather tool.
_request = {
    "model": "gemini/gemini-2.0-flash",
    "messages": messages,
    "tools": tools,
}
completion = litellm.completion(**_request)

# Inspect the tool call(s) the model requested (see captured output below).
print(completion.choices[0].message.tool_calls)
[ChatCompletionMessageToolCall(index=0, function=Function(arguments='{"reasoning": "I want to know the weather in Paris.", "location": "Paris"}', name='get_weather'), id='call_854f6a5249554dc3a0674da8dd2f', type='function')]

Call the function and append its result to the messages list

def call_function(name, args):
    """Dispatch a model-requested tool call by name.

    Args:
        name: Tool name as reported in the tool call.
        args: Decoded JSON arguments, expanded as keyword arguments.

    Raises:
        ValueError: if *name* is not a known tool. (Fixed: the original
        silently fell through and returned None, which would then be sent
        back to the model as empty tool output.)
    """
    if name == "get_weather":
        return get_weather(**args)
    raise ValueError(f"Unknown tool requested by model: {name!r}")


# Append the assistant turn that requested the tool call(s) ONCE, before any
# tool results. Fixed: the original appended it inside the loop, which would
# duplicate the assistant message whenever the model emitted multiple tool
# calls (harmless here only because there is a single call).
messages.append(completion.choices[0].message)

for tool_call in completion.choices[0].message.tool_calls:
    name = tool_call.function.name
    # Arguments arrive as a JSON-encoded string; decode to a kwargs dict.
    args = json.loads(tool_call.function.arguments)

    result = call_function(name, args)
    # Each tool result must reference the id of the call it answers.
    messages.append(
        {
            "role": "tool",
            "tool_call_id": tool_call.id,
            "content": result,
        }
    )

messages
I want to know the weather in Paris.
[{'role': 'system', 'content': "You're a helpful weather assistant."},
 {'role': 'user', 'content': "What's the weather like in Paris today?"},
 Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(index=0, function=Function(arguments='{"reasoning": "I want to know the weather in Paris.", "location": "Paris"}', name='get_weather'), id='call_854f6a5249554dc3a0674da8dd2f', type='function')], function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None),
 {'role': 'tool',
  'tool_call_id': 'call_854f6a5249554dc3a0674da8dd2f',
  'content': 'The temperature in Paris is 20°C.'}]

Define output structure

1
2
3
4
5
6
7
# Pydantic model used as `response_format` to constrain the model's final
# answer to structured JSON. `...` (Ellipsis) marks each field as required.
# NOTE: deliberately no class docstring — pydantic would fold it into the
# JSON schema description sent to the provider.
class WeatherResponse(BaseModel):
    # Human-readable place name, e.g. "Paris".
    location: str = Field(
        ..., description="The location for which the weather is reported."
    )
    # Temperature in degrees Celsius.
    temperature: float = Field(
        ..., description="The current temperature in Celsius."
    )

Supply results and call the model again

1
2
3
4
5
6
7
8
9
# Second pass: the transcript now includes the assistant's tool call and the
# tool result, so the model can compose a final answer.
# NOTE(review): `response_format=WeatherResponse` asks LiteLLM to constrain
# output to the pydantic schema — confirm provider support for combining
# `tools` with `response_format` on Gemini if you change models.
completion_2 = litellm.completion(
    model="gemini/gemini-2.0-flash",
    messages=messages,
    tools=tools,
    response_format=WeatherResponse,
)

# The structured answer arrives as a JSON string in `content`
# (see captured output below).
response = completion_2.choices[0].message.content
print(response)
{

  "location": "Paris",

  "temperature": 20.0

}