
How LLM Tool Calling Works


Consider a user asking, “What is the temperature in Miami?” The LLM processes the question and identifies a weather API as a suitable tool. The system then proceeds as follows:

1. The LLM recommends calling the weather API.
2. The client application executes the tool call.
3. The API returns a response (e.g., 71 degrees Fahrenheit).
4. The LLM interprets the response and formulates a final answer: “The weather in Miami is pleasant at 71 degrees Fahrenheit.”
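
In the OpenAI Chat Completions API, step 1 arrives as an assistant message whose tool_calls field names the function to run and carries its arguments as a JSON string. The wire format looks roughly like this (the id value is illustrative):

{
  "role": "assistant",
  "content": null,
  "tool_calls": [
    {
      "id": "call_abc123",
      "type": "function",
      "function": {
        "name": "weather_api",
        "arguments": "{\"city\": \"Miami\"}"
      }
    }
  ]
}

The client parses the arguments, runs the matching function, and sends the result back in a message with role "tool" and the same tool_call_id. The complete flow, implemented with the OpenAI Python SDK: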


import openai
import requests
import json

# Weather API tool function
def get_weather(city):
    api_key = "API-KEY"  # Replace with your WeatherAPI key
    api_url = f"https://api.weatherapi.com/v1/current.json?key={api_key}&q={city}"
    try:
        response = requests.get(api_url, timeout=10)  # Timeout so a hung request fails fast
        response.raise_for_status()
        data = response.json()
        return data['current']['temp_f']
    except requests.exceptions.RequestException as e:
        return f"Error fetching weather data: {str(e)}"

# Client-side tool execution function
def execute_tool(tool_name, arguments):
    if tool_name == "weather_api":
        args = json.loads(arguments) if isinstance(arguments, str) else arguments
        return get_weather(args["city"])
    return "Invalid tool call"

# Main class to handle traditional tool calling
class TraditionalToolCaller:
    def __init__(self, api_key):
        self.client = openai.OpenAI(api_key=api_key)

    def process_message(self, user_message):
        # Step 1: Client sends message to LLM and LLM recommends a tool
        print(f"User message: {user_message}")
        
        initial_response = self.client.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": "You are a helpful assistant. Recommend tools when needed."},
                {"role": "user", "content": user_message}
            ],
            tools=[{
                "type": "function",
                "function": {
                    "name": "weather_api",
                    "description": "Fetches the current temperature of a city in Fahrenheit",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "city": {"type": "string", "description": "The city name"}
                        },
                        "required": ["city"]
                    }
                }
            }],
            tool_choice="auto"  # Let the model decide whether to call the tool
        )

        message = initial_response.choices[0].message
        print(f"LLM recommendation: {message}")

        # Step 2: Client application checks if a tool is recommended and executes it
        if hasattr(message, 'tool_calls') and message.tool_calls:
            tool_call = message.tool_calls[0]
            tool_name = tool_call.function.name
            arguments = tool_call.function.arguments

            # Client executes the tool
            tool_response = execute_tool(tool_name, arguments)
            print(f"Tool response: {tool_response}")

            # Step 3: Client sends the tool response back to the LLM. The history must
            # replay the assistant's tool_calls message, followed by a "tool" message
            # whose tool_call_id matches the call being answered.
            final_response = self.client.chat.completions.create(
                model="gpt-4",
                messages=[
                    {"role": "system", "content": "You are a helpful assistant."},
                    {"role": "user", "content": user_message},
                    {"role": "assistant", "content": message.content, "tool_calls": message.tool_calls},
                    {"role": "tool", "content": str(tool_response), "tool_call_id": tool_call.id}
                ]
            )
            final_message = final_response.choices[0].message
            print(f"LLM final answer: {final_message.content}")
            return final_message.content

        # If no tool call, return the LLM's direct response
        return message.content

# Usage example
if __name__ == "__main__":
    # Replace with your actual OpenAI API key
    api_key = "OPEN-API-KEY"
    tool_caller = TraditionalToolCaller(api_key)

    # Example user message
    user_input = "What is the temperature in Miami?"
    result = tool_caller.process_message(user_input)
    print(f"Final result: {result}")


Source: https://medium.com/garantibbva-teknoloji/understanding-llm-tool-calling-traditional-vs-embedded-approaches-fc7e576d05de