from typing import Callable, List, Sequence, Tuple

from langchain_core.agents import AgentAction
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool

from langchain.agents.format_scratchpad.tools import format_to_tool_messages
from langchain.agents.output_parsers.tools import ToolsAgentOutputParser

MessageFormatter = Callable[[Sequence[Tuple[AgentAction, str]]], List[BaseMessage]]


def create_tool_calling_agent(
    llm: BaseLanguageModel,
    tools: Sequence[BaseTool],
    prompt: ChatPromptTemplate,
    *,
    message_formatter: MessageFormatter = format_to_tool_messages,
) -> Runnable:
    """Create an agent that uses tools.

    Args:
        llm: LLM to use as the agent.
        tools: Tools this agent has access to.
        prompt: The prompt to use. See Prompt section below for more on the expected
            input variables.
        message_formatter: Formatter function to convert (AgentAction, tool output)
            tuples into FunctionMessages.

    Returns:
        A Runnable sequence representing an agent. It takes as input all the same input
        variables as the prompt passed in does. It returns as output either an
        AgentAction or AgentFinish.

    Example:

        .. code-block:: python

            from langchain.agents import AgentExecutor, create_tool_calling_agent, tool
            from langchain_anthropic import ChatAnthropic
            from langchain_core.prompts import ChatPromptTemplate

            prompt = ChatPromptTemplate.from_messages(
                [
                    ("system", "You are a helpful assistant"),
                    ("placeholder", "{chat_history}"),
                    ("human", "{input}"),
                    ("placeholder", "{agent_scratchpad}"),
                ]
            )
            model = ChatAnthropic(model="claude-3-opus-20240229")

            @tool
            def magic_function(input: int) -> int:
                """Applies a magic function to an input."""
                return input + 2

            tools = [magic_function]

            agent = create_tool_calling_agent(model, tools, prompt)
            agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

            agent_executor.invoke({"input": "what is the value of magic_function(3)?"})

            # Using with chat history
            from langchain_core.messages import AIMessage, HumanMessage
            agent_executor.invoke(
                {
                    "input": "what's my name?",
                    "chat_history": [
                        HumanMessage(content="hi! my name is bob"),
                        AIMessage(content="Hello Bob! How can I assist you today?"),
                    ],
                }
            )

    Prompt:

        The agent prompt must have an `agent_scratchpad` key that is a
            ``MessagesPlaceholder``. Intermediate agent actions and tool output
            messages will be passed in here.
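
        For example, the ``("placeholder", "{agent_scratchpad}")`` shorthand used
        in the example above can also be written as an explicit
        ``MessagesPlaceholder`` (a sketch, equivalent in effect):

        .. code-block:: python

            from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

            prompt = ChatPromptTemplate.from_messages(
                [
                    ("system", "You are a helpful assistant"),
                    ("human", "{input}"),
                    MessagesPlaceholder(variable_name="agent_scratchpad"),
                ]
            )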
    """
    # Validate that the prompt can receive the intermediate-step messages.
    missing_vars = {"agent_scratchpad"}.difference(
        prompt.input_variables + list(prompt.partial_variables)
    )
    if missing_vars:
        raise ValueError(f"Prompt missing required variables: {missing_vars}")

    if not hasattr(llm, "bind_tools"):
        raise ValueError(
            "This function requires a .bind_tools method be implemented on the LLM."
        )
    llm_with_tools = llm.bind_tools(tools)

    # Build the agent pipeline: format prior (action, observation) pairs into
    # messages, render the prompt, call the tool-bound LLM, and parse tool calls.
    agent = (
        RunnablePassthrough.assign(
            agent_scratchpad=lambda x: message_formatter(x["intermediate_steps"])
        )
        | prompt
        | llm_with_tools
        | ToolsAgentOutputParser()
    )
    return agent
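

# Illustrative sketch of driving the returned runnable directly, without
# AgentExecutor; the ``model``, ``tools``, and ``prompt`` names are assumed to be
# set up as in the docstring example. The agent expects the prompt's input
# variables plus ``intermediate_steps`` (a list of (AgentAction, observation)
# tuples) and returns either an AgentFinish or a list of tool-call actions:
#
#     agent = create_tool_calling_agent(model, tools, prompt)
#     next_step = agent.invoke(
#         {"input": "what is the value of magic_function(3)?", "intermediate_steps": []}
#     )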