from typing import Optional, Sequence

from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool

from langchain.agents.format_scratchpad.openai_tools import (
    format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser


def create_openai_tools_agent(
    llm: BaseLanguageModel,
    tools: Sequence[BaseTool],
    prompt: ChatPromptTemplate,
    strict: Optional[bool] = None,
) -> Runnable:
    """Create an agent that uses OpenAI tools.

    Args:
        llm: LLM to use as the agent.
        tools: Tools this agent has access to.
        prompt: The prompt to use. See Prompt section below for more on the expected
            input variables.
        strict: Optional flag forwarded to ``convert_to_openai_tool`` for each
            tool when it is bound to the model.

    Returns:
        A Runnable sequence representing an agent. It takes as input the same
        input variables as the prompt passed in. It returns as output either an
        AgentAction or an AgentFinish.

    Raises:
        ValueError: If the prompt is missing required variables.

    Example:

        .. code-block:: python

            from langchain import hub
            from langchain_community.chat_models import ChatOpenAI
            from langchain.agents import AgentExecutor, create_openai_tools_agent

            prompt = hub.pull("hwchase17/openai-tools-agent")
            model = ChatOpenAI()
            tools = ...

            agent = create_openai_tools_agent(model, tools, prompt)
            agent_executor = AgentExecutor(agent=agent, tools=tools)

            agent_executor.invoke({"input": "hi"})

            # Using with chat history
            from langchain_core.messages import AIMessage, HumanMessage
            agent_executor.invoke(
                {
                    "input": "what's my name?",
                    "chat_history": [
                        HumanMessage(content="hi! my name is bob"),
                        AIMessage(content="Hello Bob! How can I assist you today?"),
                    ],
                }
            )

    Prompt:

        The agent prompt must have an `agent_scratchpad` key that is a
            ``MessagesPlaceholder``. Intermediate agent actions and tool output
            messages will be passed in here.

        Here's an example:

        .. code-block:: python

            from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

            prompt = ChatPromptTemplate.from_messages(
                [
                    ("system", "You are a helpful assistant"),
                    MessagesPlaceholder("chat_history", optional=True),
                    ("human", "{input}"),
                    MessagesPlaceholder("agent_scratchpad"),
                ]
            )
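
    Strict tool schemas:

        The optional ``strict`` argument is forwarded to
        ``convert_to_openai_tool`` for each tool when the tools are bound to
        the model (see the function body below). A minimal sketch of enabling
        it, reusing the ``model``, ``tools``, and ``prompt`` from the example
        above (the exact schema-enforcement behavior depends on the model
        provider):

        .. code-block:: python

            agent = create_openai_tools_agent(model, tools, prompt, strict=True)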
    """
    # Validate that the prompt exposes the required ``agent_scratchpad`` variable.
    missing_vars = {"agent_scratchpad"}.difference(
        prompt.input_variables + list(prompt.partial_variables)
    )
    if missing_vars:
        raise ValueError(f"Prompt missing required variables: {missing_vars}")

    # Bind the tools to the model in OpenAI's tool-calling format.
    llm_with_tools = llm.bind(
        tools=[convert_to_openai_tool(tool, strict=strict) for tool in tools]
    )

    # Build the agent runnable: format intermediate steps into the scratchpad,
    # render the prompt, call the tool-bound model, then parse its tool calls.
    agent = (
        RunnablePassthrough.assign(
            agent_scratchpad=lambda x: format_to_openai_tool_messages(
                x["intermediate_steps"]
            )
        )
        | prompt
        | llm_with_tools
        | OpenAIToolsAgentOutputParser()
    )
    return agent