from typing import List, Type, Union

from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import Runnable
from langchain_core.utils.function_calling import convert_pydantic_to_openai_function
from pydantic import BaseModel

_EXTRACTION_TEMPLATE = """Extract and save the relevant entities mentioned \
in the following passage together with their properties.

If a property is not present and is not required in the function parameters, \
do not include it in the output."""


@deprecated(
    since="0.1.14",
    message=(
        "LangChain has introduced a method called `with_structured_output` that "
        "is available on ChatModels capable of tool calling. You can read more "
        "about the method here: "
        "<https://python.langchain.com/docs/modules/model_io/chat/structured_output/>. "
        "Please follow our extraction use case documentation for more guidelines "
        "on how to do information extraction with LLMs: "
        "<https://python.langchain.com/docs/use_cases/extraction/>. "
        "with_structured_output does not currently support a list of pydantic "
        "schemas. If this is a blocker or if you notice other issues, please "
        "provide feedback here: "
        "<https://github.com/langchain-ai/langchain/discussions/18154>"
    ),
    removal="1.0",
    alternative=(
        """
            from pydantic import BaseModel, Field
            from langchain_anthropic import ChatAnthropic

            class Joke(BaseModel):
                setup: str = Field(description="The setup of the joke")
                punchline: str = Field(description="The punchline to the joke")

            # Or any other chat model that supports tools.
            # Please refer to the documentation of with_structured_output
            # for an up-to-date list of which models support it.
            model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
            structured_llm = model.with_structured_output(Joke)
            structured_llm.invoke(
                "Tell me a joke about cats. Make sure to call the Joke function."
            )
            """
    ),
)
def create_extraction_chain_pydantic(
    pydantic_schemas: Union[List[Type[BaseModel]], Type[BaseModel]],
    llm: BaseLanguageModel,
    system_message: str = _EXTRACTION_TEMPLATE,
) -> Runnable:
    """Creates a chain that extracts information from a passage.

    Args:
        pydantic_schemas: The schema of the entities to extract.
        llm: The language model to use.
        system_message: The system message to use for extraction.

    Returns:
        A runnable that extracts information from a passage.
    """
    # Accept either a single pydantic schema or a list of schemas.
    if not isinstance(pydantic_schemas, list):
        pydantic_schemas = [pydantic_schemas]
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_message),
            ("user", "{input}"),
        ]
    )
    # Convert each pydantic schema to an OpenAI tool definition and bind the
    # tools to the model so it can call them during extraction.
    functions = [convert_pydantic_to_openai_function(p) for p in pydantic_schemas]
    tools = [{"type": "function", "function": d} for d in functions]
    model = llm.bind(tools=tools)
    chain = prompt | model | PydanticToolsParser(tools=pydantic_schemas)
    return chain
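

# Illustrative usage sketch of the chain above. The `Person` schema and the
# `ChatOpenAI` model are assumptions made for this example; any pydantic
# schema and any chat model that supports tool calling can be substituted.
if __name__ == "__main__":
    from langchain_openai import ChatOpenAI
    from pydantic import Field

    class Person(BaseModel):
        """A person mentioned in the passage."""

        name: str = Field(description="The person's name")
        age: int = Field(description="The person's age")

    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)  # assumed model name
    chain = create_extraction_chain_pydantic(Person, llm)
    # The prompt template expects the passage under the "input" key; the
    # parser returns a list of populated Person instances.
    people = chain.invoke({"input": "Alice is 30 and her brother Bob is 25."})
    print(people)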