
"""Use a single chain to route an input to one of multiple llm chains."""

from __future__ import annotations

from typing import Any, Dict, List, Optional

from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate

from langchain.chains import ConversationChain
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.router.base import MultiRouteChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE


@deprecated(
    since="0.2.12",
    removal="1.0",
    message=(
        "Please see migration guide here for recommended implementation: "
        "https://python.langchain.com/docs/versions/migrating_chains/"
        "multi_prompt_chain/"
    ),
)
class MultiPromptChain(MultiRouteChain):
    """A multi-route chain that uses an LLM router chain to choose amongst prompts.

    This class is deprecated. See below for a replacement, which offers several
    benefits, including streaming and batch support.

    Below is an example implementation:

        .. code-block:: python

            from typing import Literal

            from langchain_core.output_parsers import StrOutputParser
            from langchain_core.prompts import ChatPromptTemplate
            from langchain_core.runnables import RunnableConfig
            from langchain_openai import ChatOpenAI
            from langgraph.graph import END, START, StateGraph
            from typing_extensions import TypedDict

            llm = ChatOpenAI(model="gpt-4o-mini")

            # Define the prompts we will route to
            prompt_1 = ChatPromptTemplate.from_messages(
                [
                    ("system", "You are an expert on animals."),
                    ("human", "{input}"),
                ]
            )
            prompt_2 = ChatPromptTemplate.from_messages(
                [
                    ("system", "You are an expert on vegetables."),
                    ("human", "{input}"),
                ]
            )

            # Construct the chains we will route to. These format the input query
            # into the respective prompt, run it through a chat model, and cast
            # the result to a string.
            chain_1 = prompt_1 | llm | StrOutputParser()
            chain_2 = prompt_2 | llm | StrOutputParser()
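
            # For illustration, a hypothetical call:
            #     chain_1.invoke({"input": "how fast are cheetahs?"})
            # returns a plain string, since StrOutputParser extracts the
            # chat message's content.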


            # Next: define the chain that selects which branch to route to.
            # Here we take advantage of tool-calling features to constrain
            # the output to one of the two desired branches.
            route_system = "Route the user's query to either the animal or vegetable expert."
            route_prompt = ChatPromptTemplate.from_messages(
                [
                    ("system", route_system),
                    ("human", "{input}"),
                ]
            )


            # Define schema for output:
            class RouteQuery(TypedDict):
                """Route query to destination expert."""

                destination: Literal["animal", "vegetable"]


            route_chain = route_prompt | llm.with_structured_output(RouteQuery)
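
            # For illustration, a hypothetical call (assuming the model
            # classifies correctly):
            #     route_chain.invoke({"input": "should I feed my rabbit kale?"})
            #     # -> {"destination": "vegetable"}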


            # For LangGraph, we will define the state of the graph to hold the query,
            # destination, and final answer.
            class State(TypedDict):
                query: str
                destination: RouteQuery
                answer: str


            # We define functions for each node, including routing the query:
            async def route_query(state: State, config: RunnableConfig):
                destination = await route_chain.ainvoke(state["query"], config)
                return {"destination": destination}
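
            # Note: each node returns only the keys it updates; LangGraph
            # merges these partial updates into the shared State as the
            # graph runs.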


            # And one node for each prompt
            async def prompt_1(state: State, config: RunnableConfig):
                return {"answer": await chain_1.ainvoke(state["query"], config)}


            async def prompt_2(state: State, config: RunnableConfig):
                return {"answer": await chain_2.ainvoke(state["query"], config)}


            # We then define logic that selects the prompt based on the classification
            def select_node(state: State) -> Literal["prompt_1", "prompt_2"]:
                if state["destination"] == "animal":
                    return "prompt_1"
                else:
                    return "prompt_2"


            # Finally, assemble the multi-prompt chain. This is a sequence of
            # two steps:
            # 1) Classify the query as "animal" or "vegetable" via route_query,
            # storing the destination in the graph state.
            # 2) Route the query to prompt_1 or prompt_2 based on that
            # destination, producing the answer.
            graph = StateGraph(State)
            graph.add_node("route_query", route_query)
            graph.add_node("prompt_1", prompt_1)
            graph.add_node("prompt_2", prompt_2)

            graph.add_edge(START, "route_query")
            graph.add_conditional_edges("route_query", select_node)
            graph.add_edge("prompt_1", END)
            graph.add_edge("prompt_2", END)
            app = graph.compile()
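
            # Because the compiled graph is a standard Runnable, it also
            # supports the batch and streaming methods the legacy chain
            # lacked, e.g. (illustrative calls):
            #     await app.abatch([{"query": "what color are carrots"}])
            #     async for step in app.astream({"query": "what color are carrots"}):
            #         print(step)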

            result = await app.ainvoke({"query": "what color are carrots"})
            print(result["destination"])
            print(result["answer"])
    """

    @property
    def output_keys(self) -> List[str]:
        return ["text"]

    @classmethod
    def from_prompts(
        cls,
        llm: BaseLanguageModel,
        prompt_infos: List[Dict[str, str]],
        default_chain: Optional[Chain] = None,
        **kwargs: Any,
    ) -> MultiPromptChain:
        """Convenience constructor for instantiating from destination prompts."""
        destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos]
        destinations_str = "\n".join(destinations)
        router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(
            destinations=destinations_str
        )
        router_prompt = PromptTemplate(
            template=router_template,
            input_variables=["input"],
            output_parser=RouterOutputParser(),
        )
        router_chain = LLMRouterChain.from_llm(llm, router_prompt)
        destination_chains = {}
        for p_info in prompt_infos:
            name = p_info["name"]
            prompt_template = p_info["prompt_template"]
            prompt = PromptTemplate(
                template=prompt_template, input_variables=["input"]
            )
            chain = LLMChain(llm=llm, prompt=prompt)
            destination_chains[name] = chain
        _default_chain = default_chain or ConversationChain(llm=llm, output_key="text")
        return cls(
            router_chain=router_chain,
            destination_chains=destination_chains,
            default_chain=_default_chain,
            **kwargs,
        )
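

# A minimal usage sketch for the deprecated class above, assuming an OpenAI
# completion model from langchain_openai; the "physics" prompt info and the
# query are illustrative. from_prompts reads the "name", "description", and
# "prompt_template" keys of each dict:
#
#     from langchain_openai import OpenAI
#
#     prompt_infos = [
#         {
#             "name": "physics",
#             "description": "Good for answering questions about physics",
#             "prompt_template": "You are a physics expert. Answer:\n{input}",
#         },
#     ]
#     chain = MultiPromptChain.from_prompts(OpenAI(), prompt_infos)
#     print(chain.invoke({"input": "What is black body radiation?"})["text"])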