
# Prompts and output parser for the FLARE chain (langchain.chains.flare).

from typing import Tuple

from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import PromptTemplate


class FinishedOutputParser(BaseOutputParser[Tuple[str, bool]]):
    """Output parser that checks if the output is finished."""

    finished_value: str = "FINISHED"
    """Value that indicates the output is finished."""

    def parse(self, text: str) -> Tuple[str, bool]:
        # Strip whitespace, check for the sentinel value, and return the text
        # with the sentinel removed alongside a flag indicating completion.
        cleaned = text.strip()
        finished = self.finished_value in cleaned
        return cleaned.replace(self.finished_value, ""), finished


PROMPT_TEMPLATE = """\
Respond to the user message using any relevant context. \
If context is provided, you should ground your answer in that context. \
Once you're done responding return FINISHED.

>>> CONTEXT: {context}
>>> USER INPUT: {user_input}
>>> RESPONSE: {response}"""

PROMPT = PromptTemplate(
    template=PROMPT_TEMPLATE,
    input_variables=["user_input", "context", "response"],
)


QUESTION_GENERATOR_PROMPT_TEMPLATE = """\
Given a user input and an existing partial response as context, \
ask a question to which the answer is the given term/entity/phrase:

>>> USER INPUT: {user_input}
>>> EXISTING PARTIAL RESPONSE: {current_response}

The question to which the answer is the term/entity/phrase \
"{uncertain_span}" is:"""

QUESTION_GENERATOR_PROMPT = PromptTemplate(
    template=QUESTION_GENERATOR_PROMPT_TEMPLATE,
    input_variables=["user_input", "current_response", "uncertain_span"],
)
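

# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal demonstration of how the pieces above fit together, using only the
# APIs defined or imported here: PromptTemplate.format() and
# FinishedOutputParser.parse(). The example strings are hypothetical.
if __name__ == "__main__":
    # Fill the response prompt with some example values.
    prompt_text = PROMPT.format(
        context="The Eiffel Tower is in Paris.",
        user_input="Where is the Eiffel Tower?",
        response="",
    )
    print(prompt_text)

    # The parser removes the sentinel token and reports whether it was present.
    parser = FinishedOutputParser()
    cleaned, finished = parser.parse("The Eiffel Tower is in Paris. FINISHED")
    print(cleaned)   # "The Eiffel Tower is in Paris. "
    print(finished)  # True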