
from __future__ import annotations

import logging
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Union

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import get_pydantic_field_names, pre_init
from langchain_core.utils.utils import _build_model_kwargs
from pydantic import Field, model_validator

logger = logging.getLogger(__name__)


class LlamaCpp(LLM):
    """llama.cpp model.

    To use, you should have the llama-cpp-python library installed, and provide the
    path to the Llama model as a named parameter to the constructor.
    Check out: https://github.com/abetlen/llama-cpp-python

    Example:
        .. code-block:: python

            from langchain_community.llms import LlamaCpp
            llm = LlamaCpp(model_path="/path/to/llama/model")
    """

    client: Any = None  #: :meta private:

    model_path: str
    """The path to the Llama model file."""

    lora_base: Optional[str] = None
    """The path to the Llama LoRA base model."""

    lora_path: Optional[str] = None
    """The path to the Llama LoRA. If None, no LoRa is loaded."""

    n_ctx: int = Field(512, alias="n_ctx")
    """Token context window."""

    n_parts: int = Field(-1, alias="n_parts")
    """Number of parts to split the model into.
    If -1, the number of parts is automatically determined."""

    seed: int = Field(-1, alias="seed")
    """Seed. If -1, a random seed is used."""

    f16_kv: bool = Field(True, alias="f16_kv")
    """Use half-precision for key/value cache."""

    logits_all: bool = Field(False, alias="logits_all")
    """Return logits for all tokens, not just the last token."""

    vocab_only: bool = Field(False, alias="vocab_only")
    """Only load the vocabulary, no weights."""

    use_mlock: bool = Field(False, alias="use_mlock")
    """Force system to keep model in RAM."""

    n_threads: Optional[int] = Field(None, alias="n_threads")
    """Number of threads to use.
    If None, the number of threads is automatically determined."""

    n_batch: Optional[int] = Field(8, alias="n_batch")
    """Number of tokens to process in parallel.
    Should be a number between 1 and n_ctx."""

    n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers")
    """Number of layers to be loaded into gpu memory. Default None."""

    suffix: Optional[str] = Field(None)
    """A suffix to append to the generated text. If None, no suffix is appended."""

    max_tokens: Optional[int] = 256
    """The maximum number of tokens to generate."""

    temperature: Optional[float] = 0.8
    """The temperature to use for sampling."""

    top_p: Optional[float] = 0.95
    """The top-p value to use for sampling."""

    logprobs: Optional[int] = Field(None)
    """The number of logprobs to return. If None, no logprobs are returned."""

    echo: Optional[bool] = False
    """Whether to echo the prompt."""

    stop: Optional[List[str]] = []
    """A list of strings to stop generation when encountered."""

    repeat_penalty: Optional[float] = 1.1
    """The penalty to apply to repeated tokens."""

    top_k: Optional[int] = 40
    """The top-k value to use for sampling."""

    last_n_tokens_size: Optional[int] = 64
    """The number of tokens to look back when applying the repeat_penalty."""

    use_mmap: Optional[bool] = True
    """Whether to keep the model loaded in RAM."""

    rope_freq_scale: float = 1.0
    """Scale factor for rope sampling."""

    rope_freq_base: float = 10000.0
    """Base frequency for rope sampling."""

    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Any additional parameters to pass to llama_cpp.Llama."""

    streaming: bool = True
    """Whether to stream the results, token by token."""

    grammar_path: Optional[Union[str, Path]] = None
    """
    grammar_path: Path to the .gbnf file that defines formal grammars
    for constraining model outputs. For instance, the grammar can be used
    to force the model to generate valid JSON or to speak exclusively in
    emojis. At most one of grammar_path and grammar should be passed in.
    """

    grammar: Optional[Union[str, Any]] = None
    """
    grammar: formal grammar for constraining model outputs. For instance, the
    grammar can be used to force the model to generate valid JSON or to speak
    exclusively in emojis. At most one of grammar_path and grammar should be
    passed in.
    """

    verbose: bool = True
    """Print verbose output to stderr."""

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that llama-cpp-python library is installed."""
        try:
            from llama_cpp import Llama, LlamaGrammar
        except ImportError:
            raise ImportError(
                "Could not import llama-cpp-python library. "
                "Please install the llama-cpp-python library to "
                "use this embedding model: pip install llama-cpp-python"
            )

        model_path = values["model_path"]
        model_param_names = [
            "rope_freq_scale",
            "rope_freq_base",
            "lora_path",
            "lora_base",
            "n_ctx",
            "n_parts",
            "seed",
            "f16_kv",
            "logits_all",
            "vocab_only",
            "use_mlock",
            "n_threads",
            "n_batch",
            "use_mmap",
            "last_n_tokens_size",
            "verbose",
        ]
        model_params = {k: values[k] for k in model_param_names}
        # For backwards compatibility, only include if non-null.
        if values["n_gpu_layers"] is not None:
            model_params["n_gpu_layers"] = values["n_gpu_layers"]

        model_params.update(values["model_kwargs"])

        try:
            values["client"] = Llama(model_path, **model_params)
        except Exception as e:
            raise ValueError(
                f"Could not load Llama model from path: {model_path}. "
                f"Received error {e}"
            )

        if values["grammar"] and values["grammar_path"]:
            grammar = values["grammar"]
            grammar_path = values["grammar_path"]
            raise ValueError(
                "Can only pass in one of grammar and grammar_path. Received "
                f"{grammar=} and {grammar_path=}."
            )
        elif isinstance(values["grammar"], str):
            values["grammar"] = LlamaGrammar.from_string(values["grammar"])
        elif values["grammar_path"]:
            values["grammar"] = LlamaGrammar.from_file(values["grammar_path"])

        return values

    @model_validator(mode="before")
    @classmethod
    def build_model_kwargs(cls, values: Dict[str, Any]) -> Any:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = get_pydantic_field_names(cls)
        values = _build_model_kwargs(values, all_required_field_names)
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling llama_cpp."""
        params = {
            "suffix": self.suffix,
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "logprobs": self.logprobs,
            "echo": self.echo,
            "stop_sequences": self.stop,  # key here is convention among LLM classes
            "repeat_penalty": self.repeat_penalty,
            "top_k": self.top_k,
        }
        if self.grammar:
            params["grammar"] = self.grammar
        return params

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_path": self.model_path}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "llamacpp"

    def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
        """
        Performs sanity check, preparing parameters in format needed by llama_cpp.

        Args:
            stop (Optional[List[str]]): List of stop sequences for llama_cpp.

        Returns:
            Dictionary containing the combined parameters.
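
        Example:
            .. code-block:: python

                # Illustrative sketch: how an instance-level stop list flows
                # into the kwargs handed to llama_cpp. Assumes a loadable
                # model at this placeholder path.
                llm = LlamaCpp(model_path="/path/to/llama/model", stop=["\n"])
                params = llm._get_parameters()
                # params["stop"] == ["\n"]; "stop_sequences" has been removed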
        """
        # Raise error if stop sequences are in both input and default params
        if self.stop and stop is not None:
            raise ValueError("`stop` found in both the input and default params.")

        params = self._default_params

        # llama_cpp expects the "stop" key not this, so we remove it:
        params.pop("stop_sequences")

        # then sets it as configured, or default to an empty list:
        params["stop"] = self.stop or stop or []

        return params

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call the Llama model and return the output.

        Args:
            prompt: The prompt to use for generation.
            stop: A list of strings to stop generation when encountered.

        Returns:
            The generated text.

        Example:
            .. code-block:: python

                from langchain_community.llms import LlamaCpp
                llm = LlamaCpp(model_path="/path/to/local/llama/model.bin")
                llm.invoke("This is a prompt.")
         )promptr1   run_managerrm   choicesr   textrf   )r>   _streamrp   rj   r   )	r^   rm   r1   rn   kwargscombined_text_outputchunkr_   results	            rT   _callzLlamaCpp._call  s    . >> $& % ' 	 3 %

2$3 ('))$/F))&)F T[[99&9F)$Q'//r[   c              +  *  K   i | j                  |      |} | j                  d
|dd|}|D ]`  }|d   d   j                  dd      }t        |d   d   d   d|i      }	|r(|j	                  |	j
                  | j                  |	       |	 b yw)a\  Yields results objects as they are generated in real time.

        It also calls the callback manager's on_llm_new_token event with
        similar parameters to the OpenAI LLM class method of the same name.

        Args:
            prompt: The prompts to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            A generator representing the stream of tokens being generated.

        Yields:
            A dictionary like objects containing a string token and metadata.
            See llama-cpp-python docs and below for more.

        Example:
            .. code-block:: python

                from langchain_community.llms import LlamaCpp
                llm = LlamaCpp(
                    model_path="/path/to/local/model.bin",
                    temperature = 0.5
                )
                for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
                        stop=["'","
"]):
                    result = chunk["choices"][0]
                    print(result["text"], end='', flush=True)  # noqa: T201

        T)rm   streamro   r   r.   Nrp   )rp   generation_info)tokenrA   	log_probsrf   )rj   r   getr   on_llm_new_tokenrp   rA   )
r^   rm   r1   rn   rr   r_   ru   partr.   rt   s
             rT   rq   zLlamaCpp._stream,  s     J :D((.9&9BF4B6B 
	DIq)--j$?H#)_Q'/!+X 6E ,,**dllh -  K
	s   BBc                l    | j                   j                  |j                  d            }t        |      S )Nzutf-8)r   tokenizeencodelen)r^   rp   tokenized_texts      rT   get_num_tokenszLlamaCpp.get_num_tokens_  s*    --dkk'.BC>""r[   )rO   r   returnr   )rO   r<   r   r   )r   r<   )r   r   )N)r1   r0   r   r<   )NN)
rm   r   r1   r0   rn   "Optional[CallbackManagerForLLMRun]rr   r   r   r   )
rm   r   r1   r0   rn   r   rr   r   r   zIterator[GenerationChunk])rp   r   r   r   )3__name__
__module____qualname____doc__r   __annotations__r   r   r   r   r   r   r    r"   r#   r$   r%   r'   r(   r)   r+   r,   r-   r.   r/   r1   r2   r4   r6   r7   r9   r:   dictr=   r>   r?   r@   rA   r   rU   r   classmethodrZ   propertyr`   rc   rg   rj   rv   rq   r   rf   r[   rT   r   r      s    FCO+#I}#0#I}#As'*E3*9-GS-? b'D#'-X.FD.1U,7J7@U,7J7/E5It5,$T=I}=C #1I6G]6/ #(N"CL-CF!$KFM'S #J#3#&K&.!E?!.#DkHm)N D. % "D
"@&)NO)2E=.(**M#Hn#1 OU )#NE#+#(#>L.>?It8/3L,3 *.G&- GT): :x (#  $  " K K  8 %):>	(0(0 "(0 8	(0
 (0 
(0Z %):>	11 "1 8	1
 1 
#1f#r[   r   )
__future__r   loggingpathlibr   typingr   r   r   r   r	   r
   langchain_core.callbacksr   #langchain_core.language_models.llmsr   langchain_core.outputsr   langchain_core.utilsr   r   langchain_core.utils.utilsr   pydanticr   r   	getLoggerr   loggerr   rf   r[   rT   <module>r      sF    "   = = = 3 2 C : +			8	$P#s P#r[   
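

# Example usage (illustrative sketch): drives the wrapper end to end, under
# the assumption that llama-cpp-python is installed and that a
# llama.cpp-compatible weights file exists at the placeholder path below.
if __name__ == "__main__":
    llm = LlamaCpp(
        model_path="/path/to/local/model.bin",  # placeholder path (assumption)
        temperature=0.5,
    )
    # With the default streaming=True, invoke() consumes _stream() internally
    # and returns the concatenated chunk texts as a single string.
    print(llm.invoke("Q: Name the planets in the solar system. A:"))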