"""Classes for working with the Gemini models."""

from __future__ import annotations

from collections.abc import Iterable
import reprlib
import textwrap
from typing import Any, Union, overload

import google.api_core.exceptions

from google.generativeai import protos
from google.generativeai import client
from google.generativeai import caching
from google.generativeai.types import content_types
from google.generativeai.types import generation_types
from google.generativeai.types import helper_types
from google.generativeai.types import safety_types

_USER_ROLE = "user"
_MODEL_ROLE = "model"


class GenerativeModel:
    """
    The `genai.GenerativeModel` class wraps default parameters for calls to
    `GenerativeModel.generate_content`, `GenerativeModel.count_tokens`, and
    `GenerativeModel.start_chat`.

    This family of functionality is designed to support multi-turn conversations and multimodal
    requests. Which media types are supported for input and output depends on the model.

    >>> import google.generativeai as genai
    >>> import PIL.Image
    >>> genai.configure(api_key='YOUR_API_KEY')
    >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
    >>> result = model.generate_content('Tell me a story about a magic backpack')
    >>> result.text
    "In the quaint little town of Lakeside, there lived a young girl named Lily..."

    Multimodal input:

    >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
    >>> result = model.generate_content([
    ...     "Give me a recipe for these:", PIL.Image.open('scones.jpeg')])
    >>> result.text
    "**Blueberry Scones** ..."

    Multi-turn conversation:

    >>> chat = model.start_chat()
    >>> response = chat.send_message("Hi, I have some questions for you.")
    >>> response.text
    "Sure, I'll do my best to answer your questions..."

    To list the compatible model names use:

    >>> for m in genai.list_models():
    ...     if 'generateContent' in m.supported_generation_methods:
    ...         print(m.name)
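
    To count tokens before sending a request (a usage sketch; `count_tokens` is
    defined below and returns a `protos.CountTokensResponse`):

    >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
    >>> model.count_tokens('Tell me a story about a magic backpack')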

    Arguments:
        model_name: The name of the model to query. To list compatible models use
            `genai.list_models()`.
        safety_settings: Sets the default safety filters. This controls which content is blocked
            by the API before being returned.
        generation_config: A `genai.GenerationConfig` setting the default generation parameters
            to use.
        tools: `protos.Tools` or compatible objects declaring functions the model may call.
        tool_config: Configuration controlling how the model may use `tools`.
        system_instruction: A default system instruction sent with every request.
    """

    def __init__(
        self,
        model_name: str = "gemini-1.5-flash-002",
        safety_settings: safety_types.SafetySettingOptions | None = None,
        generation_config: generation_types.GenerationConfigType | None = None,
        tools: content_types.FunctionLibraryType | None = None,
        tool_config: content_types.ToolConfigType | None = None,
        system_instruction: content_types.ContentType | None = None,
    ):
        if "/" not in model_name:
            model_name = "models/" + model_name
        self._model_name = model_name
        self._safety_settings = safety_types.to_easy_safety_dict(safety_settings)
        self._generation_config = generation_types.to_generation_config_dict(generation_config)
        self._tools = content_types.to_function_library(tools)

        if tool_config is None:
            self._tool_config = None
        else:
            self._tool_config = content_types.to_tool_config(tool_config)

        if system_instruction is None:
            self._system_instruction = None
        else:
            self._system_instruction = content_types.to_content(system_instruction)

        self._client = None
        self._async_client = None

    @property
    def cached_content(self) -> str:
        return getattr(self, "_cached_content", None)

    @property
    def model_name(self):
        return self._model_name

    def __str__(self):
        def maybe_text(content):
            # Show a short repr of the text for single-part text contents.
            if content and len(content.parts) and (t := content.parts[0].text):
                return repr(t)
            return content

        return textwrap.dedent(
            f"""\
            genai.GenerativeModel(
                model_name='{self.model_name}',
                generation_config={self._generation_config},
                safety_settings={self._safety_settings},
                tools={self._tools},
                system_instruction={maybe_text(self._system_instruction)},
                cached_content={self.cached_content}
            )"""
        )

    __repr__ = __str__

    def _prepare_request(
        self,
        *,
        contents: content_types.ContentsType,
        generation_config: generation_types.GenerationConfigType | None,
        safety_settings: safety_types.SafetySettingOptions | None,
        tools: content_types.FunctionLibraryType | None,
        tool_config: content_types.ToolConfigType | None,
    ) -> protos.GenerateContentRequest:
        """Creates a `protos.GenerateContentRequest` from raw inputs."""
        if hasattr(self, "_cached_content") and any(
            [self._system_instruction, tools, tool_config]
        ):
            raise ValueError(
                "`tools`, `tool_config`, `system_instruction` cannot be set on a model "
                "instantiated with `cached_content` as its context."
            )

        tools_lib = self._get_tools_lib(tools)
        if tools_lib is not None:
            tools_lib = tools_lib.to_proto()

        if tool_config is None:
            tool_config = self._tool_config
        else:
            tool_config = content_types.to_tool_config(tool_config)

        contents = content_types.to_contents(contents)

        generation_config = generation_types.to_generation_config_dict(generation_config)
        merged_gc = self._generation_config.copy()
        merged_gc.update(generation_config)

        safety_settings = safety_types.to_easy_safety_dict(safety_settings)
        merged_ss = self._safety_settings.copy()
        merged_ss.update(safety_settings)
        merged_ss = safety_types.normalize_safety_settings(merged_ss)

        return protos.GenerateContentRequest(
            model=self._model_name,
            contents=contents,
            generation_config=merged_gc,
            safety_settings=merged_ss,
            tools=tools_lib,
            tool_config=tool_config,
            system_instruction=self._system_instruction,
            cached_content=self.cached_content,
        )

    def _get_tools_lib(
        self, tools: content_types.FunctionLibraryType
    ) -> content_types.FunctionLibrary | None:
        if tools is None:
            return self._tools
        else:
            return content_types.to_function_library(tools)

    @overload
    @classmethod
    def from_cached_content(
        cls,
        cached_content: str,
        *,
        generation_config: generation_types.GenerationConfigType | None = None,
        safety_settings: safety_types.SafetySettingOptions | None = None,
    ) -> GenerativeModel: ...

    @overload
    @classmethod
    def from_cached_content(
        cls,
        cached_content: caching.CachedContent,
        *,
        generation_config: generation_types.GenerationConfigType | None = None,
        safety_settings: safety_types.SafetySettingOptions | None = None,
    ) -> GenerativeModel: ...

    @classmethod
    def from_cached_content(
        cls,
        cached_content: str | caching.CachedContent,
        *,
        generation_config: generation_types.GenerationConfigType | None = None,
        safety_settings: safety_types.SafetySettingOptions | None = None,
    ) -> GenerativeModel:
        """Creates a model with `cached_content` as the model's context.

        Args:
            cached_content: The `caching.CachedContent`, or its name, to serve as the
                model's context.
            generation_config: Overrides for the model's generation config.
            safety_settings: Overrides for the model's safety settings.

        Returns:
            `GenerativeModel` object with `cached_content` as its context.
        """
        if isinstance(cached_content, str):
            cached_content = caching.CachedContent.get(name=cached_content)

        self = cls(
            model_name=cached_content.model,
            generation_config=generation_config,
            safety_settings=safety_settings,
        )
        setattr(self, "_cached_content", cached_content.name)
        return self

    def generate_content(
        self,
        contents: content_types.ContentsType,
        *,
        generation_config: generation_types.GenerationConfigType | None = None,
        safety_settings: safety_types.SafetySettingOptions | None = None,
        stream: bool = False,
        tools: content_types.FunctionLibraryType | None = None,
        tool_config: content_types.ToolConfigType | None = None,
        request_options: helper_types.RequestOptionsType | None = None,
    ) -> generation_types.GenerateContentResponse:
        """A multipurpose function to generate responses from the model.

        This `GenerativeModel.generate_content` method can handle multimodal input and multi-turn
        conversations.

        >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
        >>> response = model.generate_content('Tell me a story about a magic backpack')
        >>> response.text

        ### Streaming

        This method supports streaming with `stream=True`. The result has the same type as in the
        non-streaming case, but you can iterate over the response chunks as they become available:

        >>> response = model.generate_content('Tell me a story about a magic backpack', stream=True)
        >>> for chunk in response:
        ...   print(chunk.text)

        ### Multi-turn

        This method supports multi-turn chats but is **stateless**: the entire conversation history needs to be sent with each
        request. This takes some manual management but gives you complete control:

        >>> messages = [{'role':'user', 'parts': ['hello']}]
        >>> response = model.generate_content(messages) # "Hello, how can I help"
        >>> messages.append(response.candidates[0].content)
        >>> messages.append({'role':'user', 'parts': ['How does quantum physics work?']})
        >>> response = model.generate_content(messages)

        For a simpler multi-turn interface see `GenerativeModel.start_chat`.

        ### Input type flexibility

        While the underlying API strictly expects a `list[protos.Content]`, this method
        will convert the user input into the correct type. The hierarchy of types that can be
        converted is listed below. Any of these objects can be passed as an equivalent `dict`.

        * `Iterable[protos.Content]`
        * `protos.Content`
        * `Iterable[protos.Part]`
        * `protos.Part`
        * `str`, `Image`, or `protos.Blob`

        In an `Iterable[protos.Content]` each `content` is a separate message.
        But note that an `Iterable[protos.Part]` is taken as the parts of a single message.
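
        For example (a sketch), a single message may also be passed as the `dict`
        equivalent of a `protos.Content`:

        >>> response = model.generate_content({'role': 'user', 'parts': ['hello']})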

        Arguments:
            contents: The contents serving as the model's prompt.
            generation_config: Overrides for the model's generation config.
            safety_settings: Overrides for the model's safety settings.
            stream: If True, yield response chunks as they are generated.
            tools: `protos.Tools` or compatible objects declaring functions the model may call.
            request_options: Options for the request.
        """
        if not contents:
            raise TypeError("contents must not be empty")

        request = self._prepare_request(
            contents=contents,
            generation_config=generation_config,
            safety_settings=safety_settings,
            tools=tools,
            tool_config=tool_config,
        )
        # The API requires the first content to carry a role.
        if request.contents and not request.contents[0].role:
            request.contents[0].role = _USER_ROLE

        if self._client is None:
            self._client = client.get_default_generative_client()

        if request_options is None:
            request_options = {}

        try:
            if stream:
                with generation_types.rewrite_stream_error():
                    iterator = self._client.stream_generate_content(request, **request_options)
                return generation_types.GenerateContentResponse.from_iterator(iterator)
            else:
                response = self._client.generate_content(request, **request_options)
                return generation_types.GenerateContentResponse.from_response(response)
        except google.api_core.exceptions.InvalidArgument as e:
            if e.message.startswith("Request payload size exceeds the limit:"):
                e.message += (
                    " The file size is too large. Please use the File API to upload your files"
                    " instead. Example: `f = genai.upload_file(path);"
                    " m.generate_content(['tell me about this file:', f])`"
                )
            raise

    async def generate_content_async(
        self,
        contents: content_types.ContentsType,
        *,
        generation_config: generation_types.GenerationConfigType | None = None,
        safety_settings: safety_types.SafetySettingOptions | None = None,
        stream: bool = False,
        tools: content_types.FunctionLibraryType | None = None,
        tool_config: content_types.ToolConfigType | None = None,
        request_options: helper_types.RequestOptionsType | None = None,
    ) -> generation_types.AsyncGenerateContentResponse:
        """The async version of `GenerativeModel.generate_content`."""
        if not contents:
            raise TypeError("contents must not be empty")

        request = self._prepare_request(
            contents=contents,
            generation_config=generation_config,
            safety_settings=safety_settings,
            tools=tools,
            tool_config=tool_config,
        )
        if request.contents and not request.contents[0].role:
            request.contents[0].role = _USER_ROLE

        if self._async_client is None:
            self._async_client = client.get_default_generative_async_client()

        if request_options is None:
            request_options = {}

        try:
            if stream:
                with generation_types.rewrite_stream_error():
                    iterator = await self._async_client.stream_generate_content(
                        request, **request_options
                    )
                return await generation_types.AsyncGenerateContentResponse.from_aiterator(iterator)
            else:
                response = await self._async_client.generate_content(request, **request_options)
                return generation_types.AsyncGenerateContentResponse.from_response(response)
        except google.api_core.exceptions.InvalidArgument as e:
            if e.message.startswith("Request payload size exceeds the limit:"):
                e.message += (
                    " The file size is too large. Please use the File API to upload your files"
                    " instead. Example: `f = genai.upload_file(path);"
                    " m.generate_content(['tell me about this file:', f])`"
                )
            raise

    def count_tokens(
        self,
        contents: content_types.ContentsType = None,
        *,
        generation_config: generation_types.GenerationConfigType | None = None,
        safety_settings: safety_types.SafetySettingOptions | None = None,
        tools: content_types.FunctionLibraryType | None = None,
        tool_config: content_types.ToolConfigType | None = None,
        request_options: helper_types.RequestOptionsType | None = None,
    ) -> protos.CountTokensResponse:
        if request_options is None:
            request_options = {}

        if self._client is None:
            self._client = client.get_default_generative_client()

        request = protos.CountTokensRequest(
            model=self.model_name,
            generate_content_request=self._prepare_request(
                contents=contents,
                generation_config=generation_config,
                safety_settings=safety_settings,
                tools=tools,
                tool_config=tool_config,
            ),
        )
        return self._client.count_tokens(request, **request_options)

    async def count_tokens_async(
        self,
        contents: content_types.ContentsType = None,
        *,
        generation_config: generation_types.GenerationConfigType | None = None,
        safety_settings: safety_types.SafetySettingOptions | None = None,
        tools: content_types.FunctionLibraryType | None = None,
        tool_config: content_types.ToolConfigType | None = None,
        request_options: helper_types.RequestOptionsType | None = None,
    ) -> protos.CountTokensResponse:
        if request_options is None:
            request_options = {}

        if self._async_client is None:
            self._async_client = client.get_default_generative_async_client()

        request = protos.CountTokensRequest(
            model=self.model_name,
            generate_content_request=self._prepare_request(
                contents=contents,
                generation_config=generation_config,
                safety_settings=safety_settings,
                tools=tools,
                tool_config=tool_config,
            ),
        )
        return await self._async_client.count_tokens(request, **request_options)

    def start_chat(
        self,
        *,
        history: Iterable[content_types.StrictContentType] | None = None,
        enable_automatic_function_calling: bool = False,
    ) -> ChatSession:
        """Returns a `genai.ChatSession` attached to this model.

        >>> model = genai.GenerativeModel()
        >>> chat = model.start_chat(history=[...])
        >>> response = chat.send_message("Hello?")

        Arguments:
            history: An iterable of `protos.Content` objects, or equivalents to initialize the
                session.
            enable_automatic_function_calling: If `True`, `send_message` will automatically call
                any callable functions the model requests and send the results back to the model.
        """
        if self._generation_config.get("candidate_count", 1) > 1:
            raise ValueError(
                "Invalid configuration: The chat functionality does not support "
                "`candidate_count` greater than 1."
            )
        return ChatSession(
            model=self,
            history=history,
            enable_automatic_function_calling=enable_automatic_function_calling,
        )


class ChatSession:
    """Contains an ongoing conversation with the model.

    >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
    >>> chat = model.start_chat()
    >>> response = chat.send_message("Hello")
    >>> print(response.text)
    >>> response = chat.send_message("Hello again")
    >>> print(response.text)

    This `ChatSession` object collects the messages sent and received, in its
    `ChatSession.history` attribute.
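
    For example (a sketch, assuming the two `send_message` calls above succeeded),
    the history alternates user and model `protos.Content` objects:

    >>> len(chat.history)
    4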

    Arguments:
        model: The model to use in the chat.
        history: A chat history to initialize the object with.
    """

    def __init__(
        self,
        model: GenerativeModel,
        history: Iterable[content_types.StrictContentType] | None = None,
        enable_automatic_function_calling: bool = False,
    ):
        self.model = model
        self._history: list[protos.Content] = content_types.to_contents(history)
        self._last_sent: protos.Content | None = None
        self._last_received: generation_types.BaseGenerateContentResponse | None = None
        self.enable_automatic_function_calling = enable_automatic_function_calling

    def send_message(
        self,
        content: content_types.ContentType,
        *,
        generation_config: generation_types.GenerationConfigType = None,
        safety_settings: safety_types.SafetySettingOptions = None,
        stream: bool = False,
        tools: content_types.FunctionLibraryType | None = None,
        tool_config: content_types.ToolConfigType | None = None,
        request_options: helper_types.RequestOptionsType | None = None,
    ) -> generation_types.GenerateContentResponse:
        """Sends the conversation history with the added message and returns the model's response.

        Appends the request and response to the conversation history.

        >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
        >>> chat = model.start_chat()
        >>> response = chat.send_message("Hello")
        >>> print(response.text)
        "Hello! How can I assist you today?"
        >>> len(chat.history)
        2

        Call it with `stream=True` to receive response chunks as they are generated:

        >>> chat = model.start_chat()
        >>> response = chat.send_message("Explain quantum physics", stream=True)
        >>> for chunk in response:
        ...   print(chunk.text, end='')

        Once iteration over chunks is complete, the `response` and `ChatSession` are in states identical to the
        `stream=False` case. Some properties are not available until iteration is complete.

        Like `GenerativeModel.generate_content`, this method lets you override the model's `generation_config` and
        `safety_settings`.
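
        For example (a sketch):

        >>> config = genai.GenerationConfig(temperature=0.0)
        >>> response = chat.send_message("Hello", generation_config=config)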

        Arguments:
            content: The message contents.
            generation_config: Overrides for the model's generation config.
            safety_settings: Overrides for the model's safety settings.
            stream: If True, yield response chunks as they are generated.
            tools: Overrides for the model's `tools`.
            tool_config: Overrides for the model's `tool_config`.
            request_options: Options for the request.
        """
        if request_options is None:
            request_options = {}

        if self.enable_automatic_function_calling and stream:
            raise NotImplementedError(
                "Unsupported configuration: The `google.generativeai` SDK currently does not "
                "support the combination of `stream=True` and "
                "`enable_automatic_function_calling=True`."
            )

        tools_lib = self.model._get_tools_lib(tools)

        content = content_types.to_content(content)
        if not content.role:
            content.role = _USER_ROLE

        history = self.history[:]
        history.append(content)

        generation_config = generation_types.to_generation_config_dict(generation_config)
        if generation_config.get("candidate_count", 1) > 1:
            raise ValueError(
                "Invalid configuration: The chat functionality does not support "
                "`candidate_count` greater than 1."
            )

        response = self.model.generate_content(
            contents=history,
            generation_config=generation_config,
            safety_settings=safety_settings,
            stream=stream,
            tools=tools_lib,
            tool_config=tool_config,
            request_options=request_options,
        )

        self._check_response(response=response, stream=stream)

        if self.enable_automatic_function_calling and tools_lib is not None:
            self.history, content, response = self._handle_afc(
                response=response,
                history=history,
                generation_config=generation_config,
                safety_settings=safety_settings,
                stream=stream,
                tools_lib=tools_lib,
                request_options=request_options,
            )

        self._last_sent = content
        self._last_received = response

        return response

    def _check_response(self, *, response, stream):
        if response.prompt_feedback.block_reason:
            raise generation_types.BlockedPromptException(response.prompt_feedback)

        if not stream:
            if response.candidates[0].finish_reason not in (
                protos.Candidate.FinishReason.FINISH_REASON_UNSPECIFIED,
                protos.Candidate.FinishReason.STOP,
                protos.Candidate.FinishReason.MAX_TOKENS,
            ):
                raise generation_types.StopCandidateException(response.candidates[0])

    def _get_function_calls(self, response) -> list[protos.FunctionCall]:
        candidates = response.candidates
        if len(candidates) != 1:
            raise ValueError(
                "Invalid number of candidates: Automatic function calling only works with "
                f"1 candidate, but {len(candidates)} were provided."
            )
        parts = candidates[0].content.parts
        function_calls = [part.function_call for part in parts if part and "function_call" in part]
        return function_calls

    def _handle_afc(
        self,
        *,
        response,
        history,
        generation_config,
        safety_settings,
        stream,
        tools_lib,
        request_options,
    ) -> tuple[list[protos.Content], protos.Content, generation_types.BaseGenerateContentResponse]:
        # Keep calling the requested functions and resending the results until the
        # model replies with something other than a function call.
        while function_calls := self._get_function_calls(response):
            if not all(callable(tools_lib[fc]) for fc in function_calls):
                break

            history.append(response.candidates[0].content)

            function_response_parts: list[protos.Part] = []
            for fc in function_calls:
                fr = tools_lib(fc)
                assert fr is not None, (
                    "Unexpected state: The function reference (fr) should never be None. It should"
                    " only return None if the declaration is not callable, which is checked"
                    " earlier in the code."
                )
                function_response_parts.append(fr)

            send = protos.Content(role=_USER_ROLE, parts=function_response_parts)
            history.append(send)

            response = self.model.generate_content(
                contents=history,
                generation_config=generation_config,
                safety_settings=safety_settings,
                stream=stream,
                tools=tools_lib,
                request_options=request_options,
            )

            self._check_response(response=response, stream=stream)

        *history, content = history
        return history, content, response

    async def send_message_async(
        self,
        content: content_types.ContentType,
        *,
        generation_config: generation_types.GenerationConfigType = None,
        safety_settings: safety_types.SafetySettingOptions = None,
        stream: bool = False,
        tools: content_types.FunctionLibraryType | None = None,
        tool_config: content_types.ToolConfigType | None = None,
        request_options: helper_types.RequestOptionsType | None = None,
    ) -> generation_types.AsyncGenerateContentResponse:
        """The async version of `ChatSession.send_message`."""
        if request_options is None:
            request_options = {}

        if self.enable_automatic_function_calling and stream:
            raise NotImplementedError(
                "Unsupported configuration: The `google.generativeai` SDK currently does not "
                "support the combination of `stream=True` and "
                "`enable_automatic_function_calling=True`."
            )

        tools_lib = self.model._get_tools_lib(tools)

        content = content_types.to_content(content)
        if not content.role:
            content.role = _USER_ROLE

        history = self.history[:]
        history.append(content)

        generation_config = generation_types.to_generation_config_dict(generation_config)
        if generation_config.get("candidate_count", 1) > 1:
            raise ValueError(
                "Invalid configuration: The chat functionality does not support "
                "`candidate_count` greater than 1."
            )

        response = await self.model.generate_content_async(
            contents=history,
            generation_config=generation_config,
            safety_settings=safety_settings,
            stream=stream,
            tools=tools_lib,
            tool_config=tool_config,
            request_options=request_options,
        )

        self._check_response(response=response, stream=stream)

        if self.enable_automatic_function_calling and tools_lib is not None:
            self.history, content, response = await self._handle_afc_async(
                response=response,
                history=history,
                generation_config=generation_config,
                safety_settings=safety_settings,
                stream=stream,
                tools_lib=tools_lib,
                request_options=request_options,
            )

        self._last_sent = content
        self._last_received = response

        return response

    async def _handle_afc_async(
        self,
        *,
        response,
        history,
        generation_config,
        safety_settings,
        stream,
        tools_lib,
        request_options,
    ) -> tuple[list[protos.Content], protos.Content, generation_types.BaseGenerateContentResponse]:
        while function_calls := self._get_function_calls(response):
            if not all(callable(tools_lib[fc]) for fc in function_calls):
                break

            history.append(response.candidates[0].content)

            function_response_parts: list[protos.Part] = []
            for fc in function_calls:
                fr = tools_lib(fc)
                assert fr is not None, (
                    "Unexpected state: The function reference (fr) should never be None. It should"
                    " only return None if the declaration is not callable, which is checked"
                    " earlier in the code."
                )
                function_response_parts.append(fr)

            send = protos.Content(role=_USER_ROLE, parts=function_response_parts)
            history.append(send)

            response = await self.model.generate_content_async(
                contents=history,
                generation_config=generation_config,
                safety_settings=safety_settings,
                stream=stream,
                tools=tools_lib,
                request_options=request_options,
            )

            self._check_response(response=response, stream=stream)

        *history, content = history
        return history, content, response

    def __copy__(self):
        return ChatSession(
            model=self.model,
            history=list(self.history),
        )

    def rewind(self) -> tuple[protos.Content, protos.Content]:
        """Removes the last request/response pair from the chat history."""
        if self._last_received is None:
            result = self._history.pop(-2), self._history.pop()
        else:
            result = self._last_sent, self._last_received.candidates[0].content
            self._last_sent = None
            self._last_received = None
        return result

    @property
    def last(self) -> generation_types.BaseGenerateContentResponse | None:
        """Returns the last received `genai.GenerateContentResponse`."""
        return self._last_received

    @property
    def history(self) -> list[protos.Content]:
        """The chat history."""
        last = self._last_received
        if last is None:
            return self._history

        if last.candidates[0].finish_reason not in (
            protos.Candidate.FinishReason.FINISH_REASON_UNSPECIFIED,
            protos.Candidate.FinishReason.STOP,
            protos.Candidate.FinishReason.MAX_TOKENS,
        ):
            error = generation_types.StopCandidateException(last.candidates[0])
            last._error = error

        if last._error is not None:
            raise generation_types.BrokenResponseError(
                "Unable to build a coherent chat history due to a broken streaming response. "
                "Refer to the previous exception for details. To inspect the last response "
                "object, use `chat.last`. To remove the last request/response `Content` objects "
                "from the chat, call `last_send, last_received = chat.rewind()` and continue "
                "without it."
            ) from last._error

        sent = self._last_sent
        received = last.candidates[0].content
        if not received.role:
            received.role = _MODEL_ROLE

        self._history.extend([sent, received])

        self._last_sent = None
        self._last_received = None

        return self._history

    @history.setter
    def history(self, history):
        self._history = content_types.to_contents(history)
        self._last_sent = None
        self._last_received = None

    def __repr__(self) -> str:
        _dict_repr = reprlib.Repr()
        _model = str(self.model).replace("\n", "\n    ")

        def content_repr(x):
            return f"protos.Content({_dict_repr.repr(type(x).to_dict(x))})"

        try:
            history = list(self.history)
        except (generation_types.BrokenResponseError, generation_types.IncompleteIterationError):
            history = list(self._history)

        if self._last_sent is not None:
            history.append(self._last_sent)

        contents = [content_repr(x) for x in history]

        last_received = self._last_received
        if last_received is not None:
            if last_received._error is not None:
                contents.append("<STREAMING ERROR>")
            else:
                contents.append("<STREAMING IN PROGRESS>")

        _history = "history=[" + ",\n    ".join(contents) + "]\n)"
        return (
            textwrap.dedent(
                f"""\
                ChatSession(
                    model={_model},"""
            )
            + "\n    "
            + _history
        )
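

# A minimal usage sketch (not part of the library API): exercises the classes above.
# It assumes a valid key in the GOOGLE_API_KEY environment variable and network
# access; the model name follows the docstring examples above.
if __name__ == "__main__":
    import os

    import google.generativeai as genai

    genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

    model = GenerativeModel("gemini-1.5-flash")
    print(model)  # __str__ shows the default request parameters.

    # Single-shot generation.
    response = model.generate_content("Tell me a story about a magic backpack")
    print(response.text)

    # Multi-turn chat; ChatSession manages the history.
    chat = model.start_chat()
    print(chat.send_message("Hi, I have some questions for you.").text)
    print(chat.send_message("How does quantum physics work?").text)
    print(len(chat.history))  # Four Content objects after two turns.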