import copy
import math

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import uniform_

__all__ = ("multi_scale_deformable_attn_pytorch", "inverse_sigmoid")


def _get_clones(module, n):
    """
    Create a list of cloned modules from the given module.

    Args:
        module (nn.Module): The module to be cloned.
        n (int): Number of clones to create.

    Returns:
        (nn.ModuleList): A ModuleList containing n clones of the input module.

    Examples:
        >>> import torch.nn as nn
        >>> layer = nn.Linear(10, 10)
        >>> clones = _get_clones(layer, 3)
        >>> len(clones)
        3
    """
    return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])


def bias_init_with_prob(prior_prob=0.01):
    """
    Initialize conv/fc bias value according to a given probability value.

    This function calculates the bias initialization value from a prior probability using the inverse sigmoid (logit).
    It's commonly used in object detection models to initialize classification layers with a specific positive prediction
    probability.

    Args:
        prior_prob (float, optional): Prior probability for bias initialization.

    Returns:
        (float): Bias initialization value calculated from the prior probability.

    Examples:
        >>> bias = bias_init_with_prob(0.01)
        >>> print(f"Bias initialization value: {bias:.4f}")
        Bias initialization value: -4.5951
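        >>> # Illustrative check: the sigmoid of the bias recovers the prior probability
        >>> import math
        >>> round(1 / (1 + math.exp(-bias)), 4)
        0.01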
    """
    return float(-np.log((1 - prior_prob) / prior_prob))


def linear_init(module):
    """
    Initialize the weights and biases of a linear module.

    This function initializes the weights of a linear module using a uniform distribution within bounds calculated
    from the first dimension of the weight tensor. If the module has a bias, it is also initialized.

    Args:
        module (nn.Module): Linear module to initialize.

    Returns:
        (nn.Module): The initialized module.

    Examples:
        >>> import torch.nn as nn
        >>> linear = nn.Linear(10, 5)
        >>> initialized_linear = linear_init(linear)
    """
    bound = 1 / math.sqrt(module.weight.shape[0])
    uniform_(module.weight, -bound, bound)
    if hasattr(module, "bias") and module.bias is not None:
        uniform_(module.bias, -bound, bound)


def inverse_sigmoid(x, eps=1e-5):
    """
    Calculate the inverse sigmoid function for a tensor.

    This function applies the inverse of the sigmoid function to a tensor, which is useful in various neural network
    operations, particularly in attention mechanisms and coordinate transformations.

    Args:
        x (torch.Tensor): Input tensor with values in range [0, 1].
        eps (float, optional): Small epsilon value to prevent numerical instability.

    Returns:
        (torch.Tensor): Tensor after applying the inverse sigmoid function.

    Examples:
        >>> x = torch.tensor([0.2, 0.5, 0.8])
        >>> inverse_sigmoid(x)
        tensor([-1.3863,  0.0000,  1.3863])
    """
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)


def multi_scale_deformable_attn_pytorch(
    value: torch.Tensor,
    value_spatial_shapes: torch.Tensor,
    sampling_locations: torch.Tensor,
    attention_weights: torch.Tensor,
) -> torch.Tensor:
    """
    Implement multi-scale deformable attention in PyTorch.

    This function performs deformable attention across multiple feature map scales, allowing the model to attend to
    different spatial locations with learned offsets.

    Args:
        value (torch.Tensor): The value tensor with shape (bs, num_keys, num_heads, embed_dims).
        value_spatial_shapes (torch.Tensor): Spatial shapes of the value tensor with shape (num_levels, 2).
        sampling_locations (torch.Tensor): The sampling locations with shape
            (bs, num_queries, num_heads, num_levels, num_points, 2).
        attention_weights (torch.Tensor): The attention weights with shape
            (bs, num_queries, num_heads, num_levels, num_points).

    Returns:
        (torch.Tensor): The output tensor with shape (bs, num_queries, num_heads * embed_dims).

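    Examples:
        Minimal illustrative call with random tensors; the shapes are chosen only for this sketch (one 8x8 feature
        level, 4 heads, 16-dim per-head embeddings, 2 queries, 4 sampling points).
        >>> import torch
        >>> value = torch.rand(1, 64, 4, 16)
        >>> shapes = torch.tensor([[8, 8]])
        >>> locations = torch.rand(1, 2, 4, 1, 4, 2)
        >>> weights = torch.rand(1, 2, 4, 1, 4).softmax(-1)
        >>> out = multi_scale_deformable_attn_pytorch(value, shapes, locations, weights)
        >>> out.shape
        torch.Size([1, 2, 64])
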
    References:
        https://github.com/IDEA-Research/detrex/blob/main/detrex/layers/multi_scale_deform_attn.py
    """
    bs, _, num_heads, embed_dims = value.shape
    _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape
    value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
    sampling_grids = 2 * sampling_locations - 1
    sampling_value_list = []
    for level, (H_, W_) in enumerate(value_spatial_shapes):
        # (bs, H_*W_, num_heads, embed_dims) -> (bs*num_heads, embed_dims, H_, W_)
        value_l_ = value_list[level].flatten(2).transpose(1, 2).reshape(bs * num_heads, embed_dims, H_, W_)
        # (bs, num_queries, num_heads, num_points, 2) -> (bs*num_heads, num_queries, num_points, 2)
        sampling_grid_l_ = sampling_grids[:, :, :, level].transpose(1, 2).flatten(0, 1)
        # Sample each head's value map at the offsets: (bs*num_heads, embed_dims, num_queries, num_points)
        sampling_value_l_ = F.grid_sample(
            value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False
        )
        sampling_value_list.append(sampling_value_l_)
    # (bs, num_queries, num_heads, num_levels, num_points) -> (bs*num_heads, 1, num_queries, num_levels*num_points)
    attention_weights = attention_weights.transpose(1, 2).reshape(
        bs * num_heads, 1, num_queries, num_levels * num_points
    )
    output = (
        (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights)
        .sum(-1)
        .view(bs, num_heads * embed_dims, num_queries)
    )
    return output.transpose(1, 2).contiguous()