"""Convolution modules."""

import math
from typing import List

import numpy as np
import torch
import torch.nn as nn

__all__ = (
    "Conv",
    "Conv2",
    "LightConv",
    "DWConv",
    "DWConvTranspose2d",
    "ConvTranspose",
    "Focus",
    "GhostConv",
    "ChannelAttention",
    "SpatialAttention",
    "CBAM",
    "Concat",
    "RepConv",
    "Index",
)


def autopad(k, p=None, d=1):
    """Pad to 'same' shape outputs."""
    if d > 1:
        k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k]  # actual kernel size
    if p is None:
        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
    return p


class Conv(nn.Module):
    """
    Standard convolution module with batch normalization and activation.

    Attributes:
        conv (nn.Conv2d): Convolutional layer.
        bn (nn.BatchNorm2d): Batch normalization layer.
        act (nn.Module): Activation function layer.
        default_act (nn.Module): Default activation function (SiLU).
    """

    default_act = nn.SiLU()  # default activation

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
        """
        Initialize Conv layer with given parameters.

        Args:
            c1 (int): Number of input channels.
            c2 (int): Number of output channels.
            k (int): Kernel size.
            s (int): Stride.
            p (int, optional): Padding.
            g (int): Groups.
            d (int): Dilation.
            act (bool | nn.Module): Activation function.
        """
        super().__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()

    def forward(self, x):
        """
        Apply convolution, batch normalization and activation to input tensor.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor.
        """
        return self.act(self.bn(self.conv(x)))

    def forward_fuse(self, x):
        """
        Apply convolution and activation without batch normalization.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor.
        """
        return self.act(self.conv(x))
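

# Usage sketch (assumed shapes, not part of the original module): a stride-1 Conv
# preserves spatial size thanks to autopad, e.g.
#   m = Conv(16, 32, k=3, s=1)
#   y = m(torch.randn(1, 16, 64, 64))  # -> torch.Size([1, 32, 64, 64])
# forward_fuse() is intended for use after an external fuse step has folded the
# BatchNorm parameters into the conv weights, so it skips self.bn.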


class Conv2(Conv):
    """
    Simplified RepConv module with Conv fusing.

    Attributes:
        conv (nn.Conv2d): Main 3x3 convolutional layer.
        cv2 (nn.Conv2d): Additional 1x1 convolutional layer.
        bn (nn.BatchNorm2d): Batch normalization layer.
        act (nn.Module): Activation function layer.
    """

    def __init__(self, c1, c2, k=3, s=1, p=None, g=1, d=1, act=True):
        """
        Initialize Conv2 layer with given parameters.

        Args:
            c1 (int): Number of input channels.
            c2 (int): Number of output channels.
            k (int): Kernel size.
            s (int): Stride.
            p (int, optional): Padding.
            g (int): Groups.
            d (int): Dilation.
            act (bool | nn.Module): Activation function.
        """
        super().__init__(c1, c2, k, s, p, g=g, d=d, act=act)
        self.cv2 = nn.Conv2d(c1, c2, 1, s, autopad(1, p, d), groups=g, dilation=d, bias=False)  # add 1x1 conv

    def forward(self, x):
        """
        Apply convolution, batch normalization and activation to input tensor.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor.
        """
        return self.act(self.bn(self.conv(x) + self.cv2(x)))

    def forward_fuse(self, x):
        """
        Apply fused convolution, batch normalization and activation to input tensor.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor.
        """
        return self.act(self.bn(self.conv(x)))

    def fuse_convs(self):
        """Fuse parallel convolutions."""
        w = torch.zeros_like(self.conv.weight.data)
        i = [x // 2 for x in w.shape[2:]]
        w[:, :, i[0] : i[0] + 1, i[1] : i[1] + 1] = self.cv2.weight.data.clone()
        self.conv.weight.data += w
        self.__delattr__("cv2")
        self.forward = self.forward_fuse


class LightConv(nn.Module):
    """
    Light convolution module with 1x1 and depthwise convolutions.

    This implementation is based on the PaddleDetection HGNetV2 backbone.

    Attributes:
        conv1 (Conv): 1x1 convolution layer.
        conv2 (DWConv): Depthwise convolution layer.
    """

    def __init__(self, c1, c2, k=1, act=nn.ReLU()):
        """
        Initialize LightConv layer with given parameters.

        Args:
            c1 (int): Number of input channels.
            c2 (int): Number of output channels.
            k (int): Kernel size for depthwise convolution.
            act (nn.Module): Activation function.
        """
        super().__init__()
        self.conv1 = Conv(c1, c2, 1, act=False)
        self.conv2 = DWConv(c2, c2, k, act=act)

    def forward(self, x):
        """
        Apply 2 convolutions to input tensor.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor.
        """
        return self.conv2(self.conv1(x))


class DWConv(Conv):
    """Depth-wise convolution module."""

    def __init__(self, c1, c2, k=1, s=1, d=1, act=True):
        """
        Initialize depth-wise convolution with given parameters.

        Args:
            c1 (int): Number of input channels.
            c2 (int): Number of output channels.
            k (int): Kernel size.
            s (int): Stride.
            d (int): Dilation.
            act (bool | nn.Module): Activation function.
        """
        super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act)


class DWConvTranspose2d(nn.ConvTranspose2d):
    """Depth-wise transpose convolution module."""

    def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0):
        """
        Initialize depth-wise transpose convolution with given parameters.

        Args:
            c1 (int): Number of input channels.
            c2 (int): Number of output channels.
            k (int): Kernel size.
            s (int): Stride.
            p1 (int): Padding.
            p2 (int): Output padding.
        """
        super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))


class ConvTranspose(nn.Module):
    """
    Convolution transpose module with optional batch normalization and activation.

    Attributes:
        conv_transpose (nn.ConvTranspose2d): Transposed convolution layer.
        bn (nn.BatchNorm2d | nn.Identity): Batch normalization layer.
        act (nn.Module): Activation function layer.
        default_act (nn.Module): Default activation function (SiLU).
    """

    default_act = nn.SiLU()  # default activation

    def __init__(self, c1, c2, k=2, s=2, p=0, bn=True, act=True):
        """
        Initialize ConvTranspose layer with given parameters.

        Args:
            c1 (int): Number of input channels.
            c2 (int): Number of output channels.
            k (int): Kernel size.
            s (int): Stride.
            p (int): Padding.
            bn (bool): Use batch normalization.
            act (bool | nn.Module): Activation function.
        """
        super().__init__()
        self.conv_transpose = nn.ConvTranspose2d(c1, c2, k, s, p, bias=not bn)
        self.bn = nn.BatchNorm2d(c2) if bn else nn.Identity()
        self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()

    def forward(self, x):
        """
        Apply transposed convolution, batch normalization and activation to input.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor.
        """
        return self.act(self.bn(self.conv_transpose(x)))

    def forward_fuse(self, x):
        """
        Apply activation and convolution transpose operation to input.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor.
        """
        return self.act(self.conv_transpose(x))
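

# Upsampling sketch (assumed shapes, illustrative only): with the defaults k=2, s=2,
# p=0 the output spatial size doubles, e.g.
#   up = ConvTranspose(64, 32)
#   up(torch.randn(1, 64, 20, 20)).shape  # -> torch.Size([1, 32, 40, 40])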


class Focus(nn.Module):
    """
    Focus module for concentrating feature information.

    Slices input tensor into 4 parts and concatenates them in the channel dimension.

    Attributes:
        conv (Conv): Convolution layer.
    """

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        """
        Initialize Focus module with given parameters.

        Args:
            c1 (int): Number of input channels.
            c2 (int): Number of output channels.
            k (int): Kernel size.
            s (int): Stride.
            p (int, optional): Padding.
            g (int): Groups.
            act (bool | nn.Module): Activation function.
        """
        super().__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act)

    def forward(self, x):
        """
        Apply Focus operation and convolution to input tensor.

        Input shape is (B, C, W, H) and output shape is (B, 4C, W/2, H/2).

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor.
        """
        return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1))


class GhostConv(nn.Module):
    """
    Ghost Convolution module.

    Generates more features with fewer parameters by using cheap operations.

    Attributes:
        cv1 (Conv): Primary convolution.
        cv2 (Conv): Cheap operation convolution.

    References:
        https://github.com/huawei-noah/Efficient-AI-Backbones
    """

    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):
        """
        Initialize Ghost Convolution module with given parameters.

        Args:
            c1 (int): Number of input channels.
            c2 (int): Number of output channels.
            k (int): Kernel size.
            s (int): Stride.
            g (int): Groups.
            act (bool | nn.Module): Activation function.
        """
        super().__init__()
        c_ = c2 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, k, s, None, g, act=act)
        self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act)  # cheap 5x5 depth-wise operation

    def forward(self, x):
        """
        Apply Ghost Convolution to input tensor.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor with concatenated features.
        """
        y = self.cv1(x)
        return torch.cat((y, self.cv2(y)), 1)
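

# Channel bookkeeping sketch (illustrative, assumed shapes): cv1 produces c2 // 2
# "primary" maps and cv2 derives c2 // 2 "ghost" maps from them with a cheap
# depth-wise 5x5 conv, so the concat yields c2 channels total:
#   g = GhostConv(64, 128)
#   g(torch.randn(1, 64, 40, 40)).shape  # -> torch.Size([1, 128, 40, 40])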


class RepConv(nn.Module):
    """
    RepConv module with training and deploy modes.

    This module is used in RT-DETR and can fuse convolutions during inference for efficiency.

    Attributes:
        conv1 (Conv): 3x3 convolution.
        conv2 (Conv): 1x1 convolution.
        bn (nn.BatchNorm2d, optional): Batch normalization for identity branch.
        act (nn.Module): Activation function.
        default_act (nn.Module): Default activation function (SiLU).

    References:
        https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py
    """

    default_act = nn.SiLU()  # default activation

    def __init__(self, c1, c2, k=3, s=1, p=1, g=1, d=1, act=True, bn=False, deploy=False):
        """
        Initialize RepConv module with given parameters.

        Args:
            c1 (int): Number of input channels.
            c2 (int): Number of output channels.
            k (int): Kernel size.
            s (int): Stride.
            p (int): Padding.
            g (int): Groups.
            d (int): Dilation.
            act (bool | nn.Module): Activation function.
            bn (bool): Use batch normalization for identity branch.
            deploy (bool): Deploy mode for inference.
        """
        super().__init__()
        assert k == 3 and p == 1
        self.g = g
        self.c1 = c1
        self.c2 = c2
        self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()

        self.bn = nn.BatchNorm2d(num_features=c1) if bn and c2 == c1 and s == 1 else None
        self.conv1 = Conv(c1, c2, k, s, p=p, g=g, act=False)
        self.conv2 = Conv(c1, c2, 1, s, p=(p - k // 2), g=g, act=False)

    def forward_fuse(self, x):
        """
        Forward pass for deploy mode.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor.
        """
        return self.act(self.conv(x))

    def forward(self, x):
        """
        Forward pass for training mode.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor.
        """
        id_out = 0 if self.bn is None else self.bn(x)
        return self.act(self.conv1(x) + self.conv2(x) + id_out)

    def get_equivalent_kernel_bias(self):
        """
        Calculate equivalent kernel and bias by fusing convolutions.

        Returns:
            (torch.Tensor): Equivalent kernel
            (torch.Tensor): Equivalent bias
        """
        kernel3x3, bias3x3 = self._fuse_bn_tensor(self.conv1)
        kernel1x1, bias1x1 = self._fuse_bn_tensor(self.conv2)
        kernelid, biasid = self._fuse_bn_tensor(self.bn)
        return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid

    @staticmethod
    def _pad_1x1_to_3x3_tensor(kernel1x1):
        """
        Pad a 1x1 kernel to 3x3 size.

        Args:
            kernel1x1 (torch.Tensor): 1x1 convolution kernel.

        Returns:
            (torch.Tensor): Padded 3x3 kernel.
        """
        if kernel1x1 is None:
            return 0
        return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1])

    def _fuse_bn_tensor(self, branch):
        """
        Fuse batch normalization with convolution weights.

        Args:
            branch (Conv | nn.BatchNorm2d | None): Branch to fuse.

        Returns:
            kernel (torch.Tensor): Fused kernel.
            bias (torch.Tensor): Fused bias.
        """
        if branch is None:
            return 0, 0
        if isinstance(branch, Conv):
            kernel = branch.conv.weight
            running_mean = branch.bn.running_mean
            running_var = branch.bn.running_var
            gamma = branch.bn.weight
            beta = branch.bn.bias
            eps = branch.bn.eps
        elif isinstance(branch, nn.BatchNorm2d):
            if not hasattr(self, "id_tensor"):
                input_dim = self.c1 // self.g
                kernel_value = np.zeros((self.c1, input_dim, 3, 3), dtype=np.float32)
                for i in range(self.c1):
                    kernel_value[i, i % input_dim, 1, 1] = 1
                self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)
            kernel = self.id_tensor
            running_mean = branch.running_mean
            running_var = branch.running_var
            gamma = branch.weight
            beta = branch.bias
            eps = branch.eps
        std = (running_var + eps).sqrt()
        t = (gamma / std).reshape(-1, 1, 1, 1)
        return kernel * t, beta - running_mean * gamma / std

    def fuse_convs(self):
        """Fuse convolutions for inference by creating a single equivalent convolution."""
        if hasattr(self, "conv"):
            return
        kernel, bias = self.get_equivalent_kernel_bias()
        self.conv = nn.Conv2d(
            in_channels=self.conv1.conv.in_channels,
            out_channels=self.conv1.conv.out_channels,
            kernel_size=self.conv1.conv.kernel_size,
            stride=self.conv1.conv.stride,
            padding=self.conv1.conv.padding,
            dilation=self.conv1.conv.dilation,
            groups=self.conv1.conv.groups,
            bias=True,
        ).requires_grad_(False)
        self.conv.weight.data = kernel
        self.conv.bias.data = bias
        for para in self.parameters():
            para.detach_()
        self.__delattr__("conv1")
        self.__delattr__("conv2")
        if hasattr(self, "nm"):
            self.__delattr__("nm")
        if hasattr(self, "bn"):
            self.__delattr__("bn")
        if hasattr(self, "id_tensor"):
            self.__delattr__("id_tensor")


class ChannelAttention(nn.Module):
    """
    Channel-attention module for feature recalibration.

    Applies attention weights to channels based on global average pooling.

    Attributes:
        pool (nn.AdaptiveAvgPool2d): Global average pooling.
        fc (nn.Conv2d): Fully connected layer implemented as 1x1 convolution.
        act (nn.Sigmoid): Sigmoid activation for attention weights.

    References:
        https://github.com/open-mmlab/mmdetection/tree/v3.0.0rc1/configs/rtmdet
    """

    def __init__(self, channels: int) -> None:
        """
        Initialize Channel-attention module.

        Args:
            channels (int): Number of input channels.
        """
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Conv2d(channels, channels, 1, 1, 0, bias=True)
        self.act = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Apply channel attention to input tensor.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Channel-attended output tensor.
        """
        return x * self.act(self.fc(self.pool(x)))


class SpatialAttention(nn.Module):
    """
    Spatial-attention module for feature recalibration.

    Applies attention weights to spatial dimensions based on channel statistics.

    Attributes:
        cv1 (nn.Conv2d): Convolution layer for spatial attention.
        act (nn.Sigmoid): Sigmoid activation for attention weights.
    """

    def __init__(self, kernel_size=7):
        """
        Initialize Spatial-attention module.

        Args:
            kernel_size (int): Size of the convolutional kernel (3 or 7).
        """
        super().__init__()
        assert kernel_size in {3, 7}, "kernel size must be 3 or 7"
        padding = 3 if kernel_size == 7 else 1
        self.cv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
        self.act = nn.Sigmoid()

    def forward(self, x):
        """
        Apply spatial attention to input tensor.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Spatial-attended output tensor.
        """
        return x * self.act(self.cv1(torch.cat([torch.mean(x, 1, keepdim=True), torch.max(x, 1, keepdim=True)[0]], 1)))


class CBAM(nn.Module):
    """
    Convolutional Block Attention Module.

    Combines channel and spatial attention mechanisms for comprehensive feature refinement.

    Attributes:
        channel_attention (ChannelAttention): Channel attention module.
        spatial_attention (SpatialAttention): Spatial attention module.
    """

    def __init__(self, c1, kernel_size=7):
        """
        Initialize CBAM with given parameters.

        Args:
            c1 (int): Number of input channels.
            kernel_size (int): Size of the convolutional kernel for spatial attention.
        """
        super().__init__()
        self.channel_attention = ChannelAttention(c1)
        self.spatial_attention = SpatialAttention(kernel_size)

    def forward(self, x):
        """
        Apply channel and spatial attention sequentially to input tensor.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Attended output tensor.
        """
        return self.spatial_attention(self.channel_attention(x))


class Concat(nn.Module):
    """
    Concatenate a list of tensors along specified dimension.

    Attributes:
        d (int): Dimension along which to concatenate tensors.
    """

    def __init__(self, dimension=1):
        """
        Initialize Concat module.

        Args:
            dimension (int): Dimension along which to concatenate tensors.
        """
        super().__init__()
        self.d = dimension

    def forward(self, x: List[torch.Tensor]):
        """
        Concatenate input tensors along specified dimension.

        Args:
            x (List[torch.Tensor]): List of input tensors.

        Returns:
            (torch.Tensor): Concatenated tensor.
        """
        return torch.cat(x, self.d)


class Index(nn.Module):
    """
    Returns a particular index of the input.

    Attributes:
        index (int): Index to select from input.
    """

    def __init__(self, index=0):
        """
        Initialize Index module.

        Args:
            index (int): Index to select from input.
        """
        super().__init__()
        self.index = index

    def forward(self, x: List[torch.Tensor]):
        """
        Select and return a particular index from input.

        Args:
            x (List[torch.Tensor]): List of input tensors.

        Returns:
            (torch.Tensor): Selected tensor.
        """
        return x[self.index]