"""Activation modules."""

import torch
import torch.nn as nn


class AGLU(nn.Module):
    """
    Unified activation function module from AGLU.

    This class implements a parameterized activation function with learnable parameters lambda and kappa, based on the
    AGLU (Adaptive Gated Linear Unit) approach.

    Attributes:
        act (nn.Softplus): Softplus activation function with negative beta.
        lambd (nn.Parameter): Learnable lambda parameter initialized with uniform distribution.
        kappa (nn.Parameter): Learnable kappa parameter initialized with uniform distribution.

    Methods:
        forward: Compute the forward pass of the Unified activation function.

    Examples:
        >>> import torch
        >>> m = AGLU()
        >>> input = torch.randn(2)
        >>> output = m(input)
        >>> print(output.shape)
        torch.Size([2])

    References:
        https://github.com/kostas1515/AGLU
    """

    def __init__(self, device=None, dtype=None) -> None:
        """Initialize the Unified activation function with learnable parameters."""
        super().__init__()
        self.act = nn.Softplus(beta=-1.0)  # Softplus with negative beta
        self.lambd = nn.Parameter(nn.init.uniform_(torch.empty(1, device=device, dtype=dtype)))  # lambda parameter
        self.kappa = nn.Parameter(nn.init.uniform_(torch.empty(1, device=device, dtype=dtype)))  # kappa parameter

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Apply the Adaptive Gated Linear Unit (AGLU) activation function.

        This forward method implements the AGLU activation function with learnable parameters lambda and kappa.
        The function applies a transformation that adaptively combines linear and non-linear components.

        Args:
            x (torch.Tensor): Input tensor to apply the activation function to.

        Returns:
            (torch.Tensor): Output tensor after applying the AGLU activation function, with the same shape as the input.
        g-C6?)minr   )r   clampr   expr   r   log)r   r   lams      r   forwardzAGLU.forward*   sN     kk$**&1yy!c'TXXtzzA~3.O%PPQQr   )NN)r   N)	__name__
__module____qualname____doc__r   r   Tensorr"   __classcell__)r   s   @r   r   r      s,    4`R R%,, Rr   r   )r&   r   torch.nnr   Moduler    r   r   <module>r,      s       0R299 0Rr   