from ._ops import OpOverload
from typing import Any, Optional, Set, List
import traceback
import torch
import weakref
import functools
import inspect
import re
import sys
import contextlib

__all__ = [
    'Library',
    'impl',
    'define',
    'fallthrough_kernel',
    'impl_abstract',
    'get_ctx',
]

# Every (namespace, op, dispatch key) combination registered from Python is
# recorded in these sets so that two libraries cannot override the same kernel.
_impls: Set[str] = set()
_defs: Set[str] = set()

# prim is reserved by the TorchScript interpreter
_reserved_namespaces = ['prim']


def fallthrough_kernel():
    """
    A dummy function to pass to ``Library.impl`` in order to register a fallthrough.
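
    Example::
        >>> # a sketch: make one dispatch key fall through for an overridden op
        >>> my_lib = Library("aten", "IMPL")
        >>> my_lib.impl("div.Tensor", fallthrough_kernel, "AutocastCPU")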
    """
    raise NotImplementedError("fallthrough_kernel() should never be called.")


class Library:
    """
    A class to create libraries that can be used to register new operators or
    override operators in existing libraries from Python.
    A user can optionally pass in a dispatch key name if they only want to
    register kernels corresponding to one specific dispatch key.

    To create a library to override operators in an existing library (with name ns), set the kind to "IMPL".
    To create a new library (with name ns) to register new operators, set the kind to "DEF".
    To create a fragment of a possibly existing library to register operators (and bypass
    the limitation that there is only one library for a given namespace), set the kind to
    "FRAGMENT".

    Args:
        ns: library name
        kind: "DEF", "IMPL", "FRAGMENT"
        dispatch_key: PyTorch dispatch key (default: "")
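
    Example (a sketch of all three kinds; the ``mylib`` namespace is
    hypothetical)::

        >>> overlay = Library("aten", "IMPL")     # override ops in an existing library
        >>> fresh = Library("mylib", "DEF")       # define ops in a new namespace
        >>> extra = Library("mylib", "FRAGMENT")  # add more ops to "mylib" later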
    """
    def __init__(self, ns, kind, dispatch_key=""):
        if kind not in ('IMPL', 'DEF', 'FRAGMENT'):
            raise ValueError("Unsupported kind: ", kind)

        if ns in _reserved_namespaces and (kind == 'DEF' or kind == 'FRAGMENT'):
            raise ValueError(ns, " is a reserved namespace. Please try creating a library with another name.")

        frame = traceback.extract_stack(limit=3)[0]
        filename, lineno = frame.filename, frame.lineno
        self.m: Optional[Any] = torch._C._dispatch_library(kind, ns, dispatch_key, filename, lineno)
        self.ns = ns
        self._op_defs: Set[str] = set()
        self._op_impls: Set[str] = set()
        self._registration_handles: List[Any] = []
        self.kind = kind
        self.dispatch_key = dispatch_key
        # Use a finalizer instead of __del__: the finalizer captures everything
        # it needs, so cleanup still works during interpreter shutdown when
        # globals may already be gone.
        weakref.finalize(self, _del_library, _impls, self._op_impls, _defs,
                         self._op_defs, self._registration_handles)

    def __repr__(self):
        return f"Library(kind={self.kind}, ns={self.ns}, dispatch_key={self.dispatch_key})"

    def define(self, schema, alias_analysis="", *, tags=()):
        r'''Defines a new operator and its semantics in the ns namespace.

        Args:
            schema: function schema to define a new operator.
            alias_analysis (optional): Indicates if the aliasing properties of the operator arguments can be
                                       inferred from the schema (default behavior) or not ("CONSERVATIVE").
            tags (Tag | Sequence[Tag]): one or more torch.Tag to apply to this
                                       operator. Tagging an operator changes the operator's behavior
                                       under various PyTorch subsystems; please read the docs for the
                                       torch.Tag carefully before applying it.

        Returns:
            name of the operator as inferred from the schema.

        Example::
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LIBRARY)
            >>> my_lib = Library("foo", "DEF")
            >>> my_lib.define("sum(Tensor self) -> Tensor")
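            >>> # follow-up sketch: the returned name can be used to register a
            >>> # (dummy) CPU kernel for the new operator
            >>> my_lib.impl("sum", lambda self: self.sum(), "CPU")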
        '''
        # This also disallows PURE_FUNCTION alias analysis, which is a valid
        # AliasAnalysis type in C++ but not one we want callers to use here.
        if alias_analysis not in ["", "FROM_SCHEMA", "CONSERVATIVE"]:
            raise RuntimeError(f"Invalid alias_analysis type {alias_analysis}")
        assert self.m is not None
        if isinstance(tags, torch.Tag):
            tags = (tags,)
        result = self.m.define(schema, alias_analysis, tuple(tags))
        qualname = self.ns + "::" + schema.split("(")[0]
        self._op_defs.add(qualname)
        _defs.add(qualname)
        return result

    def impl(self, op_name, fn, dispatch_key=''):
        r'''Registers the function implementation for an operator defined in the library.

        Args:
            op_name: operator name (along with the overload) or OpOverload object.
            fn: function that's the operator implementation for the input dispatch key or :func:`~fallthrough_kernel`
                to register a fallthrough.
            dispatch_key: dispatch key that the input function should be registered for. By default, it uses
                          the dispatch key that the library was created with.

        Example::
            >>> my_lib = Library("aten", "IMPL")
            >>> def div_cpu(self, other):
            >>>     return self * (1 / other)
            >>> my_lib.impl("div.Tensor", div_cpu, "CPU")
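            >>> # also a sketch: an OpOverload object may be passed instead of
            >>> # the string name (a different key avoids re-registering CPU)
            >>> my_lib.impl(torch.ops.aten.div.Tensor, div_cpu, "MPS")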
        '''
        if not callable(fn):
            raise TypeError(f"Input function is required to be a callable but found type {type(fn)}")
        if dispatch_key == '':
            dispatch_key = self.dispatch_key

        if isinstance(op_name, str):
            name = op_name
        elif isinstance(op_name, OpOverload):
            name = op_name._schema.name
            overload_name = op_name._schema.overload_name
            if overload_name != '':
                name = name + '.' + overload_name
        else:
            raise RuntimeError("impl should be passed either a name or an OpOverload object as the first argument")

        key = self.ns + "/" + name.split("::")[-1] + "/" + dispatch_key
        if key in _impls:
            raise RuntimeError("This is not allowed since there's already a kernel registered from python "
                               "overriding {}'s behavior for {} dispatch key and {} namespace.".
                               format(name.split("::")[-1], dispatch_key, self.ns))

        if dispatch_key == "Meta":
            dispatcher_op_name = name
            if '::' not in dispatcher_op_name:
                dispatcher_op_name = f'{self.ns}::{dispatcher_op_name}'
            # Ops with a CompositeImplicitAutograd kernel should decompose;
            # only the base ops they decompose into should get meta kernels.
            if torch._C._dispatch_has_kernel_for_dispatch_key(dispatcher_op_name, "CompositeImplicitAutograd"):
                raise RuntimeError(
                    f"We should not register a meta kernel directly to the operator '{name}', "
                    "because it has a CompositeImplicitAutograd kernel in core. "
                    "Instead we should let the operator decompose, and ensure that we have meta kernels "
                    "for the base ops that it decomposes into.")

        assert self.m is not None
        self.m.impl(name, dispatch_key if dispatch_key != "" else "CompositeImplicitAutograd", fn)

        _impls.add(key)
        self._op_impls.add(key)

    def _destroy(self):
        if self.m is not None:
            self.m.reset()
        self.m = None
        for handle in self._registration_handles:
            handle.destroy()
        self._registration_handles.clear()
        global _impls
        _impls -= self._op_impls
        for name in self._op_defs:
            # Delete the cached torch.ops.ns.op accessor, if it was cached,
            # so that later lookups do not hit a stale entry.
            ns, name_with_overload = name.split("::")
            name = name_with_overload.split(".")[0]
            if not hasattr(torch.ops, ns):
                continue
            namespace = getattr(torch.ops, ns)
            if not hasattr(namespace, name):
                continue
            delattr(namespace, name)


def _del_library(captured_impls, op_impls, captured_defs, op_defs, registration_handles):
    captured_impls -= op_impls
    captured_defs -= op_defs
    for handle in registration_handles:
        handle.destroy()


@contextlib.contextmanager
def _scoped_library(*args, **kwargs):
    try:
        lib = Library(*args, **kwargs)
        yield lib
    finally:
        lib._destroy()


_keep_alive: List[Library] = []

NAMELESS_SCHEMA = re.compile(r"\(.*\) -> .*")


@functools.singledispatch
def define(qualname, schema, *, lib=None, tags=()):
    r"""Defines a new operator.

    In PyTorch, defining an op (short for "operator") is a two-step process:
    - we need to define the op (by providing an operator name and schema)
    - we need to implement behavior for how the operator interacts with
    various PyTorch subsystems, like CPU/CUDA Tensors, Autograd, etc.

    This entrypoint defines the custom operator (the first step);
    you must then perform the second step by calling various
    ``impl_*`` APIs, like :func:`torch.library.impl` or
    :func:`torch.library.impl_abstract`.

    Args:
        qualname (str): The qualified name for the operator. Should be
            a string that looks like "namespace::name", e.g. "aten::sin".
            Operators in PyTorch need a namespace to
            avoid name collisions; a given operator may only be created once.
            If you are writing a Python library, we recommend the namespace to
            be the name of your top-level module.
        schema (str): The schema of the operator. E.g. "(Tensor x) -> Tensor"
            for an op that accepts one Tensor and returns one Tensor. It does
            not contain the operator name (that is passed in ``qualname``).
        lib (Optional[Library]): If provided, the lifetime of this operator
            will be tied to the lifetime of the Library object.
        tags (Tag | Sequence[Tag]): one or more torch.Tag to apply to this
            operator. Tagging an operator changes the operator's behavior
            under various PyTorch subsystems; please read the docs for the
            torch.Tag carefully before applying it.

    Example::
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LIBRARY)
        >>> import torch
        >>> import numpy as np
        >>>
        >>> # Define the operator
        >>> torch.library.define("mylib::sin", "(Tensor x) -> Tensor")
        >>>
        >>> # Add implementations for the operator
        >>> @torch.library.impl("mylibrary::sin", "cpu")
        >>> def f(x):
        >>>     return torch.from_numpy(np.sin(x.numpy()))
        >>>
        >>> # Call the new operator from torch.ops.
        >>> x = torch.randn(3)
        >>> y = torch.ops.mylib.sin(x)
        >>> assert torch.allclose(y, x.sin())

    """
    if not isinstance(qualname, str):
        raise ValueError(
            f"define(qualname, schema): expected qualname "
            f"to be instance of str, got {type(qualname)}")
    namespace, name = torch._library.utils.parse_namespace(qualname)
    if lib is None:
        lib = Library(namespace, "FRAGMENT")
        _keep_alive.append(lib)
    if not NAMELESS_SCHEMA.fullmatch(schema):
        raise ValueError(
            f"define(qualname, schema, ...): expected schema "
            f'to look like e.g. "(Tensor x) -> Tensor" but got "{schema}"')
    lib.define(name + schema, alias_analysis="", tags=tags)


@define.register
def _(lib: Library, schema, alias_analysis=""):
    """The old torch.library.define.
    We're keeping this around for BC reasons
    """
    def wrap(f):
        name = lib.define(schema, alias_analysis)
        lib.impl(name, f)
        return f
    return wrap


@functools.singledispatch
def impl(qualname, types, func=None, *, lib=None):
    """Register an implementation for a device type for this operator.

    You may pass "default" for ``types`` to register this implementation as the
    default implementation for ALL device types.
    Please only use this if the implementation truly supports all device types;
    for example, this is true if it is a composition of built-in PyTorch operators.

    Some valid types are: "cpu", "cuda", "xla", "mps", "ipu", "xpu".
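
    For example, a kernel built purely out of existing PyTorch operators can be
    registered to every backend at once (a sketch; ``mylibrary::twice`` is a
    hypothetical operator)::

        >>> torch.library.impl("mylibrary::twice", "default", lambda x: x + x)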

    Args:
        qualname (str): Should be a string that looks like "namespace::operator_name".
        types (str | Sequence[str]): The device types to register an impl to.
        lib (Optional[Library]): If provided, the lifetime of this registration
            will be tied to the lifetime of the Library object.

    Examples:
        >>> import torch
        >>> import numpy as np
        >>>
        >>> # Define the operator
        >>> torch.library.define("mylibrary::sin", "(Tensor x) -> Tensor")
        >>>
        >>> # Add implementations for the cpu device
        >>> @torch.library.impl("mylibrary::sin", "cpu")
        >>> def f(x):
        >>>     return torch.from_numpy(np.sin(x.numpy()))
        >>>
        >>> x = torch.randn(3)
        >>> y = torch.ops.mylibrary.sin(x)
        >>> assert torch.allclose(y, x.sin())
    """
    if isinstance(types, str):
        types = (types,)
    keys = set({})
    for typ in types:
        is_dispatch_key = torch._C._parse_dispatch_key(typ)
        if is_dispatch_key:
            # We also accept a raw DispatchKey here, but we don't advertise
            # it: it is very easy to shoot yourself in the foot with one.
            keys.add(typ)
        else:
            keys.add(_device_type_to_key(typ))

    def register(func):
        namespace, _ = torch._library.utils.parse_namespace(qualname)
        if lib is None:
            use_lib = Library(namespace, "FRAGMENT")
            _keep_alive.append(use_lib)
        else:
            use_lib = lib
        for key in keys:
            use_lib.impl(qualname, func, key)

    if func is None:
        return register
    else:
        register(func)


def _device_type_to_key(device_type: str) -> str:
    if device_type == "default":
        # Not strictly correct: CompositeExplicitAutograd is a superset of the
        # per-device keys, but the distinction rarely matters here.
        return "CompositeExplicitAutograd"
    return torch._C._dispatch_key_for_device(device_type)


@impl.register
def _(lib: Library, name, dispatch_key=""):
    """Legacy torch.library.impl API. Kept around for BC"""
    def wrap(f):
        lib.impl(name, f, dispatch_key)
        return f
    return wrap


def impl_abstract(qualname, func=None, *, lib=None, _stacklevel=1):
    r"""Register an abstract implementation for this operator.

    An "abstract implementation" specifies the behavior of this operator on
    Tensors that carry no data. Given some input Tensors with certain properties
    (sizes/strides/storage_offset/device), it specifies what the properties of
    the output Tensors are.

    The abstract implementation has the same signature as the operator.
    It is run for both FakeTensors and meta tensors. To write an abstract
    implementation, assume that all Tensor inputs to the operator are
    regular CPU/CUDA/Meta tensors, but they do not have storage, and
    you are trying to return regular CPU/CUDA/Meta tensor(s) as output.
    The abstract implementation must consist of only PyTorch operations
    (and may not directly access the storage or data of any input or
    intermediate Tensors).
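
    For example, for an operator that returns a tensor shaped like its input,
    the abstract impl only needs a metadata-level computation (a minimal
    sketch for a hypothetical op; ``new_empty`` preserves device and dtype)::

        >>> def my_op_abstract(x):
        >>>     return x.new_empty(x.shape)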

    This API may be used as a decorator (see examples).

    For a detailed guide on custom ops, please see
    https://docs.google.com/document/d/1W--T6wz8IY8fOI0Vm8BF44PdBgs283QvpelJZWieQWQ/edit

    Examples:
        >>> import torch
        >>> import numpy as np
        >>> from torch import Tensor
        >>>
        >>> # Example 1: an operator without data-dependent output shape
        >>> torch.library.define(
        >>>     "mylib::custom_linear",
        >>>     "(Tensor x, Tensor weight, Tensor bias) -> Tensor")
        >>>
        >>> @torch.library.impl_abstract("mylib::custom_linear")
        >>> def custom_linear_abstract(x, weight, bias):
        >>>     assert x.dim() == 2
        >>>     assert weight.dim() == 2
        >>>     assert bias.dim() == 1
        >>>     assert x.shape[1] == weight.shape[1]
        >>>     assert weight.shape[0] == bias.shape[0]
        >>>     assert x.device == weight.device
        >>>
        >>>     return (x @ weight.t()) + bias
        >>>
        >>> # Example 2: an operator with data-dependent output shape
        >>> torch.library.define("mylib::custom_nonzero", "(Tensor x) -> Tensor")
        >>>
        >>> @torch.library.impl_abstract("mylib::custom_nonzero")
        >>> def custom_nonzero_abstract(x):
        >>>     # Number of nonzero-elements is data-dependent.
        >>>     # Since we cannot peek at the data in an abstract impl,
        >>>     # we use the ctx object to construct a new symint that
        >>>     # represents the data-dependent size.
        >>>     ctx = torch.library.get_ctx()
        >>>     nnz = ctx.new_dynamic_size()
        >>>     shape = [nnz, x.dim()]
        >>>     result = x.new_empty(shape, dtype=torch.int64)
        >>>     return result
        >>>
        >>> @torch.library.impl("mylib::custom_nonzero", "cpu")
        >>> def custom_nonzero_cpu(x):
        >>>     x_np = x.numpy()
        >>>     res = np.stack(np.nonzero(x_np), axis=1)
        >>>     return torch.tensor(res, device=x.device)

    """
    source = torch._library.utils.get_source(_stacklevel + 1)
    frame = sys._getframe(_stacklevel)
    caller_module = inspect.getmodule(frame)
    # caller_module can be None if impl_abstract is called from somewhere
    # without a module (e.g. __main__).
    caller_module_name = None if caller_module is None else caller_module.__name__

    # TODO(rzou): We're gonna need to stage this change with torchvision,
    # since torchvision is github first.
    if caller_module_name is not None and caller_module_name.startswith("torchvision."):
        caller_module_name = None

    def inner(func):
        entry = torch._library.simple_registry.singleton.find(qualname)
        if caller_module_name is not None:
            func_to_register = _check_pystubs_once(func, qualname, caller_module_name)
        else:
            func_to_register = func

        handle = entry.abstract_impl.register(func_to_register, source)
        if lib is not None:
            lib._registration_handles.append(handle)
        return func

    if func is None:
        return inner
    return inner(func)


# If the op was defined in C++, make sure there was an
# m.impl_abstract_pystub(module, ...) call and that the module is the
# same as the module that called torch.library.impl_abstract.
def _check_pystubs_once(func, qualname, actual_module_name):
    checked = False

    def inner(*args, **kwargs):
        nonlocal checked
        if checked:
            return func(*args, **kwargs)

        op = torch._library.utils.lookup_op(qualname)
        if op._defined_in_python:
            checked = True
            return func(*args, **kwargs)

        maybe_pystub = torch._C._dispatch_pystub(
            op._schema.name, op._schema.overload_name)
        if maybe_pystub is None:
            namespace = op.namespace
            cpp_filename = op._handle.debug()
            raise RuntimeError(
                f"Operator '{qualname}' was defined in C++ and has a Python "
                f"abstract impl. In this situation, we require there to also be a "
                f"companion C++ `m.impl_abstract_pystub(\"{actual_module_name}\")` "
                f"call, but we could not find one. Please add that to "
                f"the top of the C++ TORCH_LIBRARY({namespace}, ...) block the "
                f"operator was registered in ({cpp_filename})")
        pystub_module = maybe_pystub[0]
        if actual_module_name != pystub_module:
            cpp_filename = op._handle.debug()
            raise RuntimeError(
                f"Operator '{qualname}' specified that its python abstract impl "
                f"is in the Python module '{pystub_module}' but it was actually found "
                f"in '{actual_module_name}'. Please either move the abstract impl "
                f"or correct the m.impl_abstract_pystub call ({cpp_filename})")
        checked = True
        return func(*args, **kwargs)
    return inner


def get_ctx() -> "torch._library.abstract_impl.AbstractImplCtx":
    """get_ctx() returns the current AbstractImplCtx object.

    Calling ``get_ctx()`` is only valid inside of an abstract impl
    (see :func:`torch.library.impl_abstract` for more usage details.)
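
    Example::
        >>> # a sketch: inside an abstract impl, create a data-dependent size
        >>> ctx = torch.library.get_ctx()
        >>> nnz = ctx.new_dynamic_size()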
    """
    return torch._library.abstract_impl.global_ctx_getter()