aten_functions

experimental_experiment.torch_interpreter._aten_functions.aten_FunctionCtx(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], *args, **kwargs)[source]

not implemented

experimental_experiment.torch_interpreter._aten_functions.aten__enter_autocast(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], *args: List[Any]) str[source]

Returns a dummy result which is removed after the graph is created.

experimental_experiment.torch_interpreter._aten_functions.aten__exit_autocast(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], output_of_enter_auto_cast: str) str[source]

Returns a dummy result which is removed after the graph is created.

experimental_experiment.torch_interpreter._aten_functions.aten__log_softmax(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dim: int = -1, unnamed: bool = False, dtype: torch.dtype | None = None) str[source]

logsoftmax
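
This relies on the identity log_softmax(x) = x - logsumexp(x). A minimal check of the semantics the converter has to reproduce (plain torch, not the GraphBuilder API):

    import torch

    x = torch.randn(2, 5)
    assert torch.allclose(
        torch.log_softmax(x, dim=-1),
        x - x.logsumexp(dim=-1, keepdim=True),  # the identity above
        atol=1e-6,
    )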

experimental_experiment.torch_interpreter._aten_functions.aten__log_softmax_backward_data(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], grad_output: str, output: str, dim: int, input_dtype: torch.dtype | None = None)[source]

logsoftmax backward

experimental_experiment.torch_interpreter._aten_functions.aten__prelu_kernel(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, weight: str) str[source]

prelu

experimental_experiment.torch_interpreter._aten_functions.aten__prelu_kernel_backward(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], grad_output: str, x: str, weight: str) Tuple[str, str][source]

prelu backward

experimental_experiment.torch_interpreter._aten_functions.aten__scaled_dot_product_flash_attention_for_cpu(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], query: str, key: str, value: str, dropout_p: float = 0.0, is_causal: bool = False, attn_mask: str | None = None, scale: float | None = None, return_debug_mask: bool = False, name: str = '_scaled_dot_product_flash_attention_for_cpu_default') Tuple[str, str, str, str, str, str, str, str, str][source]

_scaled_dot_product_flash_attention

experimental_experiment.torch_interpreter._aten_functions.aten__set_grad_enabled(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], enable: bool) str[source]

Returns a dummy result which is removed after the graph is created.

experimental_experiment.torch_interpreter._aten_functions.aten__softmax(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dim: int = -1, half_to_float: bool = False) str[source]

softmax

experimental_experiment.torch_interpreter._aten_functions.aten__softmax_backward_data(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], grad_output: str, y: str, dim: int, input_dtype: torch.dtype | None = None) str[source]

softmax backward

experimental_experiment.torch_interpreter._aten_functions.aten__to_copy(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dtype: torch.dtype | None = None, layout=None, device: torch.device | None = None, pin_memory=None, non_blocking=False, memory_format=None) str[source]

identity

experimental_experiment.torch_interpreter._aten_functions.aten__unsafe_index_put(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], self: str, indices: List[str], values: str, accumulate: bool = False) str[source]

[…, :, …]

experimental_experiment.torch_interpreter._aten_functions.aten__unsafe_view(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, size: str) str[source]

reshape

experimental_experiment.torch_interpreter._aten_functions.aten_abs(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

abs

experimental_experiment.torch_interpreter._aten_functions.aten_acos(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

acos

experimental_experiment.torch_interpreter._aten_functions.aten_acosh(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

acosh

experimental_experiment.torch_interpreter._aten_functions.aten_add(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, name: str = 'add') str[source]

add

experimental_experiment.torch_interpreter._aten_functions.aten_add_Scalar(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, alpha: Any | None = None) str[source]

add

experimental_experiment.torch_interpreter._aten_functions.aten_add_Tensor(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, alpha: Any | None = None) str[source]

add
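
torch.add takes an optional alpha multiplier on the second operand, so the lowering is x + alpha * y rather than a plain Add whenever alpha is not 1. A small semantic check, assuming standard torch.add behavior:

    import torch

    x, y = torch.randn(3), torch.randn(3)
    # alpha scales only the second operand
    assert torch.allclose(torch.add(x, y, alpha=2.0), x + 2.0 * y)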

experimental_experiment.torch_interpreter._aten_functions.aten_addmm(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], a: str, b: str, c: str, beta: float = 1.0, alpha: float = 1.0) str[source]

gemm
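
Assuming the a/b/c arguments follow torch.addmm(input, mat1, mat2), the result is beta * input + alpha * (mat1 @ mat2), which is exactly ONNX Gemm with C bound to input. A reference check in plain torch:

    import torch

    inp, m1, m2 = torch.randn(2, 4), torch.randn(2, 3), torch.randn(3, 4)
    beta, alpha = 0.5, 2.0
    assert torch.allclose(
        torch.addmm(inp, m1, m2, beta=beta, alpha=alpha),
        beta * inp + alpha * (m1 @ m2),  # Gemm decomposition
        atol=1e-6,
    )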

experimental_experiment.torch_interpreter._aten_functions.aten_alias(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

identity

experimental_experiment.torch_interpreter._aten_functions.aten_all(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

cast

experimental_experiment.torch_interpreter._aten_functions.aten_amax(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dim: int | None = None, keepdim: bool = False, output_dtype: torch.dtype | None = None, name: str = 'aten_amax') str[source]

reducemax

experimental_experiment.torch_interpreter._aten_functions.aten_and(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, name='and') str[source]

and

experimental_experiment.torch_interpreter._aten_functions.aten_and_(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, name='and') str[source]

and

experimental_experiment.torch_interpreter._aten_functions.aten_arange(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], start: int | None = None, end: int | None = None, step: int = 1, dtype: torch.dtype | None = None, layout=None, device: torch.device | None = None, pin_memory=None, name: str = 'arange', requires_grad: bool = False) str[source]

arange

experimental_experiment.torch_interpreter._aten_functions.aten_arange_start(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], start: int | None = None, end: int | None = None, dtype: torch.dtype | None = None, layout=None, device: torch.device | None = None, pin_memory=None) str[source]

arange

experimental_experiment.torch_interpreter._aten_functions.aten_arange_start_step(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], start: int | None = None, end: int | None = None, step: int = 1, dtype: torch.dtype | None = None, layout=None, device: torch.device | None = None, pin_memory=None) str[source]

arange
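
All three overloads share the semantics of ONNX Range(start, limit, delta). A sketch of the expected values, with numpy standing in for the runtime:

    import numpy as np
    import torch

    start, end, step = 1, 10, 2
    # torch.arange and np.arange agree on [start, end) with stride step
    assert np.array_equal(
        torch.arange(start, end, step).numpy(),
        np.arange(start, end, step),
    )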

experimental_experiment.torch_interpreter._aten_functions.aten_argmax(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dim: int | None = None, keepdim: bool = False) str[source]

argmax

experimental_experiment.torch_interpreter._aten_functions.aten_as_strided(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, size: List[int], stride: List[int], storage_offset: int | None = None) str[source]

as_strided

experimental_experiment.torch_interpreter._aten_functions.aten_asin(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

asin

experimental_experiment.torch_interpreter._aten_functions.aten_asinh(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

asinh

experimental_experiment.torch_interpreter._aten_functions.aten_atan(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

atan

experimental_experiment.torch_interpreter._aten_functions.aten_atanh(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

atanh

experimental_experiment.torch_interpreter._aten_functions.aten_bmm(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str) str[source]

bmm

experimental_experiment.torch_interpreter._aten_functions.aten_cat(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], tensors: Tuple[str, ...], dim: int = 0, name='cat') str[source]

concat

experimental_experiment.torch_interpreter._aten_functions.aten_clone(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, memory_format: str | None = None, name='clone') str[source]

identity

experimental_experiment.torch_interpreter._aten_functions.aten_conv2d(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], input: str, weight: str, bias: str | None = None, stride: Sequence[int] = (1, 1), padding: Sequence[int] = (0, 0), dilation: Sequence[int] = (1, 1), groups: int = 1) str[source]

conv

experimental_experiment.torch_interpreter._aten_functions.aten_convolution(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], input: str, weight: str, bias: str | None = None, stride: Sequence[int] = (1,), padding: Sequence[int] = (0,), dilation: Sequence[int] = (1,), transposed: bool = False, output_padding: Sequence[int] = (0,), groups: int = 1) str[source]

conv

experimental_experiment.torch_interpreter._aten_functions.aten_copy(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, src: str, non_blocking: bool = False, name: str = 'copy') str[source]

identity

experimental_experiment.torch_interpreter._aten_functions.aten_copy_(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, src: str, non_blocking: bool = False) str[source]

identity

experimental_experiment.torch_interpreter._aten_functions.aten_cos(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, name: str = 'cos') str[source]

cos

experimental_experiment.torch_interpreter._aten_functions.aten_cosh(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

cosh

experimental_experiment.torch_interpreter._aten_functions.aten_detach(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

identity

experimental_experiment.torch_interpreter._aten_functions.aten_div(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, name='div') str[source]

div

experimental_experiment.torch_interpreter._aten_functions.aten_div_Scalar(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str) str[source]

div

experimental_experiment.torch_interpreter._aten_functions.aten_div_Tensor(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, alpha: Any | None = None) str[source]

div

experimental_experiment.torch_interpreter._aten_functions.aten_dropout(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, p: float = 0.5, training: bool = True) str[source]

dropout

experimental_experiment.torch_interpreter._aten_functions.aten_elu(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, alpha: float = 1.0, scale: float = 1.0, input_scale: int = 1, inplace: bool = False, name='elu') str[source]

elu

experimental_experiment.torch_interpreter._aten_functions.aten_embedding(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], weight: str, indices: str, padding_idx: int | None = None, max_norm: int | None = None, norm_type: float = 2.0, scale_grad_by_freq: bool = False, sparse: bool = False) str[source]

embedding
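
Ignoring padding_idx and the norm options, embedding is a row gather on the weight matrix, which is why it maps naturally to ONNX Gather. A semantic check:

    import torch
    import torch.nn.functional as F

    weight = torch.randn(10, 4)
    indices = torch.tensor([[1, 3], [7, 0]])
    # embedding lookup is exactly row indexing into weight
    assert torch.equal(F.embedding(indices, weight), weight[indices])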

experimental_experiment.torch_interpreter._aten_functions.aten_empty_like(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dtype: torch.dtype | None = None, layout=None, device: torch.device | None = None, pin_memory=None, memory_format=None) str[source]

constantofshape

experimental_experiment.torch_interpreter._aten_functions.aten_empty_permuted(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], size: str, physical_layout: str, dtype: torch.dtype | None = None, layout=None, device: torch.device | None = None, requires_grad: bool = False, pin_memory: bool = False, name: str = 'empty_permuted') str[source]

constantofshape

experimental_experiment.torch_interpreter._aten_functions.aten_empty_strided(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], size: str, stride: str, dtype: torch.dtype | None = None, layout=None, device: torch.device | None = None, requires_grad: bool = False, pin_memory: bool = False, name: str = 'empty_strided') str[source]

constantofshape

experimental_experiment.torch_interpreter._aten_functions.aten_eq(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, name='eq') str[source]

equal

experimental_experiment.torch_interpreter._aten_functions.aten_eq_Scalar(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str) str[source]

equal

experimental_experiment.torch_interpreter._aten_functions.aten_eq_Tensor(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, name='eq_Tensor') str[source]

equal

experimental_experiment.torch_interpreter._aten_functions.aten_exp(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, name: str = 'exp') str[source]

exp

experimental_experiment.torch_interpreter._aten_functions.aten_expand(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, sizes: str | List[int | str], implicit: bool = False, name: str = 'expand') str[source]

expand

experimental_experiment.torch_interpreter._aten_functions.aten_fill_Scalar(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, v: str) str[source]

constantofshape

experimental_experiment.torch_interpreter._aten_functions.aten_flatten(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, start_dim: int = 1, end_dim: int = -1) str[source]

flatten
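
flatten merges the dimensions from start_dim through end_dim, so it can be expressed as a single Reshape once the merged size is known. For example:

    import torch

    x = torch.randn(2, 3, 4, 5)
    # merging dims 1..2 (3 * 4 = 12) is one reshape
    assert torch.equal(x.flatten(1, 2), x.reshape(2, 12, 5))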

experimental_experiment.torch_interpreter._aten_functions.aten_full(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], size: str, fill_value: float, dtype: torch.dtype | None = None, layout=None, device: torch.device | None = None, pin_memory=None, requires_grad: bool = False, name: str = 'full') str[source]

constantofshape

experimental_experiment.torch_interpreter._aten_functions.aten_full_like(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, fill_value: str, dtype: torch.dtype | None = None, layout=None, device: torch.device | None = None, pin_memory=None, memory_format=None, name: str = 'full_like') str[source]

constantofshape

experimental_experiment.torch_interpreter._aten_functions.aten_ge(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, name: str = 'ge') str[source]

greater or equal

experimental_experiment.torch_interpreter._aten_functions.aten_ge_Tensor(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str) str[source]

greater or equal

experimental_experiment.torch_interpreter._aten_functions.aten_gt(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, name: str = 'gt') str[source]

greater

experimental_experiment.torch_interpreter._aten_functions.aten_gt_Tensor(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str) str[source]

greater

experimental_experiment.torch_interpreter._aten_functions.aten_index_Tensor(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, indices: List[int]) str[source]

[…, :, …]

experimental_experiment.torch_interpreter._aten_functions.aten_index_put(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, indices: List[str], values: str, accumulate: bool = False, name='aten_index_put') str[source]

[…, :, …]

experimental_experiment.torch_interpreter._aten_functions.aten_index_select(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dim: int, index: str, name: str = 'index_select') str[source]

[…, :, …]

experimental_experiment.torch_interpreter._aten_functions.aten_layer_norm(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, normalized_shape: Sequence[int], weight: str | None = None, bias: str | None = None, eps: float = 1e-05, cudnn_enable: bool = False, name='layer_norm') str[source]

layer_norm
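
layer_norm normalizes over the trailing normalized_shape dimensions. Assuming the usual formula, the decomposition a converter has to match (or delegate to ONNX LayerNormalization) is:

    import torch
    import torch.nn.functional as F

    x = torch.randn(2, 3, 8)
    w, b, eps = torch.randn(8), torch.randn(8), 1e-5
    mean = x.mean(-1, keepdim=True)
    var = x.var(-1, unbiased=False, keepdim=True)  # biased variance
    manual = (x - mean) / torch.sqrt(var + eps) * w + b
    assert torch.allclose(F.layer_norm(x, (8,), w, b, eps), manual, atol=1e-5)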

experimental_experiment.torch_interpreter._aten_functions.aten_leaky_relu(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], a: str, negative_slope: float = 0.01, inplace: bool = False, name: str = 'leaky_relu') str[source]

leaky relu

experimental_experiment.torch_interpreter._aten_functions.aten_leaky_relu_backward(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], grad_output: str, x: str, negative_slope: float, self_is_result: bool, name='leaky_relu_backward') str[source]

leaky relu

experimental_experiment.torch_interpreter._aten_functions.aten_linear(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, weight: str, bias: str | None = None) str[source]

linear
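
linear computes x @ weight.T + bias, which typically lowers to Gemm (or MatMul followed by Add). Semantic check:

    import torch
    import torch.nn.functional as F

    x, w, b = torch.randn(5, 3), torch.randn(4, 3), torch.randn(4)
    # weight is stored as (out_features, in_features), hence the transpose
    assert torch.allclose(F.linear(x, w, b), x @ w.T + b, atol=1e-6)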

experimental_experiment.torch_interpreter._aten_functions.aten_lt(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, name='lt') str[source]

less

experimental_experiment.torch_interpreter._aten_functions.aten_lt_Tensor(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str) str[source]

less

experimental_experiment.torch_interpreter._aten_functions.aten_masked_fill_Scalar(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, mask: str, value, name='masked_fill_Scalar') str[source]

masked

experimental_experiment.torch_interpreter._aten_functions.aten_matmul(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str) str[source]

matmul

experimental_experiment.torch_interpreter._aten_functions.aten_max_pool2d(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, kernel_size: Sequence[int], stride: Sequence[int] = (), padding: Sequence[int] = (0, 0), dilation: Sequence[int] = (1, 1), ceil_mode: bool = False) str[source]

maxpool

experimental_experiment.torch_interpreter._aten_functions.aten_max_pool2d_with_indices(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, kernel_size: Sequence[int], stride: Sequence[int] = (), padding: Sequence[int] = (0, 0), dilation: Sequence[int] = (1, 1), ceil_mode: bool = False) Tuple[str, str][source]

maxpool

experimental_experiment.torch_interpreter._aten_functions.aten_mean_dim(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dim: int | List[int] | None = None, keepdim: bool = False, dtype: torch.dtype | None = None) str[source]

reducemean

experimental_experiment.torch_interpreter._aten_functions.aten_mm(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str) str[source]

matmul

experimental_experiment.torch_interpreter._aten_functions.aten_mul(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, name='mul') str[source]

mul

experimental_experiment.torch_interpreter._aten_functions.aten_mul_Scalar(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str) str[source]

mul

experimental_experiment.torch_interpreter._aten_functions.aten_mul_Tensor(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str) str[source]

mul

experimental_experiment.torch_interpreter._aten_functions.aten_native_dropout(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, p: float, train: bool = False, name: str = 'native_dropout')[source]

dropout

experimental_experiment.torch_interpreter._aten_functions.aten_native_layer_norm(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, normalized_shape: str, weight: str | None = None, bias: str | None = None, eps: float = 1e-05, name: str = 'aten_native_layer_norm') Tuple[str, str, str][source]

native_layer_norm

experimental_experiment.torch_interpreter._aten_functions.aten_ne(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, name='ne') str[source]

not equal

experimental_experiment.torch_interpreter._aten_functions.aten_ne_Tensor(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, name='ne_Tensor') str[source]

not equal

experimental_experiment.torch_interpreter._aten_functions.aten_neg(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, name='neg') str[source]

neg

experimental_experiment.torch_interpreter._aten_functions.aten_new_zeros(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, size: str, dtype: torch.dtype | None = None, layout=None, device: torch.device | None = None, pin_memory=None, name: str = 'zeros') str[source]

constantofshape

experimental_experiment.torch_interpreter._aten_functions.aten_not(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, name: str = 'not') str[source]

not

experimental_experiment.torch_interpreter._aten_functions.aten_not_(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, name: str = 'not') str[source]

not

experimental_experiment.torch_interpreter._aten_functions.aten_ones(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], size: str, dtype: int = None, layout=None, device: torch.device | None = None, pin_memory=None, name: str = 'ones') str[source]

constantofshape

experimental_experiment.torch_interpreter._aten_functions.aten_ones_like(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dtype: torch.dtype | None = None, layout=None, device: torch.device | None = None, pin_memory=None, memory_format=None) str[source]

constantofshape

experimental_experiment.torch_interpreter._aten_functions.aten_permute(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dims: Sequence[int]) str[source]

transpose

experimental_experiment.torch_interpreter._aten_functions.aten_pow_Tensor_Scalar(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, exponent: str, name: str = 'pow_Tensor_Scalar') str[source]

pow

experimental_experiment.torch_interpreter._aten_functions.aten_pow_Tensor_Tensor(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, exponent: str, name: str = 'pow_Tensor_Tensor') str[source]

pow

experimental_experiment.torch_interpreter._aten_functions.aten_relu(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, inplace: bool = False) str[source]

relu

experimental_experiment.torch_interpreter._aten_functions.aten_repeat(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, repeats: str, name: str = 'repeat') str[source]

repeat

experimental_experiment.torch_interpreter._aten_functions.aten_round(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

round

experimental_experiment.torch_interpreter._aten_functions.aten_rrelu_with_noise_backward(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], grad_output: str, x: str, noise: str, lower: float, upper: float, training: bool, self_is_result: bool, name: str = 'rrelu_with_noise_backward') str[source]

rrelu

experimental_experiment.torch_interpreter._aten_functions.aten_rsqrt(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

rsqrt

experimental_experiment.torch_interpreter._aten_functions.aten_rsub(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, alpha: float = 1) str[source]

rsub

experimental_experiment.torch_interpreter._aten_functions.aten_rsub_Scalar(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, alpha: float = 1) str[source]

rsub
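
rsub reverses the operands: torch.rsub(x, y, alpha) is y - alpha * x, so the converter must swap the inputs before subtracting. Check:

    import torch

    x, y = torch.randn(3), torch.randn(3)
    # note the reversed operand order compared to sub
    assert torch.allclose(torch.rsub(x, y, alpha=2.0), y - 2.0 * x)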

experimental_experiment.torch_interpreter._aten_functions.aten_scaled_dot_product_attention(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], query: str, key: str, value: str, attn_mask: str | None = None, dropout_p: float = 0.0, is_causal: bool = False, scale: str | None = None, name: str = 'aten_scaled_dot_product_attention')[source]

scaled_dot_product_attention
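
With no mask and no dropout, the converter has to reproduce softmax(q @ k^T / sqrt(d)) @ v. A minimal reference in plain torch:

    import math
    import torch
    import torch.nn.functional as F

    q, k, v = (torch.randn(1, 2, 4, 8) for _ in range(3))
    scores = q @ k.transpose(-2, -1) / math.sqrt(q.shape[-1])
    manual = torch.softmax(scores, dim=-1) @ v
    assert torch.allclose(
        F.scaled_dot_product_attention(q, k, v), manual, atol=1e-5
    )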

experimental_experiment.torch_interpreter._aten_functions.aten_select_int(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dim: int, index: int) str[source]

gather

experimental_experiment.torch_interpreter._aten_functions.aten_setitem(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, indices: Tuple[Any, ...], values: str) str[source]

scatter

experimental_experiment.torch_interpreter._aten_functions.aten_sigmoid(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

sigmoid

experimental_experiment.torch_interpreter._aten_functions.aten_sigmoid_backward(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], out_grad: str, y: str) str[source]

sigmoid backward

See https://github.com/pytorch/pytorch/blob/main/torch/_decomp/decompositions.py#L108; conj_physical is the identity for real numbers.

    return out_grad * (y * (1 - y)).conj_physical()

experimental_experiment.torch_interpreter._aten_functions.aten_silu(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, inplace: bool = False) str[source]

silu
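
silu(x) = x * sigmoid(x); when no dedicated operator is available, this decomposes into Sigmoid followed by Mul:

    import torch
    import torch.nn.functional as F

    x = torch.randn(4)
    assert torch.allclose(F.silu(x), x * torch.sigmoid(x))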

experimental_experiment.torch_interpreter._aten_functions.aten_sin(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, name='sin') str[source]

sin

experimental_experiment.torch_interpreter._aten_functions.aten_sinh(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

sinh

experimental_experiment.torch_interpreter._aten_functions.aten_slice_Tensor(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dim: int = 0, start: int = 0, end: int | None = None, step: int | None = None) str[source]

slice

experimental_experiment.torch_interpreter._aten_functions.aten_slice_backward(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], grad_output: str, input_sizes: List[int], dim: int, start: int, end: int, step: int, name: str = 'slice_backward') str[source]

slice backward

experimental_experiment.torch_interpreter._aten_functions.aten_slice_scatter(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, src: str, dim: int = 0, start: int | None = None, end: int | None = None, step: int | None = None, name: str | None = None) str[source]

slice scatter

experimental_experiment.torch_interpreter._aten_functions.aten_softmax(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dim: int = -1, dtype: torch.dtype | None = None, name: str = 'softmax') str[source]

softmax

experimental_experiment.torch_interpreter._aten_functions.aten_softmax_int(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dim: int = -1, dtype: torch.dtype | None = None) str[source]

softmax

experimental_experiment.torch_interpreter._aten_functions.aten_split_with_sizes(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, split_sizes: str, dim: int = 0, name: str = 'split_with_sizes', use_sequence: bool = False) str[source]

split_to_sequence or split

experimental_experiment.torch_interpreter._aten_functions.aten_sqrt(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

sqrt

experimental_experiment.torch_interpreter._aten_functions.aten_squeeze(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, name='squeeze') str[source]

squeeze

experimental_experiment.torch_interpreter._aten_functions.aten_squeeze_dim(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dim: int, name='squeeze') str[source]

squeeze_dim

experimental_experiment.torch_interpreter._aten_functions.aten_sub(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, name='sub') str[source]

sub

experimental_experiment.torch_interpreter._aten_functions.aten_sub_Tensor(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str, alpha: float) str[source]

sub

experimental_experiment.torch_interpreter._aten_functions.aten_sum(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dim: int | List[int] | None = None, keepdim: bool = False, dtype: torch.dtype | None = None, name='sum') str[source]

reducesum

experimental_experiment.torch_interpreter._aten_functions.aten_sum_dim_IntList(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dim: int | List[int] | None, keepdim: bool, dtype: torch.dtype | None = None) str[source]

reducesum

experimental_experiment.torch_interpreter._aten_functions.aten_t(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, name: str = 't') str[source]

transpose

experimental_experiment.torch_interpreter._aten_functions.aten_tan(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

tan

experimental_experiment.torch_interpreter._aten_functions.aten_tanh(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str) str[source]

tanh

experimental_experiment.torch_interpreter._aten_functions.aten_tanh_backward(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], out_grad: str, y: str) str[source]

tanh backward
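
Analogous to the sigmoid decomposition above: the gradient is out_grad * (1 - y ** 2) where y = tanh(x). An autograd check:

    import torch

    x = torch.randn(4, requires_grad=True)
    y = torch.tanh(x)
    y.backward(torch.ones_like(y))
    # d tanh(x) / dx = 1 - tanh(x) ** 2
    assert torch.allclose(x.grad, 1 - y.detach() ** 2, atol=1e-6)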

experimental_experiment.torch_interpreter._aten_functions.aten_tensor(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, indices: Tuple[Any, ...] | None = None) str[source]

[…, :, …]

experimental_experiment.torch_interpreter._aten_functions.aten_threshold_backward(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], grad_output: str, x: str, threshold: float, name: str = 'threshold_backward') str[source]

less or equal
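
The mask lets gradients through only where x > threshold and zeroes them elsewhere (hence the less-or-equal comparison on the complementary side). An autograd check against torch.nn.functional.threshold:

    import torch
    import torch.nn.functional as F

    x = torch.randn(6, requires_grad=True)
    F.threshold(x, 0.5, 0.0).backward(torch.ones_like(x))
    # gradient is 1 where x > threshold, 0 otherwise
    assert torch.equal(x.grad, (x > 0.5).float())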

experimental_experiment.torch_interpreter._aten_functions.aten_transpose(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], input_name: str, dim0: int, dim1: int) str[source]

transpose

experimental_experiment.torch_interpreter._aten_functions.aten_transpose_int(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], input_name: str, dim0: int, dim1: int) str[source]

transpose

experimental_experiment.torch_interpreter._aten_functions.aten_tril(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, diagonal: int = 0) str[source]

tril

experimental_experiment.torch_interpreter._aten_functions.aten_triu(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, diagonal: int = 0) str[source]

trilu

experimental_experiment.torch_interpreter._aten_functions.aten_truediv(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, y: str) str[source]

truediv

experimental_experiment.torch_interpreter._aten_functions.aten_unsqueeze(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, dim: int) str[source]

unsqueeze

experimental_experiment.torch_interpreter._aten_functions.aten_view(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], x: str, size: str, node_name: str = 'view') str[source]

reshape
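
view carries torch's contiguity requirement, but at the graph level it behaves as a pure Reshape:

    import torch

    x = torch.randn(2, 3, 4)
    # on contiguous inputs, view and reshape coincide
    assert torch.equal(x.view(6, 4), x.reshape(6, 4))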

experimental_experiment.torch_interpreter._aten_functions.aten_zeros(g: GraphBuilder, sts: Dict[str, Any] | None, outputs: List[str], size: str, dtype: torch.dtype | None = None, layout=None, device: torch.device | None = None, pin_memory=None, requires_grad: bool = False, name: str = 'zeros') str[source]

constantofshape