Exported into ONNX with Static Shapes

The following script shows, for many short test cases, various ways to retrieve an ONNX model equivalent to the original model. Five exporters are compared: custom-fallback, custom-dec, custom-tracing, dynamo-ir, and script. The tested scenarios are described at Tested Scenarios.
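
Distilled to a single case, the export step can be driven directly with the same helpers. The sketch below is illustrative only; it reuses the calls from the script that follows and assumes that discover() keys match the case names used in the sections further down (AtenRollPos here).

from experimental_experiment.torch_interpreter.eval import discover, run_exporter
from experimental_experiment.helpers import pretty_onnx

cases = discover()
# "AtenRollPos" is assumed to be one of the discovered case names (see below).
res = run_exporter("custom-fallback", cases["AtenRollPos"], False, quiet=True)
if "exported" in res:
    print(pretty_onnx(res["onnx"]))   # same rendering as the dumps below
else:
    print("FAILED:", res["error"])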

<<<

import inspect
import textwrap
import pandas
from experimental_experiment.torch_interpreter.eval import discover, run_exporter
from experimental_experiment.ext_test_case import unit_test_going
from experimental_experiment.helpers import pretty_onnx

cases = discover()
print()
print(":ref:`Summary <lo-summary-exported-program>`")
print()
sorted_cases = sorted(cases.items())
if unit_test_going():
    sorted_cases = sorted_cases[:3]
for name, cls_model in sorted_cases:
    print(f"* :ref:`{name} <lo-model-case-export-{name}>`")
print()

obs = []
for name, cls_model in sorted_cases:
    print()
    print(f".. _lo-model-case-export-{name}:")
    print()
    print(name)
    print("=" * len(name))
    print()
    print("forward")
    print("+++++++")
    print()
    print("::")
    print()
    print(
        textwrap.indent(textwrap.dedent(inspect.getsource(cls_model.forward)), "    ")
    )
    print()
    for exporter in (
        "custom-fallback",
        "custom-dec",
        "custom-tracing",
        "dynamo-ir",
        "script",
    ):
        expname = exporter.replace("export-", "")
        print()
        print(expname)
        print("+" * len(expname))
        print()
        res = run_exporter(exporter, cls_model, False, quiet=True)
        case_ref = f":ref:`{name} <lo-model-case-export-{name}>`"
        if "exported" in res:
            print("::")
            print()
            print(textwrap.indent(pretty_onnx(res["onnx"]), "    "))
            print()
            obs.append(dict(case=case_ref, error="", exporter=exporter))
        else:
            print("**FAILED**")
            print()
            print("::")
            print()
            print(textwrap.indent(str(res["error"]), "    "))
            print()
            obs.append(dict(case=case_ref, error="FAIL", exporter=exporter))

print()
print(".. _lo-summary-exported-program:")
print()
print("Summary")
print("+++++++")
print()
df = pandas.DataFrame(obs)
piv = df.pivot(index="case", columns="exporter", values="error")
print(piv.to_markdown(tablefmt="rst"))
print()

>>>
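
As a quick sanity check, any successfully exported model can be compared against the original PyTorch module with onnxruntime. The sketch below is a minimal, hand-written example and not part of the generated report: it uses the AtenRollPos case, whose input name ('x') and static shape ([2, 3, 4]) appear in its dump below, and it assumes the discovered model class can be instantiated without arguments and that run_exporter returns the ONNX ModelProto under the "onnx" key, as in the script above.

import numpy as np
import torch
import onnxruntime as ort
from experimental_experiment.torch_interpreter.eval import discover, run_exporter

cases = discover()
cls_model = cases["AtenRollPos"]
res = run_exporter("custom-fallback", cls_model, False, quiet=True)

sess = ort.InferenceSession(
    res["onnx"].SerializeToString(), providers=["CPUExecutionProvider"]
)
x = torch.rand(2, 3, 4)                      # static shape used by this case
expected = cls_model()(x)                    # assumes a no-argument constructor
got = sess.run(None, {"x": x.numpy()})[0]    # input name taken from the dump below
np.testing.assert_allclose(expected.numpy(), got, rtol=1e-5, atol=1e-5)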

Summary

AtenAsStrided

forward

def forward(self, x):
    y = torch.as_strided(x, (2, 2, 8, 4), (128, 8, 16, 1))
    return y

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 2, 8, 8]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape
init: name='init7_s128_' type=int64 shape=(128,)                      -- Opset.make_node.0
init: name='init7_s4_2_2_8_4' type=int64 shape=(4,) -- array([2, 2, 8, 4])-- Opset.make_node.1/Shape
Reshape(x, init7_s1_-1) -> _reshape_x0
  Gather(_reshape_x0, init7_s128_) -> _onx_gather__reshape_x00
    Reshape(_onx_gather__reshape_x00, init7_s4_2_2_8_4) -> output_0
output: name='output_0' type=dtype('float32') shape=[2, 2, 8, 4]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 2, 8, 8]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape
init: name='init7_s128_' type=int64 shape=(128,)                      -- Opset.make_node.0
init: name='init7_s4_2_2_8_4' type=int64 shape=(4,) -- array([2, 2, 8, 4])-- Opset.make_node.1/Shape
Reshape(x, init7_s1_-1) -> _reshape_x0
  Gather(_reshape_x0, init7_s128_) -> _onx_gather__reshape_x00
    Reshape(_onx_gather__reshape_x00, init7_s4_2_2_8_4) -> output_0
output: name='output_0' type=dtype('float32') shape=[2, 2, 8, 4]

custom-tracing

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 2, 8, 8]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape
init: name='init7_s128_' type=int64 shape=(128,)                      -- Opset.make_node.0
init: name='init7_s4_2_2_8_4' type=int64 shape=(4,) -- array([2, 2, 8, 4])-- Opset.make_node.1/Shape
Reshape(x, init7_s1_-1) -> _reshape_x0
  Gather(_reshape_x0, init7_s128_) -> _onx_gather__reshape_x00
    Reshape(_onx_gather__reshape_x00, init7_s4_2_2_8_4) -> output
output: name='output' type=dtype('float32') shape=[2, 2, 8, 4]

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 2, 8, 8]
Constant(value_ints=[-1]) -> neg_1
Constant(value=[2, 2, 8, ...) -> val_0
Constant(value=[128, 8, 1...) -> val_1
Constant(value=[4]) -> rank_tensor
Constant(value_int=0) -> indices
SequenceEmpty() -> one_seq
Constant(value_int=4) -> rank_0
Loop(rank_0, , one_seq, indices, body=G1) -> one_seq_16, indices_17
Constant(value_ints=[-1]) -> tmp_18
  Reshape(x, tmp_18) -> self_flatten
Constant(value_int=0) -> storage_offset
  CastLike(storage_offset, indices_17) -> storage_offset_cast
  Add(indices_17, storage_offset_cast) -> indices_19
    Gather(self_flatten, indices_19) -> as_strided
output: name='as_strided' type=dtype('float32') shape=[2, 2, 8, 4]
----- subgraph ---- Loop - n6_2 - att.body=G1 -- level=1 -- i,cond_in,one_seq_1,indices_2 -> cond_out,one_seq_15,indices_13
input: name='i' type=dtype('int64') shape=None
input: name='cond_in' type=dtype('bool') shape=None
input: name='one_seq_1' type='NOTENSOR' shape=None
input: name='indices_2' type='NOTENSOR' shape=None
Constant(value_floats=[1.0]) -> tmp_14
  SequenceInsert(one_seq_1, tmp_14) -> one_seq_15
Constant(value=4) -> rank_3_cast
  Sub(rank_3_cast, i) -> tmp
Constant(value=1) -> int64_1_cast
  Sub(tmp, int64_1_cast) -> j
Reshape(j, neg_1) -> j_tensor
Gather(val_0, j_tensor, axis=0) -> size_dim_j
Slice(val_0, j_tensor, rank_tensor) -> size_after_j
  Expand(indices_2, size_after_j) -> indices_4
Gather(val_1, j_tensor, axis=0) -> stride_dim_j
Constant(value=0) -> int64_0_cast
Constant(value=1) -> int64_1_5_cast
  Range(int64_0_cast, size_dim_j, int64_1_5_cast) -> tmp_6
  Mul(tmp_6, stride_dim_j) -> add_value
Constant(value=0) -> int64_0_7_cast
  Equal(i, int64_0_7_cast) -> cond
    If(cond, then_branch=G2, else_branch=G3) -> shape_11
    Reshape(add_value, shape_11) -> add_value_12
    Add(indices_4, add_value_12) -> indices_13
Identity(cond_in) -> cond_out
output: name='cond_out' type=dtype('bool') shape=None
output: name='one_seq_15' type='NOTENSOR' shape=None
output: name='indices_13' type='NOTENSOR' shape=None
----- subgraph ---- If - n20 - att.then_branch=G2 -- level=2 --  -> shape
Identity(size_dim_j) -> shape
output: name='shape' type=dtype('int64') shape=[1]
----- subgraph ---- If - n20 - att.else_branch=G3 -- level=2 --  -> shape_10
Cast(size_dim_j, to=1) -> tmp_8
ConcatFromSequence(one_seq_1, axis=0) -> ones
  Concat(tmp_8, ones, axis=0) -> shape_9
    Cast(shape_9, to=7) -> shape_10
output: name='shape_10' type=dtype('int64') shape=None
----- subgraph ---- If - n20 - att.then_branch=G2 -- level=1 --  -> shape
Identity(size_dim_j) -> shape
output: name='shape' type=dtype('int64') shape=[1]
----- subgraph ---- If - n20 - att.else_branch=G3 -- level=1 --  -> shape_10
Cast(size_dim_j, to=1) -> tmp_8
ConcatFromSequence(one_seq_1, axis=0) -> ones
  Concat(tmp_8, ones, axis=0) -> shape_9
    Cast(shape_9, to=7) -> shape_10
output: name='shape_10' type=dtype('int64') shape=None

script

opset: domain='' version=17
input: name='onnx::Reshape_0' type=dtype('float32') shape=[2, 2, 8, 8]
Constant(value=[-1]) -> /Constant_output_0
  Reshape(onnx::Reshape_0, /Constant_output_0, allowzero=0) -> /Reshape_output_0
Constant(value=[[[[0, 1, ...) -> /Constant_1_output_0
  Gather(/Reshape_output_0, /Constant_1_output_0) -> 4
output: name='4' type=dtype('float32') shape=[2, 2, 8, 4]

AtenInterpolate

forward

def forward(self, x):
    y = torch.nn.functional.interpolate(
        x,
        scale_factor=2.0,
        mode="bilinear",
        recompute_scale_factor=False,
    )
    return y

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 2, 3, 4]
init: name='_onx_concat_init7_s2_2_20' type=int64 shape=(4,) -- array([2, 2, 6, 8])-- GraphBuilder.constant_folding.from/fold(init7_s2_2_2,init7_s2_6_8)##init7_s2_2_2/_aten_upsample_output_size.batch_channel##init7_s2_6_8/_aten_upsample_output_size.rsize
Resize(x, , , _onx_concat_init7_s2_2_20, coordinate_transformation_mode=b'pytorch_half_pixel', mode=b'linear', nearest_mode=b'floor') -> output_0
output: name='output_0' type=dtype('float32') shape=[2, 2, 6, 8]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 2, 3, 4]
init: name='_onx_concat_init7_s2_2_20' type=int64 shape=(4,) -- array([2, 2, 6, 8])-- GraphBuilder.constant_folding.from/fold(init7_s2_2_2,init7_s2_6_8)##init7_s2_2_2/_aten_upsample_output_size.batch_channel##init7_s2_6_8/_aten_upsample_output_size.rsize
Resize(x, , , _onx_concat_init7_s2_2_20, coordinate_transformation_mode=b'pytorch_half_pixel', mode=b'linear', nearest_mode=b'floor') -> output_0
output: name='output_0' type=dtype('float32') shape=[2, 2, 6, 8]

custom-tracing

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 2, 3, 4]
init: name='_onx_concat_init7_s2_2_20' type=int64 shape=(4,) -- array([2, 2, 6, 8])-- GraphBuilder.constant_folding.from/fold(init7_s2_2_2,init7_s2_6_8)##init7_s2_2_2/_aten_upsample_output_size.batch_channel##init7_s2_6_8/_aten_upsample_output_size.rsize
Resize(x, , , _onx_concat_init7_s2_2_20, coordinate_transformation_mode=b'pytorch_half_pixel', mode=b'linear', nearest_mode=b'floor') -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1', 'd_output_2', 'd_output_3']

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 2, 3, 4]
Constant(value_floats=[1.0,1.0,2.0,2.0]) -> val_0
Resize(x, , val_0, keep_aspect_ratio_policy=b'stretch', antialias=0, extrapolation_value=0.00, exclude_outside=0, nearest_mode=b'floor', coordinate_transformation_mode=b'pytorch_half_pixel', cubic_coeff_a=-0.75, mode=b'linear') -> upsample_bilinear2d
output: name='upsample_bilinear2d' type=dtype('float32') shape=[2, 2, 6, 8]

script

opset: domain='' version=17
input: name='x' type=dtype('float32') shape=[2, 2, 3, 4]
Constant(value=[1.0, 1.0,...) -> /Constant_output_0
Resize(x, , /Constant_output_0, coordinate_transformation_mode=b'half_pixel', cubic_coeff_a=-0.75, mode=b'linear', nearest_mode=b'floor') -> 5
output: name='5' type=dtype('float32') shape=[2, 2, 6, 8]

AtenNonZero

forward

def forward(self, x):
    y = torch.nonzero(x)
    return y

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
NonZero(x) -> _onx_nonzero_x0
  Transpose(_onx_nonzero_x0, perm=[1,0]) -> output_0
output: name='output_0' type=dtype('int64') shape=['NEWDIM_nonzero', 2]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
NonZero(x) -> _onx_nonzero_x0
  Transpose(_onx_nonzero_x0, perm=[1,0]) -> output_0
output: name='output_0' type=dtype('int64') shape=['NEWDIM_nonzero', 2]

custom-tracing

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
NonZero(x) -> _onx_nonzero_x0
  Transpose(_onx_nonzero_x0, perm=[1,0]) -> output
output: name='output' type=dtype('int64') shape=['NEWDIM_nonzero', 2]

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
NonZero(x) -> val_0
  Transpose(val_0, perm=[1,0]) -> nonzero
output: name='nonzero' type=dtype('int64') shape=['u0', 2]

script

opset: domain='' version=17
input: name='onnx::NonZero_0' type=dtype('float32') shape=[3, 4]
NonZero(onnx::NonZero_0) -> /NonZero_output_0
  Transpose(/NonZero_output_0, perm=[1,0]) -> 2
output: name='2' type=dtype('int64') shape=['Transpose2_dim_0', 2]

AtenNonZeroTuple

forward

def forward(self, x):
    y = torch.nonzero(x, as_tuple=True)
    return y[0], y[1]

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
NonZero(x) -> _onx_nonzero_x0
  Split(_onx_nonzero_x0, num_outputs=2) -> _onx_split_nonzero_x00, _onx_split_nonzero_x01
    Reshape(_onx_split_nonzero_x00, init7_s1_-1) -> output_0
Reshape(_onx_split_nonzero_x01, init7_s1_-1) -> output_1
output: name='output_0' type=dtype('int64') shape=['u1']
output: name='output_1' type=dtype('int64') shape=['u1']

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
NonZero(x) -> _onx_nonzero_x0
  Split(_onx_nonzero_x0, num_outputs=2) -> _onx_split_nonzero_x00, _onx_split_nonzero_x01
    Reshape(_onx_split_nonzero_x00, init7_s1_-1) -> output_0
Reshape(_onx_split_nonzero_x01, init7_s1_-1) -> output_1
output: name='output_0' type=dtype('int64') shape=['u3']
output: name='output_1' type=dtype('int64') shape=['u3']

custom-tracing

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
NonZero(x) -> _onx_nonzero_x0
  Split(_onx_nonzero_x0, num_outputs=2) -> _onx_split_nonzero_x00, _onx_split_nonzero_x01
    Reshape(_onx_split_nonzero_x00, init7_s1_-1) -> output_0
Reshape(_onx_split_nonzero_x01, init7_s1_-1) -> output_1
output: name='output_0' type=dtype('int64') shape=['d_output_0_0']
output: name='output_1' type=dtype('int64') shape=['d_output_1_0']

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
Constant(value_ints=[1]) -> unbind_axis
NonZero(x) -> val_0
  Transpose(val_0, perm=[1,0]) -> nonzero
    Split(nonzero, axis=1, num_outputs=2) -> unbind_split_0, unbind_split_1
  Squeeze(unbind_split_0, unbind_axis) -> getitem
Squeeze(unbind_split_1, unbind_axis) -> getitem_1
output: name='getitem' type=dtype('int64') shape=['u0']
output: name='getitem_1' type=dtype('int64') shape=['u0']

script

opset: domain='' version=17
input: name='onnx::NonZero_0' type=dtype('float32') shape=[3, 4]
Constant(value=[1, 1]) -> /Constant_output_0
NonZero(onnx::NonZero_0) -> /NonZero_output_0
  Transpose(/NonZero_output_0, perm=[1,0]) -> /Transpose_output_0
  Split(/Transpose_output_0, /Constant_output_0, axis=1) -> /Split_output_0, /Split_output_1
Constant(value=[1]) -> /Constant_1_output_0
  Squeeze(/Split_output_0, /Constant_1_output_0) -> 7
Constant(value=[1]) -> /Constant_2_output_0
  Squeeze(/Split_output_1, /Constant_2_output_0) -> 9
output: name='7' type=dtype('int64') shape=['Squeeze7_dim_0']
output: name='9' type=dtype('int64') shape=['Squeeze7_dim_0']

AtenRollPos

forward

def forward(self, x):
    return torch.roll(x, 1, -1)

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 3, 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4])           -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape
Slice(x, init7_s1_-1, init7_s1_4, init7_s1_-1) -> _onx_slice_x0
Slice(x, init7_s1_0, init7_s1_-1, init7_s1_-1) -> _onx_slice_x02
  Concat(_onx_slice_x0, _onx_slice_x02, axis=-1) -> output_0
output: name='output_0' type=dtype('float32') shape=[2, 3, 4]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 3, 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4])           -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape
Slice(x, init7_s1_-1, init7_s1_4, init7_s1_-1) -> _onx_slice_x0
Slice(x, init7_s1_0, init7_s1_-1, init7_s1_-1) -> _onx_slice_x02
  Concat(_onx_slice_x0, _onx_slice_x02, axis=-1) -> output_0
output: name='output_0' type=dtype('float32') shape=[2, 3, 4]

custom-tracing

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 3, 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4])           -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape
Slice(x, init7_s1_-1, init7_s1_4, init7_s1_-1) -> _onx_slice_x0
Slice(x, init7_s1_0, init7_s1_-1, init7_s1_-1) -> _onx_slice_x02
  Concat(_onx_slice_x0, _onx_slice_x02, axis=-1) -> output
output: name='output' type=dtype('float32') shape=[2, 3, 4]

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 3, 4]
Constant(value_ints=[0]) -> tmp_4
Constant(value=[-1]) -> dim_tensor
Constant(value=[1]) -> shift_tensor
Constant(value=[3]) -> slice_length_3
  Slice(x, tmp_4, slice_length_3, dim_tensor) -> suffix
Constant(value=[24]) -> tmp_6
  Slice(x, slice_length_3, tmp_6, dim_tensor) -> prefix
    Concat(prefix, suffix, axis=-1) -> roll
output: name='roll' type=dtype('float32') shape=[2, 3, 4]

script

opset: domain='' version=17
input: name='onnx::Slice_0' type=dtype('float32') shape=[2, 3, 4]
Constant(value=[-1]) -> /Constant_output_0
Constant(value=[-1]) -> /Constant_1_output_0
Constant(value=[922337203...) -> /Constant_2_output_0
  Slice(onnx::Slice_0, /Constant_1_output_0, /Constant_2_output_0, /Constant_output_0) -> /Slice_output_0
Constant(value=[-1]) -> /Constant_3_output_0
Constant(value=[0]) -> /Constant_4_output_0
Constant(value=[-1]) -> /Constant_5_output_0
  Slice(onnx::Slice_0, /Constant_4_output_0, /Constant_5_output_0, /Constant_3_output_0) -> /Slice_1_output_0
    Concat(/Slice_output_0, /Slice_1_output_0, axis=-1) -> 9
output: name='9' type=dtype('float32') shape=[2, 3, 4]

AtenRollRelu

forward

def forward(self, x):
    return torch.relu(torch.roll(x, -1, -1))

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 3, 4]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4])           -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape
Slice(x, init7_s1_1, init7_s1_4, init7_s1_-1) -> _onx_slice_x0
Slice(x, init7_s1_0, init7_s1_1, init7_s1_-1) -> _onx_slice_x02
  Concat(_onx_slice_x0, _onx_slice_x02, axis=-1) -> _onx_concat_slice_x00
    Relu(_onx_concat_slice_x00) -> output_0
output: name='output_0' type=dtype('float32') shape=[2, 3, 4]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 3, 4]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4])           -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape
Slice(x, init7_s1_1, init7_s1_4, init7_s1_-1) -> _onx_slice_x0
Slice(x, init7_s1_0, init7_s1_1, init7_s1_-1) -> _onx_slice_x02
  Concat(_onx_slice_x0, _onx_slice_x02, axis=-1) -> _onx_concat_slice_x00
    Relu(_onx_concat_slice_x00) -> output_0
output: name='output_0' type=dtype('float32') shape=[2, 3, 4]

custom-tracing

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 3, 4]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4])           -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape
Slice(x, init7_s1_1, init7_s1_4, init7_s1_-1) -> _onx_slice_x0
Slice(x, init7_s1_0, init7_s1_1, init7_s1_-1) -> _onx_slice_x02
  Concat(_onx_slice_x0, _onx_slice_x02, axis=-1) -> _onx_concat_slice_x00
    Relu(_onx_concat_slice_x00) -> output
output: name='output' type=dtype('float32') shape=[2, 3, 4]

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 3, 4]
Constant(value_ints=[0]) -> tmp_4
Constant(value=[-1]) -> dim_tensor
Constant(value=[-1]) -> shift_tensor
Constant(value=[1]) -> slice_length_3
  Slice(x, tmp_4, slice_length_3, dim_tensor) -> suffix
Constant(value=[24]) -> tmp_6
  Slice(x, slice_length_3, tmp_6, dim_tensor) -> prefix
    Concat(prefix, suffix, axis=-1) -> roll
      Relu(roll) -> relu
output: name='relu' type=dtype('float32') shape=[2, 3, 4]

script

opset: domain='' version=17
input: name='onnx::Slice_0' type=dtype('float32') shape=[2, 3, 4]
Constant(value=[-1]) -> /Constant_output_0
Constant(value=[1]) -> /Constant_1_output_0
Constant(value=[922337203...) -> /Constant_2_output_0
  Slice(onnx::Slice_0, /Constant_1_output_0, /Constant_2_output_0, /Constant_output_0) -> /Slice_output_0
Constant(value=[-1]) -> /Constant_3_output_0
Constant(value=[0]) -> /Constant_4_output_0
Constant(value=[1]) -> /Constant_5_output_0
  Slice(onnx::Slice_0, /Constant_4_output_0, /Constant_5_output_0, /Constant_3_output_0) -> /Slice_1_output_0
    Concat(/Slice_output_0, /Slice_1_output_0, axis=-1) -> /Concat_output_0
      Relu(/Concat_output_0) -> 10
output: name='10' type=dtype('float32') shape=[2, 3, 4]

BuildInIsInstance

forward

def forward(self, x, lx: list | torch.Tensor):
    if isinstance(lx, list):
        t = lx[0] * lx[1].sum(axis=1, keepdim=True)
        return torch.sigmoid(self.linear(x)) - self.buff + t
    return torch.sigmoid(self.linear(x)) - self.buff + lx

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1])           -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_onx_transpose_p_linear_weight0' type=float32 shape=(1, 3) -- array([ 0.362, -0.473, -0.427], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_transpose_p_linear_weight0,init7_s2_1_-1)##_onx_transpose_p_linear_weight0/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.461], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
  Mul(lx_0, sum_1) -> mul
    Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1])           -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_onx_transpose_p_linear_weight0' type=float32 shape=(1, 3) -- array([-0.33 ,  0.45 ,  0.204], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_transpose_p_linear_weight0,init7_s2_1_-1)##_onx_transpose_p_linear_weight0/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.486], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
  Mul(lx_0, sum_1) -> mul
    Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]

custom-tracing

FAILED

Type is unknown for result 'lx', known_types={'x': 1, 'linear.weight': 1, 'linear.bias': 1, '_sub_Linear_input_1': 1, '_sub_Linear__onx_transpose_weight0': 1, '_sub_Linear__onx_matmul_input_10': 1, '_sub_Linear_linear': 1, '_sub_Linear_output': 1, 'linear': 1, 'sigmoid': 1, 'buff': 1, 'sub': 1}
--DEBUG--
[GraphBuilder-ZIU] Message starts, there are 3 initializers, 8 nodes, 2 inputs, 2 outputs.
--SHAPE--
_dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_dimensions_source_flat=None
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes=None
_known_shapes={'_sub_Linear__onx_matmul_input_10': (4, 1),
 '_sub_Linear__onx_transpose_weight0': (3, 1),
 '_sub_Linear_input_1': (4, 3),
 '_sub_Linear_linear': (4, 1),
 '_sub_Linear_output': (4, 1),
 'buff': (1,),
 'linear': (4, 1),
 'linear.bias': (1,),
 'linear.weight': (1, 3),
 'sigmoid': (4, 1),
 'sub': (4, 1),
 'x': (4, 3)}
_known_types={'_sub_Linear__onx_matmul_input_10': 1,
 '_sub_Linear__onx_transpose_weight0': 1,
 '_sub_Linear_input_1': 1,
 '_sub_Linear_linear': 1,
 '_sub_Linear_output': 1,
 'buff': 1,
 'linear': 1,
 'linear.bias': 1,
 'linear.weight': 1,
 'sigmoid': 1,
 'sub': 1,
 'x': 1}
_known_value_shape={}
_known_constants=['_sub_Linear__onx_transpose_weight0', 'buff', 'linear.bias', 'linear.weight']
_known_ranks={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
    add -> {output}
    buff -> {sub}
    linear -> {sigmoid}
    lx -> {add}
    sigmoid -> {sub}
    sub -> {add}
    x -> {linear}
--TORCH-SHAPES--
    x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), '')) --- 1:2:(4, 3):
    lx: ('run_node', ('', '')) --- :::
    linear: ('run_node', ('', '')) --- 1:2:(4, 1):
    sigmoid: ('run_node', ('', '')) --- 1:2:(4, 1):
    buff: ('run_node', ('', '')) --- 1:1:(1,):
    sub: ('run_node', ('', '')) --- 1:2:(4, 1):
    add: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(strict=True, tracing=True, aten_as_function={'aten.scaled_dot_product_attention.default'})
-- process.graph_module --
GraphModule(
  (linear): Linear(in_features=3, out_features=1, bias=True)
)



def forward(self, x, lx : list_torch_Tensor):
    linear = self.linear(x);  x = None
    sigmoid = torch.sigmoid(linear);  linear = None
    buff = self.buff
    sub = sigmoid - buff;  sigmoid = buff = None
    add = sub + lx;  sub = lx = None
    return add

# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
    %x : [num_users=1] = placeholder[target=x]
    %lx : list | torch.Tensor [num_users=1] = placeholder[target=lx]
    %linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
    %sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
    %buff : [num_users=1] = get_attr[target=buff]
    %sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %buff), kwargs = {})
    %add : [num_users=1] = call_function[target=operator.add](args = (%sub, %lx), kwargs = {})
    return add
-- process.progress --
node 6/8 target=<built-in function add>
--
[GraphBuilder-ZIU.make_tensor_input] x[1:4x3]
[GraphBuilder-ZIU.make_tensor_input] lx[0:]
[GraphBuilder-ZIU.make_initializer] linear.weight[torch.float32:torch.float32:[-0.22660338878631592, -0.5769753456115723, -0.5719130635261536]] - SOURCE: GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
[GraphBuilder-ZIU.make_initializer] linear.bias[torch.float32:torch.float32:[-0.4451240003108978]] - SOURCE: GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
[GraphBuilder-ZIU.make_initializer] buff[torch.float32:torch.float32:[0.5]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-ZIU.make_node] .make_nodes     [#:#   ] Identity:['x']->['_sub_Linear_input_1']
[GraphBuilder-ZIU.make_node] linear          [#:#   ] Transpose:['linear.weight']->['_sub_Linear__onx_transpose_weight0']
[GraphBuilder-ZIU.make_node] Opset           [##:#  ] MatMul:['_sub_Linear_input_1', '_sub_Linear__onx_transpose_weight0']->['_sub_Linear__onx_matmul_input_10']
[GraphBuilder-ZIU.make_node] Opset2          [##:#  ] Add:['_sub_Linear__onx_matmul_input_10', 'linear.bias']->['_sub_Linear_linear']
[GraphBuilder-ZIU.make_node] .output         [#:#   ] Identity:['_sub_Linear_linear']->['_sub_Linear_output']
[GraphBuilder-ZIU.make_node] .make_nodes2    [#:#   ] Identity:['_sub_Linear_output']->['linear']
[GraphBuilder-ZIU.make_node] sigmoid         [#:#   ] Sigmoid:['linear']->['sigmoid']
[GraphBuilder-ZIU.make_node] sub             [##:#  ] Sub:['sigmoid', 'buff']->['sub']
[GraphBuilder-ZIU] Message completed, there are 3 initializers, 8 nodes, 2 inputs, 2 outputs..

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([0.094, 0.501, 0.467], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.211], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
Constant(value=[1]) -> val_3
  ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
    Mul(lx_0, sum_1) -> mul
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, buff) -> sub
      Add(sub, mul) -> add
output: name='add' type=dtype('float32') shape=[4, 1]

script

opset: domain='' version=17
input: name='onnx::Gemm_0' type=dtype('float32') shape=[4, 3]
input: name='onnx::Mul_1' type=dtype('float32') shape=[4, 1]
input: name='onnx::ReduceSum_2' type=dtype('float32') shape=[4, 2]
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.239,  0.423, -0.096], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.22], dtype=float32)
Constant(value=[1]) -> onnx::ReduceSum_6
  ReduceSum(onnx::ReduceSum_2, onnx::ReduceSum_6, keepdims=1) -> /ReduceSum_output_0
    Mul(onnx::Mul_1, /ReduceSum_output_0) -> /Mul_output_0
Gemm(onnx::Gemm_0, linear.weight, linear.bias, alpha=1.00, beta=1.00, transB=1) -> /linear/Gemm_output_0
  Sigmoid(/linear/Gemm_output_0) -> /Sigmoid_output_0
    Sub(/Sigmoid_output_0, buff) -> /Sub_output_0
      Add(/Sub_output_0, /Mul_output_0) -> 12
output: name='12' type=dtype('float32') shape=[4, 1]
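
Note how every exporter flattens the list argument lx into two separate graph inputs, lx_0 and lx_1 for the custom and dynamo-ir exports (the script exporter uses auto-generated names instead). A caller therefore feeds the list elements individually. A minimal sketch, assuming model_proto is a hypothetical variable holding one of the custom or dynamo-ir exports shown above:

import numpy as np
import onnxruntime as ort

# model_proto: hypothetical name for one of the exports above (e.g. res["onnx"]).
sess = ort.InferenceSession(
    model_proto.SerializeToString(), providers=["CPUExecutionProvider"]
)
feeds = {
    "x": np.random.rand(4, 3).astype(np.float32),
    "lx_0": np.random.rand(4, 1).astype(np.float32),  # lx[0]
    "lx_1": np.random.rand(4, 2).astype(np.float32),  # lx[1]
}
print(sess.run(None, feeds)[0].shape)  # (4, 1)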

BuildInLen

forward

def forward(self, x, lx: list):
    t = lx[0] * lx[1].sum(axis=1, keepdim=True)
    if len(lx) > 2:
        t = t + lx[2].sum(axis=1, keepdim=True)
    return torch.sigmoid(self.linear(x)) - self.buff + t

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1])           -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_onx_transpose_p_linear_weight0' type=float32 shape=(1, 3) -- array([-0.304, -0.356,  0.509], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_transpose_p_linear_weight0,init7_s2_1_-1)##_onx_transpose_p_linear_weight0/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.56], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
  Mul(lx_0, sum_1) -> mul
    Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1])           -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_onx_transpose_p_linear_weight0' type=float32 shape=(1, 3) -- array([ 0.476,  0.164, -0.183], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_transpose_p_linear_weight0,init7_s2_1_-1)##_onx_transpose_p_linear_weight0/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.217], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
  Mul(lx_0, sum_1) -> mul
    Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]

custom-tracing

FAILED

len(.) expects an integer, len needs to be replaced. You should use _len.

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([0.262, 0.564, 0.54 ], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.231], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
Constant(value=[1]) -> val_3
  ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
    Mul(lx_0, sum_1) -> mul
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, buff) -> sub
      Add(sub, mul) -> add
output: name='add' type=dtype('float32') shape=[4, 1]

script

opset: domain='' version=17
input: name='onnx::Gemm_0' type=dtype('float32') shape=[4, 3]
input: name='onnx::Mul_1' type=dtype('float32') shape=[4, 1]
input: name='onnx::ReduceSum_2' type=dtype('float32') shape=[4, 2]
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.038,  0.458, -0.463], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.551], dtype=float32)
Constant(value=[1]) -> onnx::ReduceSum_6
  ReduceSum(onnx::ReduceSum_2, onnx::ReduceSum_6, keepdims=1) -> /ReduceSum_output_0
    Mul(onnx::Mul_1, /ReduceSum_output_0) -> /Mul_output_0
Gemm(onnx::Gemm_0, linear.weight, linear.bias, alpha=1.00, beta=1.00, transB=1) -> /linear/Gemm_output_0
  Sigmoid(/linear/Gemm_output_0) -> /Sigmoid_output_0
    Sub(/Sigmoid_output_0, buff) -> /Sub_output_0
      Add(/Sub_output_0, /Mul_output_0) -> 12
output: name='12' type=dtype('float32') shape=[4, 1]

ComplexPolar

forward

def forward(self, x, angle):
    return torch.polar(x, angle)

custom-fallback

FAILED

[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. Type Error: Type 'tensor(complex64)' of input parameter (_onx_cast_sin_angle00) of operator (Mul) in node (polar5) is invalid.

custom-dec

FAILED

[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. Type Error: Type 'tensor(complex64)' of input parameter (_onx_cast_sin_angle00) of operator (Mul) in node (polar5) is invalid.

custom-tracing

FAILED

[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. Type Error: Type 'tensor(complex64)' of input parameter (_onx_cast_sin_angle00) of operator (Mul) in node (polar5) is invalid.

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 4]
input: name='angle' type=dtype('float32') shape=[4, 4]
Constant(value=[-1]) -> int64_m1_1d
Cos(angle) -> tmp
  Mul(x, tmp) -> tmp_0
  Unsqueeze(tmp_0, int64_m1_1d) -> real
Sin(angle) -> tmp_1
  Mul(x, tmp_1) -> tmp_2
Constant(value=[-1]) -> int64_m1_1d_3
  Unsqueeze(tmp_2, int64_m1_1d_3) -> imag
    Concat(real, imag, axis=-1) -> polar
output: name='polar' type=dtype('float32') shape=[4, 4, 2]

script

FAILED

Exporting the operator 'aten::polar' to ONNX opset version 17 is not supported

ControlFlowCond

forward

def forward(self, x):
    def true_fn(x):
        return torch.sin(x)

    def false_fn(x):
        return torch.cos(x)

    return torch.cond(x.sum() > 0, true_fn, false_fn, [x])

custom-fallback

opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=[5, 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
  Greater(sum_1, init1_s_) -> gt
    If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 --  -> cond#0
Cos(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 --  -> cond#0
Sin(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None

custom-dec

opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=[5, 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
  Greater(sum_1, init1_s_) -> gt
    If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 --  -> cond#0
Cos(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 --  -> cond#0
Sin(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None

custom-tracing

FAILED

aten_meth_sum() missing 1 required positional argument: 'axis'

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[5, 3]
Constant(value=0.0) -> scalar_tensor_default
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
  Greater(sum_1, scalar_tensor_default) -> gt
    If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4 - att.then_branch=G1 -- level=1 --  -> sin_true_graph_0
Sin(x) -> sin_true_graph_0
output: name='sin_true_graph_0' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4 - att.else_branch=G2 -- level=1 --  -> cos_false_graph_0
Cos(x) -> cos_false_graph_0
output: name='cos_false_graph_0' type=dtype('float32') shape=[5, 3]

script

FAILED

Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.

ControlFlowCond2Inputs

forward

def forward(self, x, y):
    def true_fn(x, y):
        return torch.sin(x), torch.cos(x) + y

    def false_fn(x, y):
        return torch.cos(x), torch.sin(x) + y

    return torch.cond(x.sum() > 0, true_fn, false_fn, [x, y])

custom-fallback

opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=[5, 3]
input: name='y' type=dtype('float32') shape=[5, 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
  Greater(sum_1, init1_s_) -> gt
    If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=[5, 3]
output: name='output_1' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 --  -> cond#0,cond#1
Cos(x) -> cond#0
Sin(x) -> sin2
Add(sin2, y) -> cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 --  -> cond#0,cond#1
Cos(x) -> cos2
Add(cos2, y) -> cond#1
Sin(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None

custom-dec

opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=[5, 3]
input: name='y' type=dtype('float32') shape=[5, 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
  Greater(sum_1, init1_s_) -> gt
    If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=[5, 3]
output: name='output_1' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 --  -> cond#0,cond#1
Cos(x) -> cond#0
Sin(x) -> sin2
Add(sin2, y) -> cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 --  -> cond#0,cond#1
Cos(x) -> cos2
Add(cos2, y) -> cond#1
Sin(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None

custom-tracing

FAILED

aten_meth_sum() missing 1 required positional argument: 'axis'

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[5, 3]
input: name='y' type=dtype('float32') shape=[5, 3]
Constant(value=0.0) -> scalar_tensor_default
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
  Greater(sum_1, scalar_tensor_default) -> gt
    If(gt, then_branch=G1, else_branch=G2) -> getitem, getitem_1
output: name='getitem' type=dtype('float32') shape=[5, 3]
output: name='getitem_1' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4 - att.then_branch=G1 -- level=1 --  -> sin_true_graph_0,add_true_graph_0
Cos(x) -> cos
Add(cos, y) -> add_true_graph_0
Sin(x) -> sin_true_graph_0
output: name='sin_true_graph_0' type=dtype('float32') shape=[5, 3]
output: name='add_true_graph_0' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4 - att.else_branch=G2 -- level=1 --  -> cos_false_graph_0,add_false_graph_0
Cos(x) -> cos_false_graph_0
Sin(x) -> sin_2
Add(sin_2, y) -> add_false_graph_0
output: name='cos_false_graph_0' type=dtype('float32') shape=[5, 3]
output: name='add_false_graph_0' type=dtype('float32') shape=[5, 3]

script

FAILED

Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.

ControlFlowCond2Outputs

forward

def forward(self, x):
    def true_fn(x):
        return torch.sin(x), torch.cos(x)

    def false_fn(x):
        return torch.cos(x), torch.sin(x)

    return torch.cond(x.sum() > 0, true_fn, false_fn, [x])

custom-fallback

opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=[5, 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
  Greater(sum_1, init1_s_) -> gt
    If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=[5, 3]
output: name='output_1' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 --  -> cond#0,cond#1
Cos(x) -> cond#0
Sin(x) -> cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 --  -> cond#0,cond#1
Cos(x) -> cond#1
Sin(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None

custom-dec

opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=[5, 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
  Greater(sum_1, init1_s_) -> gt
    If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=[5, 3]
output: name='output_1' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 --  -> cond#0,cond#1
Cos(x) -> cond#0
Sin(x) -> cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 --  -> cond#0,cond#1
Cos(x) -> cond#1
Sin(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None

custom-tracing

FAILED

aten_meth_sum() missing 1 required positional argument: 'axis'

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[5, 3]
Constant(value=0.0) -> scalar_tensor_default
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
  Greater(sum_1, scalar_tensor_default) -> gt
    If(gt, then_branch=G1, else_branch=G2) -> getitem, getitem_1
output: name='getitem' type=dtype('float32') shape=[5, 3]
output: name='getitem_1' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4 - att.then_branch=G1 -- level=1 --  -> sin_true_graph_0,cos_true_graph_0
Cos(x) -> cos_true_graph_0
Sin(x) -> sin_true_graph_0
output: name='sin_true_graph_0' type=dtype('float32') shape=[5, 3]
output: name='cos_true_graph_0' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4 - att.else_branch=G2 -- level=1 --  -> cos_false_graph_0,sin_false_graph_0
Cos(x) -> cos_false_graph_0
Sin(x) -> sin_false_graph_0
output: name='cos_false_graph_0' type=dtype('float32') shape=[5, 3]
output: name='sin_false_graph_0' type=dtype('float32') shape=[5, 3]

script

FAILED

Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.

ControlFlowCondConstant

forward

def forward(self, x):
    def true_fn(x):
        return torch.sin(x) - torch.ones(x.shape, dtype=x.dtype)

    def false_fn(x):
        return torch.cos(x) + torch.ones((1, 1024), dtype=x.dtype)

    return torch.cond(x.sum() > 0, true_fn, false_fn, [x])

custom-fallback

opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=[1024, 1024]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
  Greater(sum_1, init1_s_) -> gt
    If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=[1024, 1024]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 --  -> cond#0
Constant(value=[1, 1024]) -> init7_s2_1_10242
  ConstantOfShape(init7_s2_1_10242, value=[1.0]) -> ones2
Cos(x) -> cos2
  Add(cos2, ones2) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 --  -> cond#0
Constant(value=[1024, 102...) -> init7_s2_1024_10242
  ConstantOfShape(init7_s2_1024_10242, value=[1.0]) -> ones32
Sin(x) -> sin2
  Sub(sin2, ones32) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None

custom-dec

opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=[1024, 1024]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
  Greater(sum_1, init1_s_) -> gt
    If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=[1024, 1024]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 --  -> cond#0
Constant(value=[1, 1024]) -> init7_s2_1_10242
  ConstantOfShape(init7_s2_1_10242, value=[1.0]) -> ones2
Cos(x) -> cos2
  Add(cos2, ones2) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 --  -> cond#0
Constant(value=[1024, 102...) -> init7_s2_1024_10242
  ConstantOfShape(init7_s2_1024_10242, value=[1.0]) -> ones32
Sin(x) -> sin2
  Sub(sin2, ones32) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None

custom-tracing

FAILED

aten_meth_sum() missing 1 required positional argument: 'axis'

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[1024, 1024]
init: name='ones_2' type=float32 shape=(1, 1024)
Constant(value=0.0) -> scalar_tensor_default
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
  Greater(sum_1, scalar_tensor_default) -> gt
    If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=[1024, 1024]
----- subgraph ---- If - node_If_4 - att.then_branch=G1 -- level=1 --  -> sub_true_graph_0
Constant(value=[1024, 102...) -> val_1
Sin(x) -> sin
Constant(value=1.0) -> val_3
  Expand(val_3, val_1) -> ones
  Sub(sin, ones) -> sub_true_graph_0
output: name='sub_true_graph_0' type=dtype('float32') shape=[1024, 1024]
----- subgraph ---- If - node_If_4 - att.else_branch=G2 -- level=1 --  -> add_false_graph_0
Cos(x) -> cos
Add(cos, ones_2) -> add_false_graph_0
output: name='add_false_graph_0' type=dtype('float32') shape=[1024, 1024]

script

FAILED

Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.

ControlFlowCondNestedModule

forward

def forward(self, x):
    def true_fn(x):
        return self.submodule(x)

    def false_fn(x):
        return x - self.weight

    y = torch.cond(x.sum() > 0, true_fn, false_fn, [x])
    return y

custom-fallback

FAILED

'GraphProto' object is not iterable

custom-dec

FAILED

'GraphProto' object is not iterable

custom-tracing

FAILED

aten_meth_sum() missing 1 required positional argument: 'axis'

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('int64') shape=[2]
init: name='weight' type=float32 shape=(1,) -- array([42.], dtype=float32)
init: name='submodule.weight' type=float32 shape=(1,) -- array([100.], dtype=float32)
Constant(value=0) -> val_0
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
  Greater(sum_1, val_0) -> gt
    If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=[2]
----- subgraph ---- If - node_If_3 - att.then_branch=G1 -- level=1 --  -> getitem_true_graph_0
Abs(x) -> abs_1
  ReduceSum(abs_1, noop_with_empty_axes=0, keepdims=0) -> sum_1_2
Constant(value=100) -> val_0_2
  Greater(sum_1_2, val_0_2) -> gt_2
    If(gt_2, then_branch=G3, else_branch=G4) -> getitem_true_graph_0
output: name='getitem_true_graph_0' type='NOTENSOR' shape=None
----- subgraph ---- If - node_If_4 - att.then_branch=G3 -- level=2 --  -> mul_true_graph_0__true_graph_0
Cast(x, to=1) -> convert_element_type_default
Mul(convert_element_type_default, submodule.weight) -> mul_true_graph_0__true_graph_0
output: name='mul_true_graph_0__true_graph_0' type=dtype('float32') shape=[2]
----- subgraph ---- If - node_If_4 - att.else_branch=G4 -- level=2 --  -> div_true_graph_0__false_graph_0
Cast(x, to=1) -> convert_element_type_default_2
Div(convert_element_type_default_2, submodule.weight) -> div_true_graph_0__false_graph_0
output: name='div_true_graph_0__false_graph_0' type=dtype('float32') shape=[2]
----- subgraph ---- If - node_If_4 - att.then_branch=G3 -- level=1 --  -> mul_true_graph_0__true_graph_0
Cast(x, to=1) -> convert_element_type_default
Mul(convert_element_type_default, submodule.weight) -> mul_true_graph_0__true_graph_0
output: name='mul_true_graph_0__true_graph_0' type=dtype('float32') shape=[2]
----- subgraph ---- If - node_If_4 - att.else_branch=G4 -- level=1 --  -> div_true_graph_0__false_graph_0
Cast(x, to=1) -> convert_element_type_default_2
Div(convert_element_type_default_2, submodule.weight) -> div_true_graph_0__false_graph_0
output: name='div_true_graph_0__false_graph_0' type=dtype('float32') shape=[2]
----- subgraph ---- If - node_If_3 - att.else_branch=G2 -- level=1 --  -> sub_false_graph_0
Cast(x, to=1) -> convert_element_type_default_3
Sub(convert_element_type_default_3, weight) -> sub_false_graph_0
output: name='sub_false_graph_0' type=dtype('float32') shape=[2]

script

FAILED

Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.

ControlFlowCondNonZero

forward

def forward(self, input_ids, image_features, vocab_size):
    def then_branch(input_ids, image_features, vocab_size):
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])

        condition = (input_ids < 0) & (input_ids > -int(1e9))
        positions = torch.nonzero(condition, as_tuple=True)
        input_ids = input_ids.clamp_min(0).clamp_max(vocab_size)
        return (input_ids, positions[0], positions[1])

    def else_branch(input_ids, image_features, vocab_size):
        r = torch.where(torch.zeros((1, 1), dtype=torch.bool))
        return (input_ids, r[0], r[1])

    a, b, c = torch.cond(
        image_features.numel() > 0,
        then_branch,
        else_branch,
        [input_ids, image_features, vocab_size],
    )
    return a, b, c

custom-fallback

opset: domain='' version=18
input: name='input_ids' type=dtype('int64') shape=[2, 12]
input: name='image_features' type=dtype('float32') shape=[2, 16]
input: name='vocab_size' type=dtype('int64') shape=[1]
init: name='init7_s2_-1_12' type=int64 shape=(2,) -- array([-1, 12])  -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape
init: name='init7_s1_1025' type=int64 shape=(1,) -- array([1025])     -- Opset.make_node.1/Shape
init: name='_reshape_init7_s_00' type=int64 shape=(1,) -- array([0])  -- GraphBuilder.constant_folding.from/fold(init7_s1_1,init7_s_0)##init7_s_0/shape_type_compute._cast_inputs.1(lt_Scalar)##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_reshape_init7_s_-10000000000' type=int64 shape=(1,) -- array([-1000000000])-- GraphBuilder.constant_folding.from/fold(init7_s1_1,init7_s_-1000000000)##init7_s_-1000000000/shape_type_compute._cast_inputs.1(gt_Scalar)##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
Reshape(input_ids, init7_s2_-1_12) -> view
  Less(view, _reshape_init7_s_00) -> lt
Greater(view, _reshape_init7_s_-10000000000) -> gt
  And(lt, gt) -> and_1
    NonZero(and_1) -> _onx_nonzero_and_10
      Split(_onx_nonzero_and_10, num_outputs=2) -> _onx_split_nonzero_and_100, _onx_split_nonzero_and_101
        Reshape(_onx_split_nonzero_and_100, init7_s1_-1) -> output_1
Reshape(_onx_split_nonzero_and_101, init7_s1_-1) -> output_2
Clip(view, init7_s1_0, init7_s1_1025) -> output_0
output: name='output_0' type=dtype('int64') shape=[2, 12]
output: name='output_1' type=dtype('int64') shape=['u2']
output: name='output_2' type=dtype('int64') shape=['u2']

custom-dec

opset: domain='' version=18
input: name='input_ids' type=dtype('int64') shape=[2, 12]
input: name='image_features' type=dtype('float32') shape=[2, 16]
input: name='vocab_size' type=dtype('int64') shape=[1]
init: name='init7_s2_-1_12' type=int64 shape=(2,) -- array([-1, 12])  -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape
init: name='init7_s1_1025' type=int64 shape=(1,) -- array([1025])     -- Opset.make_node.1/Shape
init: name='_reshape_init7_s_00' type=int64 shape=(1,) -- array([0])  -- GraphBuilder.constant_folding.from/fold(init7_s1_1,init7_s_0)##init7_s_0/shape_type_compute._cast_inputs.1(lt_Scalar)##shape_type_compute._cast_inputs.1(ge)##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_reshape_init7_s_-10000000000' type=int64 shape=(1,) -- array([-1000000000])-- GraphBuilder.constant_folding.from/fold(init7_s1_1,init7_s_-1000000000)##init7_s_-1000000000/shape_type_compute._cast_inputs.1(gt_Scalar)##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
Reshape(input_ids, init7_s2_-1_12) -> view
  Less(view, _reshape_init7_s_00) -> lt
Greater(view, _reshape_init7_s_-10000000000) -> gt
  And(lt, gt) -> and_1
    NonZero(and_1) -> _onx_nonzero_and_10
      Split(_onx_nonzero_and_10, num_outputs=2) -> _onx_split_nonzero_and_100, _onx_split_nonzero_and_101
        Reshape(_onx_split_nonzero_and_100, init7_s1_-1) -> output_1
Reshape(_onx_split_nonzero_and_101, init7_s1_-1) -> output_2
Clip(view, init7_s1_0, init7_s1_1025) -> output_0
output: name='output_0' type=dtype('int64') shape=[2, 12]
output: name='output_1' type=dtype('int64') shape=['u4']
output: name='output_2' type=dtype('int64') shape=['u4']

custom-tracing

FAILED

val is None for node=output, output=(getitem, getitem_1, getitem_2), a='getitem', o='output_0', has_type=False, has_rank=False, has_shape=False, 
meta={}
node.__dict__={}
--DEBUG--
[GraphBuilder-ZPG] Message starts, there are 2 initializers, 11 nodes, 3 inputs, 3 outputs.
--LOCAL FUNCTIONS--
    local_functions,_cb_cond_then_branch_0(['x', 'y', 'z']) -> ['output_0', 'output_1', 'output_2']
    local_functions,_cb_cond_else_branch_0(['x', 'y', 'z']) -> ['output_0', 'output_1', 'output_2']
--SHAPE--
_dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_dimensions_source_flat=None
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes=None
_known_shapes={'_reshape_init7_s_00': (1,),
 '_reshape_numel0': (1,),
 'gt': (),
 'image_features': (2, 16),
 'init7_s1_1': (1,),
 'init7_s_0': (),
 'input_ids': (2, 12),
 'numel': (),
 'vocab_size': ()}
_known_types={'_reshape_init7_s_00': 7,
 '_reshape_numel0': 7,
 'gt': 9,
 'image_features': 1,
 'init7_s1_1': 7,
 'init7_s_0': 7,
 'input_ids': 7,
 'numel': 7,
 'vocab_size': 7}
_known_value_shape={}
_known_constants=['_reshape_init7_s_00', 'init7_s1_1', 'init7_s_0']
_known_ranks={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
    _cb_cond_else_branch_0 -> {condcc}
    _cb_cond_then_branch_0 -> {condcc}
    condcc -> {getitem_2, getitem, getitem_1}
    getitem -> {output}
    getitem_1 -> {output}
    getitem_2 -> {output}
    gt -> {condcc}
    image_features -> {condcc, numel}
    input_ids -> {condcc}
    numel -> {gt}
    output -> set()
    vocab_size -> {condcc}
--TORCH-SHAPES--
    input_ids: ('run_node', (('example_value', torch.int64, torch.Size([2, 12])), '')) --- 7:2:(2, 12):
    image_features: ('run_node', (('example_value', torch.float32, torch.Size([2, 16])), '')) --- 1:2:(2, 16):
    vocab_size: ('run_node', (('example_value', torch.int64, torch.Size([])), '')) --- 7:0:():
    numel: ('run_node', ('', '')) --- 7:0:():
    gt: ('run_node', ('', '')) --- 9:0:():
    _cb_cond_then_branch_0: ('run_node', ('', '')) --- :::
    _cb_cond_else_branch_0: ('run_node', ('', '')) --- :::
    condcc: ('run_node', ('', '')) --- :::
    getitem: ('run_node', ('', '')) --- :::
    getitem_1: ('run_node', ('', '')) --- :::
    getitem_2: ('run_node', ('', '')) --- :::
    output: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(strict=True, tracing=True, aten_as_function={'aten.scaled_dot_product_attention.default'})
-- process.graph_module --
GraphModule()



def forward(self, input_ids, image_features, vocab_size):
    numel = image_features.numel()
    gt = numel > 0;  numel = None
    _cb_cond_then_branch_0 = self._cb_cond_then_branch_0
    _cb_cond_else_branch_0 = self._cb_cond_else_branch_0
    condcc = torch.ops.higher_order.cond(gt, _cb_cond_then_branch_0, _cb_cond_else_branch_0, [input_ids, image_features, vocab_size]);  gt = _cb_cond_then_branch_0 = _cb_cond_else_branch_0 = input_ids = image_features = vocab_size = None
    getitem = condcc[0]
    getitem_1 = condcc[1]
    getitem_2 = condcc[2];  condcc = None
    return (getitem, getitem_1, getitem_2)

# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
    %input_ids : [num_users=1] = placeholder[target=input_ids]
    %image_features : [num_users=2] = placeholder[target=image_features]
    %vocab_size : [num_users=1] = placeholder[target=vocab_size]
    %numel : [num_users=1] = call_method[target=numel](args = (%image_features,), kwargs = {})
    %gt : [num_users=1] = call_function[target=operator.gt](args = (%numel, 0), kwargs = {})
    %_cb_cond_then_branch_0 : [num_users=1] = get_attr[target=_cb_cond_then_branch_0]
    %_cb_cond_else_branch_0 : [num_users=1] = get_attr[target=_cb_cond_else_branch_0]
    %condcc : [num_users=3] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %_cb_cond_then_branch_0, %_cb_cond_else_branch_0, [%input_ids, %image_features, %vocab_size]), kwargs = {})
    %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%condcc, 0), kwargs = {})
    %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%condcc, 1), kwargs = {})
    %getitem_2 : [num_users=1] = call_function[target=operator.getitem](args = (%condcc, 2), kwargs = {})
    return (getitem, getitem_1, getitem_2)
-- process.progress --
node 11/12 target=output
--
[GraphBuilder-ZPG.make_tensor_input] input_ids[7:2x12]
[GraphBuilder-ZPG.make_tensor_input] image_features[1:2x16]
[GraphBuilder-ZPG.make_tensor_input] vocab_size[7:]
[GraphBuilder-ZPG.make_initializer] init7_s_0[int64:int64:[0]] - SOURCE: shape_type_compute._cast_inputs.1(gt)
[GraphBuilder-ZPG.make_initializer] init7_s1_1[int64:int64:[1]] - SOURCE: Opset.make_node.1/Shape##Opset.make_node.1/Shape
[GraphBuilder-ZPG.make_node] meth_numel      [#:#   ] Size:['image_features']->['numel']
[GraphBuilder-ZPG.make_node] gt              [##:#  ] Reshape:['numel', 'init7_s1_1']->['_reshape_numel0']
[GraphBuilder-ZPG.make_node] gt2             [##:#  ] Reshape:['init7_s_0', 'init7_s1_1']->['_reshape_init7_s_00']
[GraphBuilder-ZPG.make_node] gt3             [##:#  ] Greater:['numel', 'init7_s_0']->['gt']
[GraphBuilder-ZPG.make_node] cond            [#:--- ] If:['gt']->['condcc#0', 'condcc#1', 'condcc#2']
[GraphBuilder-ZPG.make_node] getitemB_tuple  [-:-   ] Identity:['condcc#0']->['getitem']
[GraphBuilder-ZPG.make_node] getitemB_tuple2 [-:-   ] Identity:['condcc#1']->['getitem_1']
[GraphBuilder-ZPG.make_node] getitemB_tuple3 [-:-   ] Identity:['condcc#2']->['getitem_2']
[GraphBuilder-ZPG.make_node] .output         [-:-   ] Identity:['getitem']->['output_0']
[GraphBuilder-ZPG.make_node] .output2        [-:-   ] Identity:['getitem_1']->['output_1']
[GraphBuilder-ZPG.make_node] .output3        [-:-   ] Identity:['getitem_2']->['output_2']
[GraphBuilder-ZPG] Message completed, there are 2 initializers, 11 nodes, 3 inputs, 3 outputs.

dynamo-ir

FAILED

Input mismatch, inputs[0]=(T7r2,T1r2,int) but names=['input_ids', 'image_features'], model=ControlFlowCondNonZero, export='dynamo-ir'

script

FAILED

Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.

ControlFlowNestCond

forward

def forward(self, x):
    def true_fn2(x):
        def true_fn1(x):
            return torch.sin(x)

        def false_fn1(x):
            return torch.cos(x)

        return torch.cond(x.sum() < 0, true_fn1, false_fn1, [x])

    def false_fn2(x):
        return -x

    return torch.cond(x.sum() > 0, true_fn2, false_fn2, [x])
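
The nested torch.cond above is what produces the nested If subgraphs (G1 to G4) in the dynamo-ir export below. A plain-Python reading of the same logic, useful only as an eager reference since Python if statements on tensor predicates cannot be exported as control flow:

import torch

def reference(x):
    # eager-only equivalent of the nested torch.cond above
    if x.sum() > 0:
        return torch.sin(x) if x.sum() < 0 else torch.cos(x)
    return -x

x = torch.randn(5, 3)
print(reference(x).shape)  # (5, 3), the shape reported by the exported graph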

custom-fallback

FAILED

'GraphProto' object is not iterable

custom-dec

FAILED

'GraphProto' object is not iterable

custom-tracing

FAILED

aten_meth_sum() missing 1 required positional argument: 'axis'

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[5, 3]
Constant(value=0.0) -> scalar_tensor_default
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
  Greater(sum_1, scalar_tensor_default) -> gt
    If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4 - att.then_branch=G1 -- level=1 --  -> getitem_true_graph_0
Constant(value=0.0) -> scalar_tensor_default_2
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1_2
  Less(sum_1_2, scalar_tensor_default_2) -> lt
    If(lt, then_branch=G3, else_branch=G4) -> getitem_true_graph_0
output: name='getitem_true_graph_0' type='NOTENSOR' shape=None
----- subgraph ---- If - node_If_4_2 - att.then_branch=G3 -- level=2 --  -> sin_true_graph_0__true_graph_0
Sin(x) -> sin_true_graph_0__true_graph_0
output: name='sin_true_graph_0__true_graph_0' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4_2 - att.else_branch=G4 -- level=2 --  -> cos_true_graph_0__false_graph_0
Cos(x) -> cos_true_graph_0__false_graph_0
output: name='cos_true_graph_0__false_graph_0' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4_2 - att.then_branch=G3 -- level=1 --  -> sin_true_graph_0__true_graph_0
Sin(x) -> sin_true_graph_0__true_graph_0
output: name='sin_true_graph_0__true_graph_0' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4_2 - att.else_branch=G4 -- level=1 --  -> cos_true_graph_0__false_graph_0
Cos(x) -> cos_true_graph_0__false_graph_0
output: name='cos_true_graph_0__false_graph_0' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4 - att.else_branch=G2 -- level=1 --  -> neg_false_graph_0
Neg(x) -> neg_false_graph_0
output: name='neg_false_graph_0' type=dtype('float32') shape=[5, 3]

script

FAILED

Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.

ControlFlowScan

forward

def forward(self, x):
    init = torch.zeros_like(x[0])
    carry, out = torch.ops.higher_order.scan(
        ControlFlowScan.add, [init], [x], additional_inputs=[]
    )
    return carry
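
The combine function ControlFlowScan.add is not reproduced here, but the combine graph shown in the dynamo-ir failure below returns [add, add]: the new carry and the per-step output are both carry + x[i]. A hand-written loop with the same semantics, as a reference sketch only (manual_scan is not part of the tested API):

import torch

def manual_scan(combine, init, xs):
    # reference semantics of torch.ops.higher_order.scan over dim 0
    carry, outs = init, []
    for i in range(xs.shape[0]):
        carry, out = combine(carry, xs[i])
        outs.append(out)
    return carry, torch.stack(outs)

x = torch.arange(9, dtype=torch.float32).reshape(3, 3)
carry, out = manual_scan(lambda c, y: (c + y, c + y), torch.zeros_like(x[0]), x)
print(carry)  # column-wise sum of x, the value forward returns above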

custom-fallback

opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=[3, 3]
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3])           -- Opset.make_node.1/Shape
ConstantOfShape(init7_s1_3, value=[0.0]) -> zeros_like
  Scan(zeros_like, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> output_0, scan#1
output: name='output_0' type=dtype('float32') shape=[3]
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_zeros_like,scan_0_x -> output_0,output_1
input: name='init_0_zeros_like' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
Add(init_0_zeros_like, scan_0_x) -> output_0
  Identity(output_0) -> output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None

custom-dec

FAILED

scan must be captured completely with torch.compile. Scroll up to find out what causes the graph break.

from user code:
   File "~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 377, in forward
    carry, out = torch.ops.higher_order.scan(

Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"

custom-tracing

FAILED

Unable to symbolically trace HigherOrderOperators

dynamo-ir

FAILED

Failed to decompose the FX graph for ONNX compatibility. This is step 2/3 of exporting the model to ONNX. Next steps:
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.

## Exception summary

<class 'RuntimeError'>: scan might be aliasing the input or the output!

While executing %scan : [num_users=2] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like], [%x], ()), kwargs = {})
GraphModule: class GraphModule(torch.nn.Module):
    def forward(self, x):
        x: "f32[3, 3][3, 1]"; 

        x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
         # File: ~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py:376 in forward, code: init = torch.zeros_like(x[0])
        select: "f32[3][1]" = torch.ops.aten.select.int(x, 0, 0)
        zeros_like: "f32[3][1]" = torch.ops.aten.zeros_like.default(select, pin_memory = False);  select = None
    
         # File: ~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py:377 in forward, code: carry, out = torch.ops.higher_order.scan(
        scan_combine_graph_0 = self.scan_combine_graph_0
        scan = torch.ops.higher_order.scan(scan_combine_graph_0, [zeros_like], [x], ());  scan_combine_graph_0 = zeros_like = x = None
        getitem: "f32[3][1]" = scan[0]
        getitem_1: "f32[3, 3][3, 1]" = scan[1];  scan = getitem_1 = None
        return pytree.tree_unflatten((getitem,), self._out_spec)
    
    class scan_combine_graph_0(torch.nn.Module):
        def forward(self, carry_1: "f32[3][1]", y_1: "f32[3][1]"):
             # File: ~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py:377 in forward, code: carry, out = torch.ops.higher_order.scan(
            add: "f32[3][1]" = torch.ops.aten.add.Tensor(carry_1, y_1);  carry_1 = y_1 = None
            return [add, add]
        

Original traceback:
File "~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 377, in forward
    carry, out = torch.ops.higher_order.scan(

(Refer to the full stack trace above for more information.)

script

FAILED

could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)

ControlFlowScan2Carried

forward

def forward(self, x):
    init1 = torch.zeros_like(x[0])
    init2 = torch.ones_like(x[0])
    carry1, carry2, out1, out2 = torch.ops.higher_order.scan(
        ControlFlowScan2Carried.add,
        [init1, init2],
        [x, x * 2],
        # dim=0,  # 01/31/2025, not supported anymore
        additional_inputs=[],
    )
    return carry1, carry2, out1, out2
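
Reading scan_combine_graph_0 in the dynamo-ir failure below (add = carry1 + y1, mul = carry2 * y2, returning [add, mul, add, mul]), the two carried states accumulate a running sum over x and a running product over x * 2. An eager check of that reading, using only standard torch:

import torch

x = torch.randn(3, 4)
out1, out2 = torch.cumsum(x, 0), torch.cumprod(x * 2, 0)  # the two scan outputs
carry1, carry2 = x.sum(0), (x * 2).prod(0)                # the two final carries
print(out1.shape, out2.shape, carry1.shape, carry2.shape)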

custom-fallback

opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_reshape_init1_s_0' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.1(mul_Tensor)##init7_s1_1/Opset.make_node.1/Shape
ConstantOfShape(init7_s1_4, value=[1.0]) -> ones_like
ConstantOfShape(init7_s1_4, value=[0.0]) -> zeros_like
Mul(x, _reshape_init1_s_0) -> _onx_mul_x0
  Scan(zeros_like, ones_like, x, _onx_mul_x0, body=G1, num_scan_inputs=2, scan_input_directions=[0,0], scan_output_axes=[0,0], scan_output_directions=[0,0]) -> output_0, output_1, output_2, output_3
output: name='output_0' type=dtype('float32') shape=[4]
output: name='output_1' type=dtype('float32') shape=[4]
output: name='output_2' type=dtype('float32') shape=[3, 4]
output: name='output_3' type=dtype('float32') shape=[3, 4]
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_zeros_like,init_1_ones_like,scan_0_x,scan_1_mul -> output_0,output_1,output_2,output_3
input: name='init_0_zeros_like' type='NOTENSOR' shape=None
input: name='init_1_ones_like' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
input: name='scan_1_mul' type='NOTENSOR' shape=None
Add(init_0_zeros_like, scan_0_x) -> output_0
  Identity(output_0) -> output_2
Mul(init_1_ones_like, scan_1_mul) -> output_1
  Identity(output_1) -> output_3
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
output: name='output_2' type='NOTENSOR' shape=None
output: name='output_3' type='NOTENSOR' shape=None

custom-dec

FAILED

scan must be captured completely with torch.compile. Scroll up to find out what causes the graph break.

from user code:
   File "~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 397, in forward
    carry1, carry2, out1, out2 = torch.ops.higher_order.scan(

Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"

custom-tracing

FAILED

Unable to symbolically trace HigherOrderOperators

dynamo-ir

FAILED

Failed to decompose the FX graph for ONNX compatibility. This is step 2/3 of exporting the model to ONNX. Next steps:
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.

## Exception summary

<class 'RuntimeError'>: scan might be aliasing the input or the output!

While executing %scan : [num_users=4] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like, %ones_like], [%x, %mul], ()), kwargs = {})
GraphModule: class GraphModule(torch.nn.Module):
    def forward(self, x):
        x: "f32[3, 4][4, 1]"; 

        x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
         # File: ~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py:395 in forward, code: init1 = torch.zeros_like(x[0])
        select: "f32[4][1]" = torch.ops.aten.select.int(x, 0, 0)
        zeros_like: "f32[4][1]" = torch.ops.aten.zeros_like.default(select, pin_memory = False);  select = None
    
         # File: ~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py:396 in forward, code: init2 = torch.ones_like(x[0])
        select_1: "f32[4][1]" = torch.ops.aten.select.int(x, 0, 0)
        ones_like: "f32[4][1]" = torch.ops.aten.ones_like.default(select_1, pin_memory = False);  select_1 = None
    
         # File: ~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py:400 in forward, code: [x, x * 2],
        mul: "f32[3, 4][4, 1]" = torch.ops.aten.mul.Tensor(x, 2)
    
         # File: ~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py:397 in forward, code: carry1, carry2, out1, out2 = torch.ops.higher_order.scan(
        scan_combine_graph_0 = self.scan_combine_graph_0
        scan = torch.ops.higher_order.scan(scan_combine_graph_0, [zeros_like, ones_like], [x, mul], ());  scan_combine_graph_0 = zeros_like = ones_like = x = mul = None
        getitem: "f32[4][1]" = scan[0]
        getitem_1: "f32[4][1]" = scan[1]
        getitem_2: "f32[3, 4][4, 1]" = scan[2]
        getitem_3: "f32[3, 4][4, 1]" = scan[3];  scan = None
        return pytree.tree_unflatten((getitem, getitem_1, getitem_2, getitem_3), self._out_spec)
    
    class scan_combine_graph_0(torch.nn.Module):
        def forward(self, carry1_1: "f32[4][1]", carry2_1: "f32[4][1]", y1_1: "f32[4][1]", y2_1: "f32[4][1]"):
             # File: ~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py:397 in forward, code: carry1, carry2, out1, out2 = torch.ops.higher_order.scan(
            add: "f32[4][1]" = torch.ops.aten.add.Tensor(carry1_1, y1_1);  carry1_1 = y1_1 = None
            mul: "f32[4][1]" = torch.ops.aten.mul.Tensor(carry2_1, y2_1);  carry2_1 = y2_1 = None
            return [add, mul, add, mul]
        

Original traceback:
File "~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 397, in forward
    carry1, carry2, out1, out2 = torch.ops.higher_order.scan(

(Refer to the full stack trace above for more information.)

script

FAILED

could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)

ControlFlowScanCDist

forward

def forward(self, x):
    carry, out = torch.ops.higher_order.scan(
        ControlFlowScanCDist.dist,
        [x],
        [x],
        # dim=0,  # 01/31/2025, not supported anymore
        additional_inputs=[],
    )
    return out
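
ControlFlowScanCDist.dist is not reproduced here, but the Scan body in the custom-fallback export below reshapes each scanned row to (1, -1), subtracts it from the carried x, squares, sums over axis 1 and takes the square root, i.e. it builds the pairwise distance matrix of x with itself. Eagerly the same values come from torch.cdist:

import torch

x = torch.randn(3, 4)
print(torch.cdist(x, x))  # (3, 3), the matrix the exported Scan assembles row by row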

custom-fallback

opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=[3, 4]
Scan(x, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=[3, 3]
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_x,scan_0_x -> output_0,output_1
input: name='init_0_x' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
Constant(value=[1]) -> init7_s1_12
Constant(value=[1, -1]) -> init7_s2_1_-12
  Reshape(scan_0_x, init7_s2_1_-12) -> reshape2
    Sub(init_0_x, reshape2) -> sub2
      Mul(sub2, sub2) -> mul2
  ReduceSum(mul2, init7_s1_12, keepdims=0) -> sum_12
    Sqrt(sum_12) -> output_1
Identity(init_0_x) -> output_0
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None

custom-dec

FAILED

scan must be captured completely with torch.compile. Scroll up to find out what causes the graph break.

from user code:
   File "~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 422, in forward
    carry, out = torch.ops.higher_order.scan(

Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"

custom-tracing

FAILED

Unable to symbolically trace HigherOrderOperators

dynamo-ir

FAILED

Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and submit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.

## Exception summary

<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7feadabb0170>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%x], [%x], ()), kwargs = {}). See the stack trace for more information.

(Refer to the full stack trace above for more information.)

script

FAILED

could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)

ControlFlowScanCDist2

forward

def forward(self, x):
    z = torch.tensor([0], dtype=torch.float32)
    y = x.clone()
    out = torch.ops.higher_order.scan(
        ControlFlowScanCDist2.dist,
        [z],
        [x],
        # dim=0,  # 01/31/2025, not supported anymore
        additional_inputs=[y],
    )
    return out[1]
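
additional_inputs=[y] is not scanned over: in the custom-fallback export below it becomes hidden_input_scan_0_clone, an outer-scope value read directly inside the Scan body. A loop with the same closure-like semantics, as a sketch only:

import torch

x = torch.randn(3, 4)
y = x.clone()  # the additional input, visible in full at every step
out = torch.stack(
    [torch.sqrt(((y - x[i].reshape(1, -1)) ** 2).sum(dim=1)) for i in range(x.shape[0])]
)
print(out.shape)  # (3, 3), matching output_0 of the exported model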

custom-fallback

opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='c_lifted_tensor_0' type=float32 shape=(1,) -- array([0.], dtype=float32)-- DynamoInterpret.placeholder.0
Identity(x) -> hidden_input_scan_0_clone
Scan(c_lifted_tensor_0, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=[3, 3]
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_detach_,scan_0_x -> output_0,output_1
input: name='init_0_detach_' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
Constant(value=[1]) -> init7_s1_12
Constant(value=[1, -1]) -> init7_s2_1_-12
  Reshape(scan_0_x, init7_s2_1_-12) -> reshape2
Sub(hidden_input_scan_0_clone, reshape2) -> sub2
  Mul(sub2, sub2) -> mul2
  ReduceSum(mul2, init7_s1_12, keepdims=0) -> sum_12
    Sqrt(sum_12) -> output_1
Identity(init_0_detach_) -> output_0
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None

custom-dec

FAILED

scan must be captured completely with torch.compile. Scroll up to find out what causes the graph break.

from user code:
   File "~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 449, in forward
    out = torch.ops.higher_order.scan(

Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"

custom-tracing

FAILED

(CustomProxy(clone),) can only be of (<class 'torch.Tensor'>, <class 'int'>, <class 'torch.SymInt'>) but got (<class 'experimental_experiment.torch_interpreter.tracing.CustomProxy'>,)

dynamo-ir

FAILED

Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and submit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.

## Exception summary

<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7feadabb0170>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%clone], [%x], (%clone_1,)), kwargs = {}). See the stack trace for more information.

(Refer to the full stack trace above for more information.)

script

FAILED

could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)

ControlFlowScanCDistXY

forward

def forward(self, x, y):
    carry, out = torch.ops.higher_order.scan(
        ControlFlowScanCDistXY.dist,
        [y],
        [x],
        # dim=0,  # 01/31/2025, not supported anymore
        additional_inputs=[],
    )
    return out

custom-fallback

opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=[3, 4]
input: name='y' type=dtype('float32') shape=[5, 4]
Scan(y, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=[3, 5]
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_y,scan_0_x -> output_0,output_1
input: name='init_0_y' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
Constant(value=[1]) -> init7_s1_12
Constant(value=[1, -1]) -> init7_s2_1_-12
  Reshape(scan_0_x, init7_s2_1_-12) -> reshape2
    Sub(init_0_y, reshape2) -> sub2
      Mul(sub2, sub2) -> mul2
  ReduceSum(mul2, init7_s1_12, keepdims=0) -> sum_12
    Sqrt(sum_12) -> output_1
Identity(init_0_y) -> output_0
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None

custom-dec

FAILED

scan must be captured completely with torch.compile. Scroll up to find out what causes the graph break.

from user code:
   File "~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 475, in forward
    carry, out = torch.ops.higher_order.scan(

Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"

custom-tracing

FAILED

Unable to symbolically trace HigherOrderOperators

dynamo-ir

FAILED

Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and submit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.

## Exception summary

<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7feadabb0170>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%y], [%x], ()), kwargs = {}). See the stack trace for more information.

(Refer to the full stack trace above for more information.)

script

FAILED

could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)

CreateFromShape

forward

def forward(self, x):
    y = torch.ones((x.shape[0], x.shape[1] + 1))
    return y
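
With the static shapes used on this page, the successful exporters below fold the size computation into a constant of shape (4, 5). Keeping the first axis symbolic requires exporting with a dynamic dimension; a minimal sketch with torch.export, independent of the exporters benchmarked here:

import torch

class CreateFromShape(torch.nn.Module):
    def forward(self, x):
        return torch.ones((x.shape[0], x.shape[1] + 1))

x = torch.randn(4, 4)
batch = torch.export.Dim("batch")
ep = torch.export.export(CreateFromShape(), (x,), dynamic_shapes=({0: batch},))
print(ep)  # the first output dimension stays symbolic instead of being baked to 4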

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 4]
init: name='init7_s2_4_5' type=int64 shape=(2,) -- array([4, 5])      -- Opset.make_node.1/Shape
ConstantOfShape(init7_s2_4_5, value=[1.0]) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 5]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 4]
init: name='init7_s2_4_5' type=int64 shape=(2,) -- array([4, 5])      -- Opset.make_node.1/Shape
ConstantOfShape(init7_s2_4_5, value=[1.0]) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 5]

custom-tracing

FAILED

ones(): argument 'size' (position 1) must be tuple of ints, but found element of type CustomProxy at pos 0

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 4]
Constant(value=[[1.0, 1.0...) -> ones
output: name='ones' type=dtype('float32') shape=[4, 5]

script

FAILED

Input mismatch, inputs[0]=(T1r2,) but names=[], model=CreateFromShape, export='script'

CreateFromShapeThroughFunction

forward

def forward(self, x):
    dy1 = CreateFromShapeThroughFunction.add_one(x.shape[1])
    y = torch.ones((x.shape[0], dy1))
    return y

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 4]
init: name='init7_s2_4_5' type=int64 shape=(2,) -- array([4, 5])      -- Opset.make_node.1/Shape
ConstantOfShape(init7_s2_4_5, value=[1.0]) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 5]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 4]
init: name='init7_s2_4_5' type=int64 shape=(2,) -- array([4, 5])      -- Opset.make_node.1/Shape
ConstantOfShape(init7_s2_4_5, value=[1.0]) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 5]

custom-tracing

FAILED

ones(): argument 'size' (position 1) must be tuple of ints, but found element of type CustomProxy at pos 0

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 4]
Constant(value=[[1.0, 1.0...) -> ones
output: name='ones' type=dtype('float32') shape=[4, 5]

script

FAILED

Input mismatch, inputs[0]=(T1r2,) but names=[], model=CreateFromShapeThroughFunction, export='script'

CropLastDimensionWithTensorContent

forward

def forward(self, x, shape):
    return x[..., : shape[0]]
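
The crop bound lives in the content of shape, so the torch.export-based paths below have to materialize a data-dependent scalar (the u0 mentioned in the failures), while the next case, CropLastDimensionWithTensorShape, takes the bound from a tensor shape and avoids the issue. Eagerly both spellings select the same slice:

import torch

x = torch.randn(3, 4, 4)
shape = torch.tensor([2])  # bound stored in tensor content (this case)
y = torch.randn(2)         # bound carried by a tensor shape (next case)
assert torch.equal(x[..., : shape[0]], x[..., : y.shape[0]])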

custom-fallback

def forward(self, arg0_1: "f32[3, 4, 4]", arg1_1: "i64[1]"):
     # File: ~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py:865 in forward, code: return x[..., : shape[0]]
    select: "i64[]" = torch.ops.aten.select.int(arg1_1, 0, 0);  arg1_1 = None
    item: "Sym(u0)" = torch.ops.aten.item.default(select);  select = item = None

def forward(self, arg0_1: "f32[s35, s16, s90]", arg1_1: "i64[1]"):
     # No stacktrace found for following nodes
    select: "i64[]" = torch.ops.aten.select.int(arg1_1, 0, 0);  arg1_1 = None
    _to_copy: "i32[]" = torch.ops.aten._to_copy.default(select, dtype = torch.int32);  select = None
    _local_scalar_dense: "Sym(u0)" = torch.ops.aten._local_scalar_dense.default(_to_copy);  _to_copy = None
    slice_1 = torch.ops.aten.slice.Tensor(arg0_1, 2, 0, _local_scalar_dense);  arg0_1 = _local_scalar_dense = slice_1 = None

FAILED

None of the following options [ExportOptions(strict=True, aten_as_function={'aten.scaled_dot_product_attention.default'}), ExportOptions(aten_as_function={'aten.scaled_dot_product_attention.default'}), ExportOptions(strict=True, decomposition_table='default', aten_as_function={'aten.scaled_dot_product_attention.default'}), ExportOptions(decomposition_table='default', aten_as_function={'aten.scaled_dot_product_attention.default'}), ExportOptions(dynamo=True, aten_as_function={'aten.scaled_dot_product_attention.default'}), ExportOptions(decomposition_table='default', dynamo=True, aten_as_function={'aten.scaled_dot_product_attention.default'}), ExportOptions(jit=True, aten_as_function={'aten.scaled_dot_product_attention.default'})] worked, args=(T1r3,T7r1), kwargs=None, exception=
-----
[(ExportOptions(strict=True, aten_as_function={'aten.scaled_dot_product_attention.default'}),
  Unsupported('Dynamic slicing with Tensor arguments\n  Explanation: Creating slices with Tensor arguments is not supported. e.g. `l[:x]`, where `x` is a 1-element tensor.\n  Hint: It may be possible to write Dynamo tracing rules for this code. Please report an issue to PyTorch if you encounter this graph break often and it is causing performance issues.\n\n  Developer debug context: SliceVariable start: ConstantVariable(NoneType: None), stop: TensorVariable(), step: ConstantVariable(NoneType: None)\n\n\nfrom user code:\n   File "~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 865, in forward\n    return x[..., : shape[0]]\n\nSet TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you\'re reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"\n')),
 (ExportOptions(aten_as_function={'aten.scaled_dot_product_attention.default'}),
  GuardOnDataDependentSymNode('Could not extract specialized integer from data-dependent expression u0 (unhinted: u0).  (Size-like symbols: none)\n\nCaused by: (_export/non_strict_utils.py:973 in __torch_function__)\nFor more information, run with TORCH_LOGS="dynamic"\nFor extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"\nIf you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\nFor more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing\n\nFor C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\n\nThe following call raised this error:\n  File "~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 865, in forward\n    return x[..., : shape[0]]\n\n\nThe error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.')),
 (ExportOptions(strict=True, decomposition_table='default', aten_as_function={'aten.scaled_dot_product_attention.default'}),
  Unsupported('Dynamic slicing with Tensor arguments\n  Explanation: Creating slices with Tensor arguments is not supported. e.g. `l[:x]`, where `x` is a 1-element tensor.\n  Hint: It may be possible to write Dynamo tracing rules for this code. Please report an issue to PyTorch if you encounter this graph break often and it is causing performance issues.\n\n  Developer debug context: SliceVariable start: ConstantVariable(NoneType: None), stop: TensorVariable(), step: ConstantVariable(NoneType: None)\n\n\nfrom user code:\n   File "~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 865, in forward\n    return x[..., : shape[0]]\n\nSet TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you\'re reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"\n')),
 (ExportOptions(decomposition_table='default', aten_as_function={'aten.scaled_dot_product_attention.default'}),
  GuardOnDataDependentSymNode('Could not extract specialized integer from data-dependent expression u0 (unhinted: u0).  (Size-like symbols: none)\n\nCaused by: (_export/non_strict_utils.py:973 in __torch_function__)\nFor more information, run with TORCH_LOGS="dynamic"\nFor extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"\nIf you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\nFor more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing\n\nFor C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\n\nThe following call raised this error:\n  File "~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 865, in forward\n    return x[..., : shape[0]]\n\n\nThe error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.')),
 (ExportOptions(dynamo=True, aten_as_function={'aten.scaled_dot_product_attention.default'}),
  Unsupported('Dynamic slicing with Tensor arguments\n  Explanation: Creating slices with Tensor arguments is not supported. e.g. `l[:x]`, where `x` is a 1-element tensor.\n  Hint: It may be possible to write Dynamo tracing rules for this code. Please report an issue to PyTorch if you encounter this graph break often and it is causing performance issues.\n\n  Developer debug context: SliceVariable start: ConstantVariable(NoneType: None), stop: TensorVariable(), step: ConstantVariable(NoneType: None)\n\n\nfrom user code:\n   File "~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 865, in forward\n    return x[..., : shape[0]]\n\nSet TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you\'re reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"\n')),
 (ExportOptions(decomposition_table='default', dynamo=True, aten_as_function={'aten.scaled_dot_product_attention.default'}),
  Unsupported('Dynamic slicing with Tensor arguments\n  Explanation: Creating slices with Tensor arguments is not supported. e.g. `l[:x]`, where `x` is a 1-element tensor.\n  Hint: It may be possible to write Dynamo tracing rules for this code. Please report an issue to PyTorch if you encounter this graph break often and it is causing performance issues.\n\n  Developer debug context: SliceVariable start: ConstantVariable(NoneType: None), stop: TensorVariable(), step: ConstantVariable(NoneType: None)\n\n\nfrom user code:\n   File "~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 865, in forward\n    return x[..., : shape[0]]\n\nSet TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you\'re reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"\n')),
 (ExportOptions(jit=True, aten_as_function={'aten.scaled_dot_product_attention.default'}),
  GuardOnDataDependentSymNode('Could not guard on data-dependent expression u0 < 0 (unhinted: u0 < 0).  (Size-like symbols: none)\n\nCaused by: (_decomp/decompositions.py:734 in slice_forward)\nFor more information, run with TORCH_LOGS="dynamic"\nFor extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"\nIf you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\nFor more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing\n\nFor C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\n\nThe following call raised this error:\n  File "<string>", line 1, in <lambda>\n\n\nWhile executing %slice_tensor : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 2, 0, %_local_scalar_dense_default, 1), kwargs = {})\nGraphModule: class GraphModule(torch.nn.Module):\n    def forward(self, x, shape):\n        # No stacktrace found for following nodes\n        select_int = torch.ops.aten.select.int(shape, 0, 0);  shape = None\n        _to_copy_default = torch.ops.aten._to_copy.default(select_int, dtype = torch.int32);  select_int = None\n        _local_scalar_dense_default = torch.ops.aten._local_scalar_dense.default(_to_copy_default);  _to_copy_default = None\n        slice_tensor = torch.ops.aten.slice.Tensor(x, 2, 0, _local_scalar_dense_default, 1);  x = _local_scalar_dense_default = None\n        return slice_tensor\n        \n\nOriginal traceback:\nNone'))]

custom-dec

FAILED

Dynamic slicing with Tensor arguments
  Explanation: Creating slices with Tensor arguments is not supported. e.g. `l[:x]`, where `x` is a 1-element tensor.
  Hint: It may be possible to write Dynamo tracing rules for this code. Please report an issue to PyTorch if you encounter this graph break often and it is causing performance issues.

  Developer debug context: SliceVariable start: ConstantVariable(NoneType: None), stop: TensorVariable(), step: ConstantVariable(NoneType: None)


from user code:
   File "~/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 865, in forward
    return x[..., : shape[0]]

Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"

custom-tracing

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4, 4]
input: name='shape' type=dtype('int64') shape=[1]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='getitem_1_axis' type=int64 shape=(1,) -- array([-1])      -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_1_step' type=int64 shape=(1,) -- array([1])       -- DynamoInterpreter._getitem_slice.3
Gather(shape, init7_s1_0) -> _onx_gather_shape0
  Slice(x, init7_s1_0, _onx_gather_shape0, getitem_1_axis, getitem_1_step) -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1', 'd_output_2']

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s35', 's16', 's90']
input: name='shape' type=dtype('int64') shape=[1]
Constant(value_ints=[1]) -> val_11
Constant(value=[0]) -> val_4
Constant(value=[2]) -> val_7
Constant(value=[2]) -> val_10
  Slice(x, val_4, val_7, val_10, val_11) -> slice_1
output: name='slice_1' type=dtype('float32') shape=['s35', 's16', 2]

script

opset: domain='' version=17
input: name='onnx::Slice_0' type=dtype('float32') shape=[3, 4, 4]
input: name='onnx::Gather_1' type=dtype('int64') shape=[1]
Constant(value=0) -> /Constant_output_0
  Gather(onnx::Gather_1, /Constant_output_0, axis=0) -> /Gather_output_0
Constant(value=[2]) -> /Constant_1_output_0
Constant(value=[0]) -> /Constant_2_output_0
Constant(value=[0]) -> /Constant_3_output_0
  Unsqueeze(/Gather_output_0, /Constant_3_output_0) -> /Unsqueeze_output_0
Constant(value=[1]) -> /Constant_4_output_0
  Slice(onnx::Slice_0, /Constant_2_output_0, /Unsqueeze_output_0, /Constant_1_output_0, /Constant_4_output_0) -> 14
output: name='14' type=dtype('float32') shape=[3, 4, 'Slice14_dim_2']

CropLastDimensionWithTensorShape

forward

def forward(self, x, y):
    return x[..., : y.shape[0]]

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4, 4]
input: name='y' type=dtype('float32') shape=[2]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
Slice(x, init7_s1_0, init7_s1_2, init7_s1_2) -> output_0
output: name='output_0' type=dtype('float32') shape=[3, 4, 2]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4, 4]
input: name='y' type=dtype('float32') shape=[2]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
Slice(x, init7_s1_0, init7_s1_2, init7_s1_2) -> output_0
output: name='output_0' type=dtype('float32') shape=[3, 4, 2]

custom-tracing

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4, 4]
input: name='y' type=dtype('float32') shape=[2]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='getitem_1_axis' type=int64 shape=(1,) -- array([-1])      -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_1_step' type=int64 shape=(1,) -- array([1])       -- DynamoInterpreter._getitem_slice.3
Shape(y) -> getattr_1
  Gather(getattr_1, init7_s1_0) -> _onx_gather_getattr_10
    Slice(x, init7_s1_0, _onx_gather_getattr_10, getitem_1_axis, getitem_1_step) -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1', 'd_output_2']

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4, 4]
input: name='y' type=dtype('float32') shape=[2]
Constant(value_ints=[1]) -> val_11
Constant(value=[0]) -> val_3
Constant(value=[2]) -> val_7
Constant(value=[2]) -> val_10
  Slice(x, val_3, val_7, val_10, val_11) -> slice_1
output: name='slice_1' type=dtype('float32') shape=[3, 4, 2]

script

FAILED

Input mismatch, inputs[0]=(T1r3,T1r1) but names=['onnx::Slice_0'], model=CropLastDimensionWithTensorShape, export='script'

InplaceAdd

forward

def forward(self, x):
    x += self.bias
    return x
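
Every exporter below functionalizes the in-place update into a plain Add node; eagerly, however, x += self.bias mutates the caller's tensor. A short reminder of that aliasing, standard torch only:

import torch

bias = torch.ones(1, 4)
x = torch.zeros(3, 4)
y = x
x += bias  # in-place: x and y still share storage
print(torch.equal(y, torch.ones(3, 4)))  # True, the caller's tensor was modified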

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=[3, 4]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
  Identity(output_0) -> output_1
output: name='output_1' type=dtype('float32') shape=[3, 4]

custom-tracing

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.get_attr.0
Add(x, bias) -> output
output: name='output' type=dtype('float32') shape=[3, 4]

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add
output: name='add' type=dtype('float32') shape=[3, 4]

script

opset: domain='' version=17
input: name='onnx::Add_0' type=dtype('float32') shape=[3, 4]
Constant(value=[[1.0, 1.0...) -> /Constant_output_0
  Add(onnx::Add_0, /Constant_output_0) -> 2
output: name='2' type=dtype('float32') shape=[3, 4]

InplaceAdd2

forward

def forward(self, x):
    x.add_(self.bias)
    return x

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=[3, 4]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
  Identity(output_0) -> output_1
output: name='output_1' type=dtype('float32') shape=[3, 4]

custom-tracing

FAILED

Unable to interpret method 'aten_meth_add_', args=(x, bias), kwargs={}, dispatcher=None
--DEBUG--
[GraphBuilder-TAE] Message starts, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
--SHAPE--
_dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_dimensions_source_flat=None
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes=None
_known_shapes={'bias': (1, 4), 'x': (3, 4)}
_known_types={'bias': 1, 'x': 1}
_known_value_shape={}
_known_constants=['bias']
_known_ranks={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
    add_ -> {output}
    bias -> {add_}
    x -> {add_}
--TORCH-SHAPES--
    x: ('run_node', (('example_value', torch.float32, torch.Size([3, 4])), '')) --- 1:2:(3, 4):
    bias: ('run_node', ('', '')) --- 1:2:(1, 4):
    add_: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(strict=True, tracing=True, aten_as_function={'aten.scaled_dot_product_attention.default'})
-- process.graph_module --
GraphModule()



def forward(self, x):
    bias = self.bias
    add_ = x.add_(bias);  x = bias = None
    return add_

# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
    %x : [num_users=1] = placeholder[target=x]
    %bias : [num_users=1] = get_attr[target=bias]
    %add_ : [num_users=1] = call_method[target=add_](args = (%x, %bias), kwargs = {})
    return add_
-- process.progress --
node 2/4 target=add_
--
[GraphBuilder-TAE.make_tensor_input] x[1:3x4]
[GraphBuilder-TAE.make_initializer] bias[torch.float32:torch.float32:[1.0, 1.0, 1.0, 1.0]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-TAE] Message completed, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add
output: name='add' type=dtype('float32') shape=[3, 4]

script

opset: domain='' version=17
input: name='onnx::Add_0' type=dtype('float32') shape=[3, 4]
Constant(value=[[1.0, 1.0...) -> /Constant_output_0
  Add(onnx::Add_0, /Constant_output_0) -> 2
output: name='2' type=dtype('float32') shape=[3, 4]

InplaceAdd_Mul

forward

def forward(self, x):
    x.add_(self.bias)
    return x * 2

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='_reshape_init1_s_0' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.1(mul_Tensor)##init7_s1_1/Opset.make_node.1/Shape
Add(x, c_bias) -> add_
  Mul(add_, _reshape_init1_s_0) -> output_0
output: name='output_0' type=dtype('float32') shape=[3, 4]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='_reshape_init1_s_0' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.1(mul_Tensor)##init7_s1_1/Opset.make_node.1/Shape
Add(x, c_bias) -> output_0
  Mul(output_0, _reshape_init1_s_0) -> output_1
output: name='output_1' type=dtype('float32') shape=[3, 4]

custom-tracing

FAILED

Unable to interpret method 'aten_meth_add_', args=(x, bias), kwargs={}, dispatcher=None
--DEBUG--
[GraphBuilder-LZS] Message starts, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
--SHAPE--
_dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_dimensions_source_flat=None
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes=None
_known_shapes={'bias': (1, 4), 'x': (3, 4)}
_known_types={'bias': 1, 'x': 1}
_known_value_shape={}
_known_constants=['bias']
_known_ranks={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
    add_ -> {mul}
    bias -> {add_}
    x -> {add_}
--TORCH-SHAPES--
    x: ('run_node', (('example_value', torch.float32, torch.Size([3, 4])), '')) --- 1:2:(3, 4):
    bias: ('run_node', ('', '')) --- 1:2:(1, 4):
    add_: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(strict=True, tracing=True, aten_as_function={'aten.scaled_dot_product_attention.default'})
-- process.graph_module --
GraphModule()



def forward(self, x):
    bias = self.bias
    add_ = x.add_(bias);  x = bias = None
    mul = add_ * 2;  add_ = None
    return mul

# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
    %x : [num_users=1] = placeholder[target=x]
    %bias : [num_users=1] = get_attr[target=bias]
    %add_ : [num_users=1] = call_method[target=add_](args = (%x, %bias), kwargs = {})
    %mul : [num_users=1] = call_function[target=operator.mul](args = (%add_, 2), kwargs = {})
    return mul
-- process.progress --
node 2/5 target=add_
--
[GraphBuilder-LZS.make_tensor_input] x[1:3x4]
[GraphBuilder-LZS.make_initializer] bias[torch.float32:torch.float32:[1.0, 1.0, 1.0, 1.0]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-LZS] Message completed, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
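
The tracer stops on the in-place method add_ (aten_meth_add_ has no registered handler). A sketch of an out-of-place rewrite is shown below; the returned value is the same since only x * 2 is returned, but whether custom-tracing then succeeds was not verified here.

def forward(self, x):
    # out-of-place addition instead of x.add_(self.bias)
    x = x + self.bias
    return x * 2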

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add
Constant(value=2.0) -> scalar_tensor_default
  Mul(add, scalar_tensor_default) -> mul
output: name='mul' type=dtype('float32') shape=[3, 4]

script

opset: domain='' version=17
input: name='onnx::Add_0' type=dtype('float32') shape=[3, 4]
Constant(value=[[1.0, 1.0...) -> /Constant_output_0
  Add(onnx::Add_0, /Constant_output_0) -> /Add_output_0
Constant(value=2.0) -> /Constant_1_output_0
  Mul(/Add_output_0, /Constant_1_output_0) -> 4
output: name='4' type=dtype('float32') shape=[3, 4]

InplaceCloneAdd

forward

def forward(self, x):
    x = x.clone()
    x.add_(self.bias)
    return x

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=[3, 4]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=[3, 4]

custom-tracing

FAILED

Unable to interpret method 'aten_meth_add_', args=(clone, bias), kwargs={}, dispatcher=None
--DEBUG--
[GraphBuilder-THG] Message starts, there are 1 initializers, 1 nodes, 1 inputs, 1 outputs.
--SHAPE--
_dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_dimensions_source_flat=None
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes=None
_known_shapes={'bias': (1, 4), 'clone': (3, 4), 'x': (3, 4)}
_known_types={'bias': 1, 'clone': 1, 'x': 1}
_known_value_shape={}
_known_constants=['bias']
_known_ranks={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
    add_ -> {output}
    bias -> {add_}
    clone -> {add_}
    x -> {clone}
--TORCH-SHAPES--
    x: ('run_node', (('example_value', torch.float32, torch.Size([3, 4])), '')) --- 1:2:(3, 4):
    clone: ('run_node', ('', '')) --- 1:2:(3, 4):
    bias: ('run_node', ('', '')) --- 1:2:(1, 4):
    add_: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(strict=True, tracing=True, aten_as_function={'aten.scaled_dot_product_attention.default'})
-- process.graph_module --
GraphModule()



def forward(self, x):
    clone = x.clone();  x = None
    bias = self.bias
    add_ = clone.add_(bias);  clone = bias = None
    return add_

# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
    %x : [num_users=1] = placeholder[target=x]
    %clone : [num_users=1] = call_method[target=clone](args = (%x,), kwargs = {})
    %bias : [num_users=1] = get_attr[target=bias]
    %add_ : [num_users=1] = call_method[target=add_](args = (%clone, %bias), kwargs = {})
    return add_
-- process.progress --
node 3/5 target=add_
--
[GraphBuilder-THG.make_tensor_input] x[1:3x4]
[GraphBuilder-THG.make_initializer] bias[torch.float32:torch.float32:[1.0, 1.0, 1.0, 1.0]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-THG.make_node] .clone          [#:#   ] Identity:['x']->['clone']
[GraphBuilder-THG] Message completed, there are 1 initializers, 1 nodes, 1 inputs, 1 outputs.

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add
output: name='add' type=dtype('float32') shape=[3, 4]

script

opset: domain='' version=17
input: name='onnx::Add_0' type=dtype('float32') shape=[3, 4]
Constant(value=[[1.0, 1.0...) -> /Constant_output_0
  Add(onnx::Add_0, /Constant_output_0) -> 2
output: name='2' type=dtype('float32') shape=[3, 4]

InplaceSetItemEllipsis_1

forward

def forward(self, index, update):
    copy = self.params.clone()
    copy[..., index] = update
    return copy
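
The params attribute is not defined in the snippet; the dynamo-ir export below declares it as a (1, 8192, 4) float initializer. A minimal module matching this case could look like the following sketch (the parameter values and the class layout are assumptions):

import torch


class InplaceSetItemEllipsis_1(torch.nn.Module):  # name taken from the case title
    def __init__(self):
        super().__init__()
        # (1, 8192, 4) tensor, matching the 'params' initializer of the dynamo-ir export
        self.params = torch.zeros((1, 8192, 4))

    def forward(self, index, update):
        copy = self.params.clone()
        copy[..., index] = update
        return copy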

custom-fallback

opset: domain='' version=18
input: name='index' type=dtype('int64') shape=[4]
input: name='update' type=dtype('float32') shape=[8192, 4]
init: name='init7_s3_1_1_-1' type=int64 shape=(3,) -- array([ 1,  1, -1])-- Opset.make_node.1/Shape
init: name='init7_s3_1_8192_4' type=int64 shape=(3,) -- array([   1, 8192,    4])-- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_32768' type=int64 shape=(1,) -- array([32768])   -- Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4])           -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s32768_' type=int64 shape=(32768,)                  -- Opset.make_node.0
init: name='_reshape_init7_s1_00' type=int64 shape=(1, 1, 1) -- array([0])-- GraphBuilder.constant_folding.from/fold(init7_s1_0,init7_s3_-1_1_1)##init7_s1_0/Opset.make_node.1/Shape##init7_s3_-1_1_1/Opset.make_node.1/Shape
init: name='_reshape_init7_s8192_0' type=int64 shape=(1, 8192, 1)     -- GraphBuilder.constant_folding.from/fold(init7_s3_1_-1_1,init7_s8192_)##init7_s8192_/Opset.make_node.0##init7_s3_1_-1_1/Opset.make_node.1/Shape
init: name='_reshape_clone0' type=float32 shape=(32768,)              -- GraphBuilder.constant_folding.from/fold(c_params,init7_s1_-1)##c_params/DynamoInterpret.placeholder.0##init7_s1_-1/Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
Expand(update, init7_s3_1_8192_4) -> _onx_expand_update0
  Reshape(_onx_expand_update0, init7_s1_-1) -> _reshape_expand_update00
Reshape(index, init7_s3_1_1_-1) -> _reshape_index0
Mul(_reshape_init7_s1_00, init7_s1_32768) -> _onx_mul__reshape_init7_s1_000
  Add(_onx_mul__reshape_init7_s1_000, _reshape_index0) -> add-_onx_mul__reshape_init7_s1_000
Mul(_reshape_init7_s8192_0, init7_s1_4) -> _onx_mul__reshape_init7_s8192_00
  Add(add-_onx_mul__reshape_init7_s1_000, _onx_mul__reshape_init7_s8192_00) -> _onx_add_add_mul__reshape_init7_s1_00000
    Reshape(_onx_add_add_mul__reshape_init7_s1_00000, init7_s1_-1) -> _reshape_add_add_mul__reshape_init7_s1_000000
      GatherElements(init7_s32768_, _reshape_add_add_mul__reshape_init7_s1_000000) -> _onx_gatherelements_init7_s32768_0
    ScatterElements(_reshape_clone0, _onx_gatherelements_init7_s32768_0, _reshape_expand_update00) -> _onx_scatterelements__reshape_clone00
      Reshape(_onx_scatterelements__reshape_clone00, init7_s3_1_8192_4) -> output_0
output: name='output_0' type=dtype('float32') shape=[1, 8192, 4]

custom-dec

opset: domain='' version=18
input: name='index' type=dtype('int64') shape=[4]
input: name='update' type=dtype('float32') shape=[8192, 4]
init: name='init7_s3_1_1_-1' type=int64 shape=(3,) -- array([ 1,  1, -1])-- Opset.make_node.1/Shape
init: name='init7_s3_1_8192_4' type=int64 shape=(3,) -- array([   1, 8192,    4])-- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_32768' type=int64 shape=(1,) -- array([32768])   -- Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4])           -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s32768_' type=int64 shape=(32768,)                  -- Opset.make_node.0
init: name='_reshape_init7_s1_00' type=int64 shape=(1, 1, 1) -- array([0])-- GraphBuilder.constant_folding.from/fold(init7_s1_0,init7_s3_-1_1_1)##init7_s1_0/Opset.make_node.1/Shape##init7_s3_-1_1_1/Opset.make_node.1/Shape
init: name='_reshape_init7_s8192_0' type=int64 shape=(1, 8192, 1)     -- GraphBuilder.constant_folding.from/fold(init7_s3_1_-1_1,init7_s8192_)##init7_s8192_/Opset.make_node.0##init7_s3_1_-1_1/Opset.make_node.1/Shape
init: name='_reshape_clone0' type=float32 shape=(32768,)              -- GraphBuilder.constant_folding.from/fold(c_params,init7_s1_-1)##c_params/DynamoInterpret.placeholder.0##init7_s1_-1/Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
Expand(update, init7_s3_1_8192_4) -> _onx_expand_update0
  Reshape(_onx_expand_update0, init7_s1_-1) -> _reshape_expand_update00
Reshape(index, init7_s3_1_1_-1) -> _reshape_index0
Mul(_reshape_init7_s1_00, init7_s1_32768) -> _onx_mul__reshape_init7_s1_000
  Add(_onx_mul__reshape_init7_s1_000, _reshape_index0) -> add-_onx_mul__reshape_init7_s1_000
Mul(_reshape_init7_s8192_0, init7_s1_4) -> _onx_mul__reshape_init7_s8192_00
  Add(add-_onx_mul__reshape_init7_s1_000, _onx_mul__reshape_init7_s8192_00) -> _onx_add_add_mul__reshape_init7_s1_00000
    Reshape(_onx_add_add_mul__reshape_init7_s1_00000, init7_s1_-1) -> _reshape_add_add_mul__reshape_init7_s1_000000
      GatherElements(init7_s32768_, _reshape_add_add_mul__reshape_init7_s1_000000) -> _onx_gatherelements_init7_s32768_0
    ScatterElements(_reshape_clone0, _onx_gatherelements_init7_s32768_0, _reshape_expand_update00) -> _onx_scatterelements__reshape_clone00
      Reshape(_onx_scatterelements__reshape_clone00, init7_s3_1_8192_4) -> output_0
output: name='output_0' type=dtype('float32') shape=[1, 8192, 4]

custom-tracing

FAILED

setitem is not implemented when indices=(Ellipsis, 'index') and rank is unknown or not equal to the number of indices
--DEBUG--
[GraphBuilder-UMY] Message starts, there are 1 initializers, 0 nodes, 2 inputs, 2 outputs.
--SHAPE--
_dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_dimensions_source_flat=None
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes=None
_known_shapes={'_tensor_constant0': (1, 8192, 4), 'index': (4,), 'update': (8192, 4)}
_known_types={'_tensor_constant0': 1, 'index': 7, 'update': 1}
_known_value_shape={}
_known_constants=['_tensor_constant0']
_known_ranks={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
    _tensor_constant0 -> {setitem}
    index -> {setitem}
    setitem -> {output}
    update -> {setitem}
--TORCH-SHAPES--
    index: ('run_node', (('example_value', torch.int64, torch.Size([4])), '')) --- 7:1:(4,):
    update: ('run_node', (('example_value', torch.float32, torch.Size([8192, 4])), '')) --- 1:2:(8192, 4):
    _tensor_constant0: ('run_node', ('', '')) --- 1:3:(1, 8192, 4):
    setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(strict=True, tracing=True, aten_as_function={'aten.scaled_dot_product_attention.default'})
-- process.graph_module --
GraphModule()



def forward(self, index, update):
    _tensor_constant0 = self._tensor_constant0
    _tensor_constant0[(Ellipsis, index)] = update;  setitem = _tensor_constant0;  _tensor_constant0 = index = update = None
    return setitem

# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
    %index : [num_users=1] = placeholder[target=index]
    %update : [num_users=1] = placeholder[target=update]
    %_tensor_constant0 : [num_users=1] = get_attr[target=_tensor_constant0]
    %setitem : [num_users=1] = call_function[target=operator.setitem](args = (%_tensor_constant0, (Ellipsis, %index), %update), kwargs = {})
    return setitem
-- process.progress --
node 3/5 target=<built-in function setitem>
--
[GraphBuilder-UMY.make_tensor_input] index[7:4]
[GraphBuilder-UMY.make_tensor_input] update[1:8192x4]
[GraphBuilder-UMY.make_initializer] _tensor_constant0[torch.float32:torch.float32] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-UMY] Message completed, there are 1 initializers, 0 nodes, 2 inputs, 2 outputs.

dynamo-ir

opset: domain='' version=18
input: name='index' type=dtype('int64') shape=[4]
input: name='update' type=dtype('float32') shape=[8192, 4]
init: name='params' type=float32 shape=(1, 8192, 4)
init: name='val_6' type=int64 shape=(8192, 4)
init: name='val_12' type=int64 shape=(8192,)
Constant(value=[1]) -> val_9
Constant(value=[8192, 4]) -> val_5
Constant(value=[-1]) -> val_7
  Reshape(val_6, val_7, allowzero=0) -> val_8
  Unsqueeze(val_8, val_9) -> val_10
Constant(value=[8192, 1]) -> val_13
  Reshape(val_12, val_13, allowzero=0) -> val_14
  Expand(val_14, val_5) -> val_15
  Reshape(val_15, val_7, allowzero=0) -> val_16
  Unsqueeze(val_16, val_9) -> val_17
Constant(value=[1, 4]) -> val_19
  Reshape(index, val_19) -> val_20
  Expand(val_20, val_5) -> val_21
  Reshape(val_21, val_7, allowzero=0) -> val_22
  Unsqueeze(val_22, val_9) -> val_23
    Concat(val_10, val_17, val_23, axis=1) -> val_24
  Reshape(update, val_7, allowzero=0) -> val_25
    ScatterND(params, val_24, val_25, reduction=b'none') -> index_put
output: name='index_put' type=dtype('float32') shape=[1, 8192, 4]

script

FAILED

[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. In Node, ("/Add", Add, "", -1) : ("","",) -> ("/Add_output_0",) , Error Node (/Add)'s input 0 is marked single but has an empty string in the graph

InplaceSetItemEllipsis_2

forward

def forward(self, index, update):
    copy = self.params.clone()
    copy[..., index] = update
    return copy

custom-fallback

opset: domain='' version=18
input: name='index' type=dtype('int64') shape=[4]
input: name='update' type=dtype('float32') shape=[8192, 4]
init: name='init7_s3_1_1_-1' type=int64 shape=(3,) -- array([ 1,  1, -1])-- Opset.make_node.1/Shape
init: name='init7_s3_1_8192_4' type=int64 shape=(3,) -- array([   1, 8192,    4])-- Opset.make_node.1/Shape
init: name='init7_s1_49152' type=int64 shape=(1,) -- array([49152])   -- Opset.make_node.1/Shape
init: name='init7_s1_6' type=int64 shape=(1,) -- array([6])           -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s49152_' type=int64 shape=(49152,)                  -- Opset.make_node.0
init: name='init7_s3_1_8192_6' type=int64 shape=(3,) -- array([   1, 8192,    6])-- Opset.make_node.1/Shape
init: name='_reshape_init7_s1_00' type=int64 shape=(1, 1, 1) -- array([0])-- GraphBuilder.constant_folding.from/fold(init7_s1_0,init7_s3_-1_1_1)##init7_s1_0/Opset.make_node.1/Shape##init7_s3_-1_1_1/Opset.make_node.1/Shape
init: name='_reshape_init7_s8192_0' type=int64 shape=(1, 8192, 1)     -- GraphBuilder.constant_folding.from/fold(init7_s3_1_-1_1,init7_s8192_)##init7_s8192_/Opset.make_node.0##init7_s3_1_-1_1/Opset.make_node.1/Shape
init: name='_reshape_clone0' type=float32 shape=(49152,)              -- GraphBuilder.constant_folding.from/fold(c_params,init7_s1_-1)##c_params/DynamoInterpret.placeholder.0##init7_s1_-1/Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
Expand(update, init7_s3_1_8192_4) -> _onx_expand_update0
  Reshape(_onx_expand_update0, init7_s1_-1) -> _reshape_expand_update00
Reshape(index, init7_s3_1_1_-1) -> _reshape_index0
Mul(_reshape_init7_s1_00, init7_s1_49152) -> _onx_mul__reshape_init7_s1_000
  Add(_onx_mul__reshape_init7_s1_000, _reshape_index0) -> add-_onx_mul__reshape_init7_s1_000
Mul(_reshape_init7_s8192_0, init7_s1_6) -> _onx_mul__reshape_init7_s8192_00
  Add(add-_onx_mul__reshape_init7_s1_000, _onx_mul__reshape_init7_s8192_00) -> _onx_add_add_mul__reshape_init7_s1_00000
    Reshape(_onx_add_add_mul__reshape_init7_s1_00000, init7_s1_-1) -> _reshape_add_add_mul__reshape_init7_s1_000000
      GatherElements(init7_s49152_, _reshape_add_add_mul__reshape_init7_s1_000000) -> _onx_gatherelements_init7_s49152_0
    ScatterElements(_reshape_clone0, _onx_gatherelements_init7_s49152_0, _reshape_expand_update00) -> _onx_scatterelements__reshape_clone00
      Reshape(_onx_scatterelements__reshape_clone00, init7_s3_1_8192_6) -> output_0
output: name='output_0' type=dtype('float32') shape=[1, 8192, 6]

custom-dec

opset: domain='' version=18
input: name='index' type=dtype('int64') shape=[4]
input: name='update' type=dtype('float32') shape=[8192, 4]
init: name='init7_s3_1_1_-1' type=int64 shape=(3,) -- array([ 1,  1, -1])-- Opset.make_node.1/Shape
init: name='init7_s3_1_8192_4' type=int64 shape=(3,) -- array([   1, 8192,    4])-- Opset.make_node.1/Shape
init: name='init7_s1_49152' type=int64 shape=(1,) -- array([49152])   -- Opset.make_node.1/Shape
init: name='init7_s1_6' type=int64 shape=(1,) -- array([6])           -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1])         -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s49152_' type=int64 shape=(49152,)                  -- Opset.make_node.0
init: name='init7_s3_1_8192_6' type=int64 shape=(3,) -- array([   1, 8192,    6])-- Opset.make_node.1/Shape
init: name='_reshape_init7_s1_00' type=int64 shape=(1, 1, 1) -- array([0])-- GraphBuilder.constant_folding.from/fold(init7_s1_0,init7_s3_-1_1_1)##init7_s1_0/Opset.make_node.1/Shape##init7_s3_-1_1_1/Opset.make_node.1/Shape
init: name='_reshape_init7_s8192_0' type=int64 shape=(1, 8192, 1)     -- GraphBuilder.constant_folding.from/fold(init7_s3_1_-1_1,init7_s8192_)##init7_s8192_/Opset.make_node.0##init7_s3_1_-1_1/Opset.make_node.1/Shape
init: name='_reshape_clone0' type=float32 shape=(49152,)              -- GraphBuilder.constant_folding.from/fold(c_params,init7_s1_-1)##c_params/DynamoInterpret.placeholder.0##init7_s1_-1/Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
Expand(update, init7_s3_1_8192_4) -> _onx_expand_update0
  Reshape(_onx_expand_update0, init7_s1_-1) -> _reshape_expand_update00
Reshape(index, init7_s3_1_1_-1) -> _reshape_index0
Mul(_reshape_init7_s1_00, init7_s1_49152) -> _onx_mul__reshape_init7_s1_000
  Add(_onx_mul__reshape_init7_s1_000, _reshape_index0) -> add-_onx_mul__reshape_init7_s1_000
Mul(_reshape_init7_s8192_0, init7_s1_6) -> _onx_mul__reshape_init7_s8192_00
  Add(add-_onx_mul__reshape_init7_s1_000, _onx_mul__reshape_init7_s8192_00) -> _onx_add_add_mul__reshape_init7_s1_00000
    Reshape(_onx_add_add_mul__reshape_init7_s1_00000, init7_s1_-1) -> _reshape_add_add_mul__reshape_init7_s1_000000
      GatherElements(init7_s49152_, _reshape_add_add_mul__reshape_init7_s1_000000) -> _onx_gatherelements_init7_s49152_0
    ScatterElements(_reshape_clone0, _onx_gatherelements_init7_s49152_0, _reshape_expand_update00) -> _onx_scatterelements__reshape_clone00
      Reshape(_onx_scatterelements__reshape_clone00, init7_s3_1_8192_6) -> output_0
output: name='output_0' type=dtype('float32') shape=[1, 8192, 6]

custom-tracing

FAILED

setitem is not implemented when indices=(Ellipsis, 'index') and rank is unknown or not equal to the number of indices
--DEBUG--
[GraphBuilder-SRG] Message starts, there are 1 initializers, 0 nodes, 2 inputs, 2 outputs.
--SHAPE--
_dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_dimensions_source_flat=None
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes=None
_known_shapes={'_tensor_constant0': (1, 8192, 6), 'index': (4,), 'update': (8192, 4)}
_known_types={'_tensor_constant0': 1, 'index': 7, 'update': 1}
_known_value_shape={}
_known_constants=['_tensor_constant0']
_known_ranks={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
    _tensor_constant0 -> {setitem}
    index -> {setitem}
    setitem -> {output}
    update -> {setitem}
--TORCH-SHAPES--
    index: ('run_node', (('example_value', torch.int64, torch.Size([4])), '')) --- 7:1:(4,):
    update: ('run_node', (('example_value', torch.float32, torch.Size([8192, 4])), '')) --- 1:2:(8192, 4):
    _tensor_constant0: ('run_node', ('', '')) --- 1:3:(1, 8192, 6):
    setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(strict=True, tracing=True, aten_as_function={'aten.scaled_dot_product_attention.default'})
-- process.graph_module --
GraphModule()



def forward(self, index, update):
    _tensor_constant0 = self._tensor_constant0
    _tensor_constant0[(Ellipsis, index)] = update;  setitem = _tensor_constant0;  _tensor_constant0 = index = update = None
    return setitem

# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
    %index : [num_users=1] = placeholder[target=index]
    %update : [num_users=1] = placeholder[target=update]
    %_tensor_constant0 : [num_users=1] = get_attr[target=_tensor_constant0]
    %setitem : [num_users=1] = call_function[target=operator.setitem](args = (%_tensor_constant0, (Ellipsis, %index), %update), kwargs = {})
    return setitem
-- process.progress --
node 3/5 target=<built-in function setitem>
--
[GraphBuilder-SRG.make_tensor_input] index[7:4]
[GraphBuilder-SRG.make_tensor_input] update[1:8192x4]
[GraphBuilder-SRG.make_initializer] _tensor_constant0[torch.float32:torch.float32] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-SRG] Message completed, there are 1 initializers, 0 nodes, 2 inputs, 2 outputs.

dynamo-ir

opset: domain='' version=18
input: name='index' type=dtype('int64') shape=[4]
input: name='update' type=dtype('float32') shape=[8192, 4]
init: name='params' type=float32 shape=(1, 8192, 6)
init: name='val_6' type=int64 shape=(8192, 4)
init: name='val_12' type=int64 shape=(8192,)
Constant(value=[1]) -> val_9
Constant(value=[8192, 4]) -> val_5
Constant(value=[-1]) -> val_7
  Reshape(val_6, val_7, allowzero=0) -> val_8
  Unsqueeze(val_8, val_9) -> val_10
Constant(value=[8192, 1]) -> val_13
  Reshape(val_12, val_13, allowzero=0) -> val_14
  Expand(val_14, val_5) -> val_15
  Reshape(val_15, val_7, allowzero=0) -> val_16
  Unsqueeze(val_16, val_9) -> val_17
Constant(value=[1, 4]) -> val_19
  Reshape(index, val_19) -> val_20
  Expand(val_20, val_5) -> val_21
  Reshape(val_21, val_7, allowzero=0) -> val_22
  Unsqueeze(val_22, val_9) -> val_23
    Concat(val_10, val_17, val_23, axis=1) -> val_24
  Reshape(update, val_7, allowzero=0) -> val_25
    ScatterND(params, val_24, val_25, reduction=b'none') -> index_put
output: name='index_put' type=dtype('float32') shape=[1, 8192, 6]

script

FAILED

[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. In Node, ("/Add", Add, "", -1) : ("","",) -> ("/Add_output_0",) , Error Node (/Add)'s input 0 is marked single but has an empty string in the graph

InplaceSetItemMask

forward

def forward(self, x):
    mask = x.to(bool)
    x[mask] = 2
    return x

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 3, 3]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([2.], dtype=float32)-- DynamoInterpret.placeholder.0
Cast(x, to=9) -> to
  Where(to, c_lifted_tensor_0, x) -> output_0
output: name='output_0' type=dtype('float32') shape=[2, 3, 3]
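
The masked in-place assignment is exported as a single Cast/Where pair: Cast(x, to=9) builds the boolean mask and Where selects 2 where the mask is true. Conceptually the graph computes the following (an eager sketch of the exported graph, not the exporter's code):

import torch


def exported_equivalent(x):
    # Cast(x, to=9) -> boolean mask, then Where(mask, 2.0, x)
    mask = x.to(torch.bool)
    return torch.where(mask, torch.tensor(2.0), x)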

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 3, 3]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([2.], dtype=float32)-- DynamoInterpret.placeholder.0
Cast(x, to=9) -> _to_copy
  Where(_to_copy, c_lifted_tensor_0, x) -> output_0
    Identity(output_0) -> output_1
output: name='output_1' type=dtype('float32') shape=[2, 3, 3]

custom-tracing

FAILED

Unexpected type <class 'int'> for name.

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 3, 3]
Cast(x, to=9) -> _to_copy
Constant(value=2.0) -> clone
  Where(_to_copy, clone, x) -> index_put
output: name='index_put' type=dtype('float32') shape=[2, 3, 3]

script

opset: domain='' version=17
input: name='onnx::Cast_0' type=dtype('float32') shape=[2, 3, 3]
Cast(onnx::Cast_0, to=9) -> /Cast_output_0
  Cast(/Cast_output_0, to=9) -> /Cast_1_output_0
Constant(value=2.0) -> /Constant_output_0
  Where(/Cast_1_output_0, /Constant_output_0, onnx::Cast_0) -> 4
output: name='4' type=dtype('float32') shape=[2, 3, 3]

InplaceSetItemSquare

forward

def forward(self, x):
    x[:2, :3] = 1
    return x

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[5, 5]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_reshape_init7_s3_0_1_20' type=int64 shape=(3, 1) -- array([0, 1, 2])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s3_0_1_2)##init7_s3_0_1_2/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_onx_transpose_fill0' type=float32 shape=(3, 2)           -- GraphBuilder.constant_folding.from/fold(fill)##fill/
init: name='_reshape_init7_s2_0_10' type=int64 shape=(2, 1) -- array([0, 1])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s2_0_1)##init7_s2_0_1/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
  Transpose(slice_3, perm=[1,0]) -> _onx_transpose_slice_30
    ScatterND(_onx_transpose_slice_30, _reshape_init7_s3_0_1_20, _onx_transpose_fill0) -> _onx_scatternd_transpose_slice_300
      Transpose(_onx_scatternd_transpose_slice_300, perm=[1,0]) -> slice_scatter
        ScatterND(x, _reshape_init7_s2_0_10, slice_scatter) -> output_0
          Identity(output_0) -> output_1
output: name='output_1' type=dtype('float32') shape=[5, 5]
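
The in-place slice assignment is decomposed into a slice_scatter pattern: the rows x[:2] are sliced out, transposed so the written columns become leading rows, filled with ones by the first ScatterND, transposed back, and written into x by the second ScatterND. An eager sketch of what the graph computes (not the exporter's code):

import torch


def exported_equivalent(x):
    sub = x[:2, :].clone()   # Slice on axis 0
    sub[:, :3] = 1.0         # first ScatterND, applied on the transposed slice
    out = x.clone()
    out[:2, :] = sub         # second ScatterND writes the rows back into x
    return out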

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[5, 5]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_reshape_init7_s3_0_1_20' type=int64 shape=(3, 1) -- array([0, 1, 2])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s3_0_1_2)##init7_s3_0_1_2/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_onx_transpose_fill0' type=float32 shape=(3, 2)           -- GraphBuilder.constant_folding.from/fold(fill)##fill/
init: name='_reshape_init7_s2_0_10' type=int64 shape=(2, 1) -- array([0, 1])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s2_0_1)##init7_s2_0_1/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
  Transpose(slice_3, perm=[1,0]) -> _onx_transpose_slice_30
    ScatterND(_onx_transpose_slice_30, _reshape_init7_s3_0_1_20, _onx_transpose_fill0) -> _onx_scatternd_transpose_slice_300
      Transpose(_onx_scatternd_transpose_slice_300, perm=[1,0]) -> slice_scatter
        ScatterND(x, _reshape_init7_s2_0_10, slice_scatter) -> output_0
          Identity(output_0) -> output_1
output: name='output_1' type=dtype('float32') shape=[5, 5]

custom-tracing

FAILED

Unexpected type <class 'int'> for name.

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[5, 5]
Constant(value=[[0], [1],...) -> val_43
Constant(value=[0]) -> val_26
Constant(value=[2]) -> val_29
Constant(value=[0]) -> val_32
Constant(value_ints=[1]) -> val_33
  Slice(x, val_26, val_29, val_32, val_33) -> slice_3
    Transpose(slice_3, perm=[1,0]) -> val_45
Constant(value=[[1.0, 1.0...) -> val_44
  ScatterND(val_45, val_43, val_44, reduction=b'none') -> val_46
    Transpose(val_46, perm=[1,0]) -> slice_scatter
Constant(value=[[0], [1]]) -> val_55
  ScatterND(x, val_55, slice_scatter, reduction=b'none') -> slice_scatter_1
output: name='slice_scatter_1' type=dtype('float32') shape=[5, 5]

script

opset: domain='' version=17
input: name='onnx::Slice_0' type=dtype('float32') shape=[5, 5]
Constant(value=[0]) -> /Constant_output_0
Constant(value=[0]) -> /Constant_1_output_0
Constant(value=[2]) -> /Constant_2_output_0
Constant(value=[1]) -> /Constant_3_output_0
  Slice(onnx::Slice_0, /Constant_1_output_0, /Constant_2_output_0, /Constant_output_0, /Constant_3_output_0) -> /Slice_output_0
Constant(value=[1]) -> /Constant_4_output_0
Constant(value=[0]) -> /Constant_5_output_0
Constant(value=[3]) -> /Constant_6_output_0
Constant(value=[1]) -> /Constant_7_output_0
  Slice(/Slice_output_0, /Constant_5_output_0, /Constant_6_output_0, /Constant_4_output_0, /Constant_7_output_0) -> /Slice_1_output_0
    Shape(/Slice_1_output_0) -> /Shape_output_0
Constant(value=[[1.0]]) -> /Constant_8_output_0
  Expand(/Constant_8_output_0, /Shape_output_0) -> /Expand_output_0
Constant(value=[0, 1, 2]) -> /Constant_9_output_0
Constant(value=[[0], [1]]) -> onnx::Expand_27
Constant(value=[2, 3]) -> /Constant_10_output_0
Constant(value=[2]) -> /Constant_11_output_0
  ConstantOfShape(/Constant_11_output_0, value=[1]) -> /ConstantOfShape_output_0
Constant(value=-1) -> /Constant_12_output_0
  Mul(/ConstantOfShape_output_0, /Constant_12_output_0) -> /Mul_output_0
  Equal(/Constant_10_output_0, /Mul_output_0) -> /Equal_output_0
  Where(/Equal_output_0, /ConstantOfShape_output_0, /Constant_10_output_0) -> /Where_output_0
  Expand(onnx::Expand_27, /Where_output_0) -> /Expand_1_output_0
Constant(value=[-1]) -> /Constant_13_output_0
  Unsqueeze(/Expand_1_output_0, /Constant_13_output_0) -> /Unsqueeze_output_0
Constant(value=[2]) -> /Constant_14_output_0
  ConstantOfShape(/Constant_14_output_0, value=[1]) -> /ConstantOfShape_1_output_0
Constant(value=-1) -> /Constant_15_output_0
  Mul(/ConstantOfShape_1_output_0, /Constant_15_output_0) -> /Mul_1_output_0
  Equal(/Constant_10_output_0, /Mul_1_output_0) -> /Equal_1_output_0
  Where(/Equal_1_output_0, /ConstantOfShape_1_output_0, /Constant_10_output_0) -> /Where_1_output_0
  Expand(/Constant_9_output_0, /Where_1_output_0) -> /Expand_2_output_0
Constant(value=[-1]) -> /Constant_16_output_0
  Unsqueeze(/Expand_2_output_0, /Constant_16_output_0) -> /Unsqueeze_1_output_0
    Concat(/Unsqueeze_output_0, /Unsqueeze_1_output_0, axis=-1) -> /Concat_output_0
Shape(onnx::Slice_0) -> /Shape_1_output_0
Constant(value=[0]) -> /Constant_17_output_0
Constant(value=[2]) -> /Constant_18_output_0
Constant(value=[922337203...) -> /Constant_19_output_0
  Slice(/Shape_1_output_0, /Constant_18_output_0, /Constant_19_output_0, /Constant_17_output_0) -> /Slice_2_output_0
  Concat(/Constant_10_output_0, /Slice_2_output_0, axis=0) -> /Concat_1_output_0
    Reshape(/Expand_output_0, /Concat_1_output_0, allowzero=0) -> /Reshape_output_0
      ScatterND(onnx::Slice_0, /Concat_output_0, /Reshape_output_0) -> 58
output: name='58' type=dtype('float32') shape=[5, 5]

InplaceSetItemSquareAdd

forward

def forward(self, x):
    x[:2, :3] = 1
    return x + 2

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[5, 5]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_reshape_init7_s3_0_1_20' type=int64 shape=(3, 1) -- array([0, 1, 2])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s3_0_1_2)##init7_s3_0_1_2/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_onx_transpose_fill0' type=float32 shape=(3, 2)           -- GraphBuilder.constant_folding.from/fold(fill)##fill/
init: name='_reshape_init7_s2_0_10' type=int64 shape=(2, 1) -- array([0, 1])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s2_0_1)##init7_s2_0_1/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_reshape_init1_s_0' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.0##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
  Transpose(slice_3, perm=[1,0]) -> _onx_transpose_slice_30
    ScatterND(_onx_transpose_slice_30, _reshape_init7_s3_0_1_20, _onx_transpose_fill0) -> _onx_scatternd_transpose_slice_300
      Transpose(_onx_scatternd_transpose_slice_300, perm=[1,0]) -> slice_scatter
        ScatterND(x, _reshape_init7_s2_0_10, slice_scatter) -> output_0
          Add(output_0, _reshape_init1_s_0) -> output_1
output: name='output_1' type=dtype('float32') shape=[5, 5]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[5, 5]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_reshape_init7_s3_0_1_20' type=int64 shape=(3, 1) -- array([0, 1, 2])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s3_0_1_2)##init7_s3_0_1_2/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_onx_transpose_fill0' type=float32 shape=(3, 2)           -- GraphBuilder.constant_folding.from/fold(fill)##fill/
init: name='_reshape_init7_s2_0_10' type=int64 shape=(2, 1) -- array([0, 1])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s2_0_1)##init7_s2_0_1/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_reshape_init1_s_0' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.0##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
  Transpose(slice_3, perm=[1,0]) -> _onx_transpose_slice_30
    ScatterND(_onx_transpose_slice_30, _reshape_init7_s3_0_1_20, _onx_transpose_fill0) -> _onx_scatternd_transpose_slice_300
      Transpose(_onx_scatternd_transpose_slice_300, perm=[1,0]) -> slice_scatter
        ScatterND(x, _reshape_init7_s2_0_10, slice_scatter) -> output_0
          Add(output_0, _reshape_init1_s_0) -> output_1
output: name='output_1' type=dtype('float32') shape=[5, 5]

custom-tracing

FAILED

Unexpected type <class 'int'> for name.

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[5, 5]
Constant(value=[[0], [1],...) -> val_43
Constant(value=[0]) -> val_26
Constant(value=[2]) -> val_29
Constant(value=[0]) -> val_32
Constant(value_ints=[1]) -> val_33
  Slice(x, val_26, val_29, val_32, val_33) -> slice_3
    Transpose(slice_3, perm=[1,0]) -> val_45
Constant(value=[[1.0, 1.0...) -> val_44
  ScatterND(val_45, val_43, val_44, reduction=b'none') -> val_46
    Transpose(val_46, perm=[1,0]) -> slice_scatter
Constant(value=[[0], [1]]) -> val_55
  ScatterND(x, val_55, slice_scatter, reduction=b'none') -> slice_scatter_1
Constant(value=2.0) -> scalar_tensor_default
  Add(slice_scatter_1, scalar_tensor_default) -> add
output: name='add' type=dtype('float32') shape=[5, 5]

script

opset: domain='' version=17
input: name='onnx::Slice_0' type=dtype('float32') shape=[5, 5]
Constant(value=[0]) -> /Constant_output_0
Constant(value=[0]) -> /Constant_1_output_0
Constant(value=[2]) -> /Constant_2_output_0
Constant(value=[1]) -> /Constant_3_output_0
  Slice(onnx::Slice_0, /Constant_1_output_0, /Constant_2_output_0, /Constant_output_0, /Constant_3_output_0) -> /Slice_output_0
Constant(value=[1]) -> /Constant_4_output_0
Constant(value=[0]) -> /Constant_5_output_0
Constant(value=[3]) -> /Constant_6_output_0
Constant(value=[1]) -> /Constant_7_output_0
  Slice(/Slice_output_0, /Constant_5_output_0, /Constant_6_output_0, /Constant_4_output_0, /Constant_7_output_0) -> /Slice_1_output_0
    Shape(/Slice_1_output_0) -> /Shape_output_0
Constant(value=[[1.0]]) -> /Constant_8_output_0
  Expand(/Constant_8_output_0, /Shape_output_0) -> /Expand_output_0
Constant(value=[0, 1, 2]) -> /Constant_9_output_0
Constant(value=[[0], [1]]) -> onnx::Expand_27
Constant(value=[2, 3]) -> /Constant_10_output_0
Constant(value=[2]) -> /Constant_11_output_0
  ConstantOfShape(/Constant_11_output_0, value=[1]) -> /ConstantOfShape_output_0
Constant(value=-1) -> /Constant_12_output_0
  Mul(/ConstantOfShape_output_0, /Constant_12_output_0) -> /Mul_output_0
  Equal(/Constant_10_output_0, /Mul_output_0) -> /Equal_output_0
  Where(/Equal_output_0, /ConstantOfShape_output_0, /Constant_10_output_0) -> /Where_output_0
  Expand(onnx::Expand_27, /Where_output_0) -> /Expand_1_output_0
Constant(value=[-1]) -> /Constant_13_output_0
  Unsqueeze(/Expand_1_output_0, /Constant_13_output_0) -> /Unsqueeze_output_0
Constant(value=[2]) -> /Constant_14_output_0
  ConstantOfShape(/Constant_14_output_0, value=[1]) -> /ConstantOfShape_1_output_0
Constant(value=-1) -> /Constant_15_output_0
  Mul(/ConstantOfShape_1_output_0, /Constant_15_output_0) -> /Mul_1_output_0
  Equal(/Constant_10_output_0, /Mul_1_output_0) -> /Equal_1_output_0
  Where(/Equal_1_output_0, /ConstantOfShape_1_output_0, /Constant_10_output_0) -> /Where_1_output_0
  Expand(/Constant_9_output_0, /Where_1_output_0) -> /Expand_2_output_0
Constant(value=[-1]) -> /Constant_16_output_0
  Unsqueeze(/Expand_2_output_0, /Constant_16_output_0) -> /Unsqueeze_1_output_0
    Concat(/Unsqueeze_output_0, /Unsqueeze_1_output_0, axis=-1) -> /Concat_output_0
Shape(onnx::Slice_0) -> /Shape_1_output_0
Constant(value=[0]) -> /Constant_17_output_0
Constant(value=[2]) -> /Constant_18_output_0
Constant(value=[922337203...) -> /Constant_19_output_0
  Slice(/Shape_1_output_0, /Constant_18_output_0, /Constant_19_output_0, /Constant_17_output_0) -> /Slice_2_output_0
  Concat(/Constant_10_output_0, /Slice_2_output_0, axis=0) -> /Concat_1_output_0
    Reshape(/Expand_output_0, /Concat_1_output_0, allowzero=0) -> /Reshape_output_0
      ScatterND(onnx::Slice_0, /Concat_output_0, /Reshape_output_0) -> /ScatterND_output_0
Constant(value=2.0) -> /Constant_20_output_0
  Add(/ScatterND_output_0, /Constant_20_output_0) -> 60
output: name='60' type=dtype('float32') shape=[5, 5]

InplaceSetItemSquareAdd2

forward

def forward(self, x):
    x[:2, :3] = 1
    return x + 2, x + 3

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[5, 5]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_reshape_init7_s3_0_1_20' type=int64 shape=(3, 1) -- array([0, 1, 2])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s3_0_1_2)##init7_s3_0_1_2/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_onx_transpose_fill0' type=float32 shape=(3, 2)           -- GraphBuilder.constant_folding.from/fold(fill)##fill/
init: name='_reshape_init7_s2_0_10' type=int64 shape=(2, 1) -- array([0, 1])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s2_0_1)##init7_s2_0_1/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_reshape_init1_s_0' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.0##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_reshape_init1_s_20' type=float32 shape=(1,) -- array([3.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_2,init7_s1_1)##init1_s_2/shape_type_compute._cast_inputs.0##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
  Transpose(slice_3, perm=[1,0]) -> _onx_transpose_slice_30
    ScatterND(_onx_transpose_slice_30, _reshape_init7_s3_0_1_20, _onx_transpose_fill0) -> _onx_scatternd_transpose_slice_300
      Transpose(_onx_scatternd_transpose_slice_300, perm=[1,0]) -> slice_scatter
        ScatterND(x, _reshape_init7_s2_0_10, slice_scatter) -> output_0
          Add(output_0, _reshape_init1_s_0) -> output_1
Add(output_0, _reshape_init1_s_20) -> output_2
output: name='output_1' type=dtype('float32') shape=[5, 5]
output: name='output_2' type=dtype('float32') shape=[5, 5]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[5, 5]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_reshape_init7_s3_0_1_20' type=int64 shape=(3, 1) -- array([0, 1, 2])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s3_0_1_2)##init7_s3_0_1_2/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_onx_transpose_fill0' type=float32 shape=(3, 2)           -- GraphBuilder.constant_folding.from/fold(fill)##fill/
init: name='_reshape_init7_s2_0_10' type=int64 shape=(2, 1) -- array([0, 1])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s2_0_1)##init7_s2_0_1/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_reshape_init1_s_0' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.0##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='_reshape_init1_s_20' type=float32 shape=(1,) -- array([3.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_2,init7_s1_1)##init1_s_2/shape_type_compute._cast_inputs.0##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
  Transpose(slice_3, perm=[1,0]) -> _onx_transpose_slice_30
    ScatterND(_onx_transpose_slice_30, _reshape_init7_s3_0_1_20, _onx_transpose_fill0) -> _onx_scatternd_transpose_slice_300
      Transpose(_onx_scatternd_transpose_slice_300, perm=[1,0]) -> slice_scatter
        ScatterND(x, _reshape_init7_s2_0_10, slice_scatter) -> output_0
          Add(output_0, _reshape_init1_s_0) -> output_1
Add(output_0, _reshape_init1_s_20) -> output_2
output: name='output_1' type=dtype('float32') shape=[5, 5]
output: name='output_2' type=dtype('float32') shape=[5, 5]

custom-tracing

FAILED

Unexpected type <class 'int'> for name.

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[5, 5]
Constant(value=[[0], [1],...) -> val_43
Constant(value=[0]) -> val_26
Constant(value=[2]) -> val_29
Constant(value=[0]) -> val_32
Constant(value_ints=[1]) -> val_33
  Slice(x, val_26, val_29, val_32, val_33) -> slice_3
    Transpose(slice_3, perm=[1,0]) -> val_45
Constant(value=[[1.0, 1.0...) -> val_44
  ScatterND(val_45, val_43, val_44, reduction=b'none') -> val_46
    Transpose(val_46, perm=[1,0]) -> slice_scatter
Constant(value=[[0], [1]]) -> val_55
  ScatterND(x, val_55, slice_scatter, reduction=b'none') -> slice_scatter_1
Constant(value=2.0) -> scalar_tensor_default
  Add(slice_scatter_1, scalar_tensor_default) -> add
Constant(value=3.0) -> scalar_tensor_default_1
  Add(slice_scatter_1, scalar_tensor_default_1) -> add_1
output: name='add' type=dtype('float32') shape=[5, 5]
output: name='add_1' type=dtype('float32') shape=[5, 5]

script

opset: domain='' version=17
input: name='onnx::Slice_0' type=dtype('float32') shape=[5, 5]
Constant(value=[0]) -> /Constant_output_0
Constant(value=[0]) -> /Constant_1_output_0
Constant(value=[2]) -> /Constant_2_output_0
Constant(value=[1]) -> /Constant_3_output_0
  Slice(onnx::Slice_0, /Constant_1_output_0, /Constant_2_output_0, /Constant_output_0, /Constant_3_output_0) -> /Slice_output_0
Constant(value=[1]) -> /Constant_4_output_0
Constant(value=[0]) -> /Constant_5_output_0
Constant(value=[3]) -> /Constant_6_output_0
Constant(value=[1]) -> /Constant_7_output_0
  Slice(/Slice_output_0, /Constant_5_output_0, /Constant_6_output_0, /Constant_4_output_0, /Constant_7_output_0) -> /Slice_1_output_0
    Shape(/Slice_1_output_0) -> /Shape_output_0
Constant(value=[[1.0]]) -> /Constant_8_output_0
  Expand(/Constant_8_output_0, /Shape_output_0) -> /Expand_output_0
Constant(value=[0, 1, 2]) -> /Constant_9_output_0
Constant(value=[[0], [1]]) -> onnx::Expand_27
Constant(value=[2, 3]) -> /Constant_10_output_0
Constant(value=[2]) -> /Constant_11_output_0
  ConstantOfShape(/Constant_11_output_0, value=[1]) -> /ConstantOfShape_output_0
Constant(value=-1) -> /Constant_12_output_0
  Mul(/ConstantOfShape_output_0, /Constant_12_output_0) -> /Mul_output_0
  Equal(/Constant_10_output_0, /Mul_output_0) -> /Equal_output_0
  Where(/Equal_output_0, /ConstantOfShape_output_0, /Constant_10_output_0) -> /Where_output_0
  Expand(onnx::Expand_27, /Where_output_0) -> /Expand_1_output_0
Constant(value=[-1]) -> /Constant_13_output_0
  Unsqueeze(/Expand_1_output_0, /Constant_13_output_0) -> /Unsqueeze_output_0
Constant(value=[2]) -> /Constant_14_output_0
  ConstantOfShape(/Constant_14_output_0, value=[1]) -> /ConstantOfShape_1_output_0
Constant(value=-1) -> /Constant_15_output_0
  Mul(/ConstantOfShape_1_output_0, /Constant_15_output_0) -> /Mul_1_output_0
  Equal(/Constant_10_output_0, /Mul_1_output_0) -> /Equal_1_output_0
  Where(/Equal_1_output_0, /ConstantOfShape_1_output_0, /Constant_10_output_0) -> /Where_1_output_0
  Expand(/Constant_9_output_0, /Where_1_output_0) -> /Expand_2_output_0
Constant(value=[-1]) -> /Constant_16_output_0
  Unsqueeze(/Expand_2_output_0, /Constant_16_output_0) -> /Unsqueeze_1_output_0
    Concat(/Unsqueeze_output_0, /Unsqueeze_1_output_0, axis=-1) -> /Concat_output_0
Shape(onnx::Slice_0) -> /Shape_1_output_0
Constant(value=[0]) -> /Constant_17_output_0
Constant(value=[2]) -> /Constant_18_output_0
Constant(value=[922337203...) -> /Constant_19_output_0
  Slice(/Shape_1_output_0, /Constant_18_output_0, /Constant_19_output_0, /Constant_17_output_0) -> /Slice_2_output_0
  Concat(/Constant_10_output_0, /Slice_2_output_0, axis=0) -> /Concat_1_output_0
    Reshape(/Expand_output_0, /Concat_1_output_0, allowzero=0) -> /Reshape_output_0
      ScatterND(onnx::Slice_0, /Concat_output_0, /Reshape_output_0) -> /ScatterND_output_0
Constant(value=2.0) -> /Constant_20_output_0
  Add(/ScatterND_output_0, /Constant_20_output_0) -> 60
Constant(value=3.0) -> /Constant_21_output_0
  Add(/ScatterND_output_0, /Constant_21_output_0) -> 62
output: name='60' type=dtype('float32') shape=[5, 5]
output: name='62' type=dtype('float32') shape=[5, 5]

SignatureFloat1

forward

def forward(self, x, alpha: float = 2.0):
    return torch.sigmoid(self.linear(x)) - self.buff * alpha
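
According to the traced GraphModule shown further below (Linear(in_features=3, out_features=1)) and the buff initializer of value 0.5, a minimal module for this case could be sketched as follows (weight values and the buffer registration are assumptions):

import torch


class SignatureFloat1(torch.nn.Module):  # name taken from the case title
    def __init__(self):
        super().__init__()
        # Linear(in_features=3, out_features=1), as reported in the custom-tracing debug output
        self.linear = torch.nn.Linear(3, 1)
        # buffer of value 0.5, matching the 'buff' initializer
        self.register_buffer("buff", torch.tensor([0.5]))

    def forward(self, x, alpha: float = 2.0):
        return torch.sigmoid(self.linear(x)) - self.buff * alpha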

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='alpha' type=dtype('float32') shape=[1]
init: name='mul' type=float32 shape=(1,) -- array([0.75], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_mul_b_buff0)##_onx_mul_b_buff0/
init: name='GemmTransposePattern--_onx_transpose_p_linear_weight0' type=float32 shape=(1, 3) -- array([-0.301,  0.225, -0.31 ], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_transpose_p_linear_weight0,init7_s2_1_-1)##_onx_transpose_p_linear_weight0/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.321], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='alpha' type=dtype('float32') shape=[1]
init: name='mul' type=float32 shape=(1,) -- array([0.75], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_mul_b_buff0)##_onx_mul_b_buff0/
init: name='GemmTransposePattern--_onx_transpose_p_linear_weight0' type=float32 shape=(1, 3) -- array([ 0.113,  0.007, -0.153], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_transpose_p_linear_weight0,init7_s2_1_-1)##_onx_transpose_p_linear_weight0/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.142], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]

custom-tracing

FAILED

Unable to interpret method 'aten_meth_mul', args=(buff, alpha), kwargs={}, dispatcher=None
--DEBUG--
[GraphBuilder-HXG] Message starts, there are 3 initializers, 7 nodes, 2 inputs, 2 outputs.
--SHAPE--
_dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_dimensions_source_flat=None
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes=None
_known_shapes={'_sub_Linear__onx_matmul_input_10': (4, 1),
 '_sub_Linear__onx_transpose_weight0': (3, 1),
 '_sub_Linear_input_1': (4, 3),
 '_sub_Linear_linear': (4, 1),
 '_sub_Linear_output': (4, 1),
 'alpha': (),
 'buff': (1,),
 'linear': (4, 1),
 'linear.bias': (1,),
 'linear.weight': (1, 3),
 'sigmoid': (4, 1),
 'x': (4, 3)}
_known_types={'_sub_Linear__onx_matmul_input_10': 1,
 '_sub_Linear__onx_transpose_weight0': 1,
 '_sub_Linear_input_1': 1,
 '_sub_Linear_linear': 1,
 '_sub_Linear_output': 1,
 'alpha': 1,
 'buff': 1,
 'linear': 1,
 'linear.bias': 1,
 'linear.weight': 1,
 'sigmoid': 1,
 'x': 1}
_known_value_shape={}
_known_constants=['_sub_Linear__onx_transpose_weight0', 'buff', 'linear.bias', 'linear.weight']
_known_ranks={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
    alpha -> {mul}
    buff -> {mul}
    linear -> {sigmoid}
    mul -> {sub}
    sigmoid -> {sub}
    x -> {linear}
--TORCH-SHAPES--
    x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), '')) --- 1:2:(4, 3):
    alpha: ('run_node', (('example_value', torch.float32, torch.Size([])), '')) --- 1:0:():
    linear: ('run_node', ('', '')) --- 1:2:(4, 1):
    sigmoid: ('run_node', ('', '')) --- 1:2:(4, 1):
    buff: ('run_node', ('', '')) --- 1:1:(1,):
    mul: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(strict=True, tracing=True, aten_as_function={'aten.scaled_dot_product_attention.default'})
-- process.graph_module --
GraphModule(
  (linear): Linear(in_features=3, out_features=1, bias=True)
)



def forward(self, x, alpha : float = 2.0):
    linear = self.linear(x);  x = None
    sigmoid = torch.sigmoid(linear);  linear = None
    buff = self.buff
    mul = buff.mul(alpha);  buff = alpha = None
    sub = sigmoid - mul;  sigmoid = mul = None
    return sub

# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
    %x : [num_users=1] = placeholder[target=x]
    %alpha : float [num_users=1] = placeholder[target=alpha](default=2.0)
    %linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
    %sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
    %buff : [num_users=1] = get_attr[target=buff]
    %mul : [num_users=1] = call_method[target=mul](args = (%buff, %alpha), kwargs = {})
    %sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %mul), kwargs = {})
    return sub
-- process.progress --
node 5/8 target=mul
--
[GraphBuilder-HXG.make_tensor_input] x[1:4x3]
[GraphBuilder-HXG.make_tensor_input] alpha[1:]
[GraphBuilder-HXG.make_initializer] linear.weight[torch.float32:torch.float32:[0.41223570704460144, 0.5759496092796326, -0.40965598821640015]] - SOURCE: GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
[GraphBuilder-HXG.make_initializer] linear.bias[torch.float32:torch.float32:[-0.11819536238908768]] - SOURCE: GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
[GraphBuilder-HXG.make_initializer] buff[torch.float32:torch.float32:[0.5]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-HXG.make_node] .make_nodes     [#:#   ] Identity:['x']->['_sub_Linear_input_1']
[GraphBuilder-HXG.make_node] linear          [#:#   ] Transpose:['linear.weight']->['_sub_Linear__onx_transpose_weight0']
[GraphBuilder-HXG.make_node] Opset           [##:#  ] MatMul:['_sub_Linear_input_1', '_sub_Linear__onx_transpose_weight0']->['_sub_Linear__onx_matmul_input_10']
[GraphBuilder-HXG.make_node] Opset2          [##:#  ] Add:['_sub_Linear__onx_matmul_input_10', 'linear.bias']->['_sub_Linear_linear']
[GraphBuilder-HXG.make_node] .output         [#:#   ] Identity:['_sub_Linear_linear']->['_sub_Linear_output']
[GraphBuilder-HXG.make_node] .make_nodes2    [#:#   ] Identity:['_sub_Linear_output']->['linear']
[GraphBuilder-HXG.make_node] sigmoid         [#:#   ] Sigmoid:['linear']->['sigmoid']
[GraphBuilder-HXG] Message completed, there are 3 initializers, 7 nodes, 2 inputs, 2 outputs.

dynamo-ir

FAILED

Input mismatch, inputs[0]=(T1r2,float) but names=['x'], model=SignatureFloat1, export='dynamo-ir'

script

FAILED

[ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Unexpected input data type. Actual: (tensor(float)) , expected: (tensor(double))

SignatureInt1

forward

def forward(self, x, i: int = 2):
    return torch.sigmoid(self.linear(x)) - self.buff + x[:, i : i + 1]

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='i' type=dtype('int64') shape=[1]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2])           -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_onx_transpose_p_linear_weight0' type=float32 shape=(1, 3) -- array([-0.041,  0.264,  0.188], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_transpose_p_linear_weight0,init7_s2_1_-1)##_onx_transpose_p_linear_weight0/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.545], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, b_buff) -> sub
Slice(x, init7_s1_1, init7_s1_2, init7_s1_1) -> slice_2
  Add(sub, slice_2) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='i' type=dtype('int64') shape=[1]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1])           -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2])           -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_onx_transpose_p_linear_weight0' type=float32 shape=(1, 3) -- array([ 0.453, -0.148,  0.174], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_transpose_p_linear_weight0,init7_s2_1_-1)##_onx_transpose_p_linear_weight0/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.253], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, b_buff) -> sub
Slice(x, init7_s1_1, init7_s1_2, init7_s1_1) -> slice_2
  Add(sub, slice_2) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]

custom-tracing

FAILED

[ONNXRuntimeError] : 6 : RUNTIME_EXCEPTION : Non-zero status code returned while running Concat node. Name:'_getitem_slicenSD' Status Message: /home/xadupre/github/onnxruntime/onnxruntime/core/providers/cpu/tensor/concat.cc:139 onnxruntime::common::Status onnxruntime::ConcatBase::PrepareForCompute(onnxruntime::OpKernelContext*, const InlinedTensorsVector&, onnxruntime::Prepare&) const input_rank == reference_rank was false. Ranks of input data are different, cannot concatenate them. expected rank: 1 got: 2

dynamo-ir

FAILED

Input mismatch, inputs[0]=(T1r2,int) but names=['x'], model=SignatureInt1, export='dynamo-ir'

script

FAILED

[ONNXRuntimeError] : 1 : FAIL : Non-zero status code returned while running Slice node. Name:'/Slice' Status Message: slice.cc:195 FillVectorsFromInput Starts must be a 1-D array
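
Both custom exporters turn the Python argument ``i: int = 2`` into a dedicated int64 graph input of shape [1], while the slice bounds themselves are folded into the constants 1 and 2, so the runtime value of ``i`` does not change the result. A minimal sketch of running such a model with onnxruntime (the file name is hypothetical):

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("signature_int1_custom.onnx")  # hypothetical file name
feeds = {
    "x": np.random.rand(4, 3).astype(np.float32),
    "i": np.array([1], dtype=np.int64),  # the Python int argument becomes an int64 tensor
}
print(sess.run(None, feeds)[0].shape)  # expected: (4, 1)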

SignatureInt2

forward

def forward(self, x, i: int = 2):
    return torch.sigmoid(self.linear(x)) - self.buff + x[:, i]

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='i' type=dtype('int64') shape=[1]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s_1' type=int64 shape=() -- array([1])              -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_onx_transpose_p_linear_weight0' type=float32 shape=(1, 3) -- array([-0.498,  0.444, -0.566], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_transpose_p_linear_weight0,init7_s2_1_-1)##_onx_transpose_p_linear_weight0/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.27], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gather(x, init7_s_1, axis=1) -> select
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, b_buff) -> sub
  Add(sub, select) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 4]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='i' type=dtype('int64') shape=[1]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s_1' type=int64 shape=() -- array([1])              -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_onx_transpose_p_linear_weight0' type=float32 shape=(1, 3) -- array([ 0.499,  0.084, -0.347], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_transpose_p_linear_weight0,init7_s2_1_-1)##_onx_transpose_p_linear_weight0/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.424], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gather(x, init7_s_1, axis=1) -> select
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, b_buff) -> sub
  Add(sub, select) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 4]

custom-tracing

FAILED

One index is given as an integer i but this requires to append a node 'Squeeze' after this one and this is not yet implemented. You can replace the integer by `i:i+1`
--DEBUG--
[GraphBuilder-SIY] Message starts, there are 4 initializers, 8 nodes, 2 inputs, 2 outputs.
--SHAPE--
_dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_dimensions_source_flat=None
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes=None
_known_shapes={'_sub_Linear__onx_matmul_input_10': (4, 1),
 '_sub_Linear__onx_transpose_weight0': (3, 1),
 '_sub_Linear_input_1': (4, 3),
 '_sub_Linear_linear': (4, 1),
 '_sub_Linear_output': (4, 1),
 'buff': (1,),
 'getitem_axis': (2,),
 'i': (),
 'linear': (4, 1),
 'linear.bias': (1,),
 'linear.weight': (1, 3),
 'sigmoid': (4, 1),
 'sub': (4, 1),
 'x': (4, 3)}
_known_types={'_sub_Linear__onx_matmul_input_10': 1,
 '_sub_Linear__onx_transpose_weight0': 1,
 '_sub_Linear_input_1': 1,
 '_sub_Linear_linear': 1,
 '_sub_Linear_output': 1,
 'buff': 1,
 'getitem_axis': 7,
 'i': 7,
 'linear': 1,
 'linear.bias': 1,
 'linear.weight': 1,
 'sigmoid': 1,
 'sub': 1,
 'x': 1}
_known_value_shape={}
_known_constants=['_sub_Linear__onx_transpose_weight0',
 'buff',
 'getitem_axis',
 'linear.bias',
 'linear.weight']
_known_ranks={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
    buff -> {sub}
    getitem -> {add}
    i -> {getitem}
    linear -> {sigmoid}
    sigmoid -> {sub}
    sub -> {add}
    x -> {linear, getitem}
--TORCH-SHAPES--
    x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), '')) --- 1:2:(4, 3):
    i: ('run_node', (('example_value', torch.int64, torch.Size([])), '')) --- 7:0:():
    linear: ('run_node', ('', '')) --- 1:2:(4, 1):
    sigmoid: ('run_node', ('', '')) --- 1:2:(4, 1):
    buff: ('run_node', ('', '')) --- 1:1:(1,):
    sub: ('run_node', ('', '')) --- 1:2:(4, 1):
    getitem: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(strict=True, tracing=True, aten_as_function={'aten.scaled_dot_product_attention.default'})
-- process.graph_module --
GraphModule(
  (linear): Linear(in_features=3, out_features=1, bias=True)
)



def forward(self, x, i : int = 2):
    linear = self.linear(x)
    sigmoid = torch.sigmoid(linear);  linear = None
    buff = self.buff
    sub = sigmoid - buff;  sigmoid = buff = None
    getitem = x[(slice(None, None, None), i)];  x = i = None
    add = sub + getitem;  sub = getitem = None
    return add

# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
    %x : [num_users=2] = placeholder[target=x]
    %i : int [num_users=1] = placeholder[target=i](default=2)
    %linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
    %sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
    %buff : [num_users=1] = get_attr[target=buff]
    %sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %buff), kwargs = {})
    %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%x, (slice(None, None, None), %i)), kwargs = {})
    %add : [num_users=1] = call_function[target=operator.add](args = (%sub, %getitem), kwargs = {})
    return add
-- process.progress --
node 6/9 target=<built-in function getitem>
--
[GraphBuilder-SIY.make_tensor_input] x[1:4x3]
[GraphBuilder-SIY.make_tensor_input] i[7:]
[GraphBuilder-SIY.make_initializer] linear.weight[torch.float32:torch.float32:[-0.1787819117307663, 0.4661900997161865, 0.10162513703107834]] - SOURCE: GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
[GraphBuilder-SIY.make_initializer] linear.bias[torch.float32:torch.float32:[-0.5469270944595337]] - SOURCE: GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
[GraphBuilder-SIY.make_initializer] buff[torch.float32:torch.float32:[0.5]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-SIY.make_initializer] getitem_axis[int64:int64:[0, 1]] - SOURCE: DynamoInterpreter._getitem_slice.axis.1
[GraphBuilder-SIY.make_node] .make_nodes     [#:#   ] Identity:['x']->['_sub_Linear_input_1']
[GraphBuilder-SIY.make_node] linear          [#:#   ] Transpose:['linear.weight']->['_sub_Linear__onx_transpose_weight0']
[GraphBuilder-SIY.make_node] Opset           [##:#  ] MatMul:['_sub_Linear_input_1', '_sub_Linear__onx_transpose_weight0']->['_sub_Linear__onx_matmul_input_10']
[GraphBuilder-SIY.make_node] Opset2          [##:#  ] Add:['_sub_Linear__onx_matmul_input_10', 'linear.bias']->['_sub_Linear_linear']
[GraphBuilder-SIY.make_node] .output         [#:#   ] Identity:['_sub_Linear_linear']->['_sub_Linear_output']
[GraphBuilder-SIY.make_node] .make_nodes2    [#:#   ] Identity:['_sub_Linear_output']->['linear']
[GraphBuilder-SIY.make_node] sigmoid         [#:#   ] Sigmoid:['linear']->['sigmoid']
[GraphBuilder-SIY.make_node] sub             [##:#  ] Sub:['sigmoid', 'buff']->['sub']
[GraphBuilder-SIY] Message completed, there are 4 initializers, 8 nodes, 2 inputs, 2 outputs.

dynamo-ir

FAILED

Input mismatch, inputs[0]=(T1r2,int) but names=['x'], model=SignatureInt2, export='dynamo-ir'

script

opset: domain='' version=17
input: name='onnx::Gemm_0' type=dtype('float32') shape=[4, 3]
input: name='onnx::Gather_1' type=dtype('int64') shape=None
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.139,  0.497,  0.077], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.16], dtype=float32)
Gather(onnx::Gemm_0, onnx::Gather_1, axis=1) -> /Gather_output_0
Gemm(onnx::Gemm_0, linear.weight, linear.bias, alpha=1.00, beta=1.00, transB=1) -> /linear/Gemm_output_0
  Sigmoid(/linear/Gemm_output_0) -> /Sigmoid_output_0
    Sub(/Sigmoid_output_0, buff) -> /Sub_output_0
  Add(/Sub_output_0, /Gather_output_0) -> 9
output: name='9' type=dtype('float32') shape=[4, 4]
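
The custom-tracing error above suggests its own workaround: replace the integer index ``x[:, i]`` with the slice ``x[:, i : i + 1]`` so that no Squeeze node is needed. A hedged sketch of that rewrite (note it keeps the sliced dimension, so broadcasting gives an output of shape (4, 1) instead of (4, 4), exactly as in SignatureInt1):

def forward(self, x, i: int = 2):
    # slice instead of integer indexing, so no Squeeze node is required
    return torch.sigmoid(self.linear(x)) - self.buff + x[:, i : i + 1]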

SignatureListFixedLength

forward

def forward(self, x, lx: list):
    return (
        torch.sigmoid(self.linear(x)) - self.buff + lx[0] * lx[1].sum(axis=1, keepdim=True)
    )

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1])           -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_onx_transpose_p_linear_weight0' type=float32 shape=(1, 3) -- array([ 0.308, -0.055, -0.001], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_transpose_p_linear_weight0,init7_s2_1_-1)##_onx_transpose_p_linear_weight0/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.513], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
  Mul(lx_0, sum_1) -> mul
    Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1])           -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_onx_transpose_p_linear_weight0' type=float32 shape=(1, 3) -- array([0.038, 0.001, 0.366], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_transpose_p_linear_weight0,init7_s2_1_-1)##_onx_transpose_p_linear_weight0/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.194], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
  Mul(lx_0, sum_1) -> mul
    Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]

custom-tracing

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx' type='NOTENSOR' shape=None
init: name='linear.bias' type=float32 shape=(1,) -- array([0.366], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.get_attr.0
init: name='init7_s_0' type=int64 shape=() -- array([0])              -- DynamoInterpreter.getitem.1
init: name='init7_s_1' type=int64 shape=() -- array([1])              -- DynamoInterpreter.getitem.1
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1])           -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_sub_Linear__onx_transpose_weight0' type=float32 shape=(1, 3) -- array([0.019, 0.364, 0.445], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_sub_Linear__onx_transpose_weight0,init7_s2_1_-1)##_sub_Linear__onx_transpose_weight0/GraphBuilder.constant_folding.from/fold(linear.weight)##linear.weight/GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
Gemm(x, GemmTransposePattern--_sub_Linear__onx_transpose_weight0, linear.bias, transB=1) -> _sub_Linear_linear
  Sigmoid(_sub_Linear_linear) -> sigmoid
    Sub(sigmoid, buff) -> sub
SequenceAt(lx, init7_s_0) -> getitem
SequenceAt(lx, init7_s_1) -> getitem_1
  ReduceSum(getitem_1, init7_s1_1, keepdims=1) -> sum_1
  Mul(getitem, sum_1) -> mul
    Add(sub, mul) -> output
output: name='output' type=dtype('float32') shape=[4, 1]

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.142, -0.439, -0.412], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.125], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
Constant(value=[1]) -> val_3
  ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
    Mul(lx_0, sum_1) -> mul
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, buff) -> sub
      Add(sub, mul) -> add
output: name='add' type=dtype('float32') shape=[4, 1]

script

opset: domain='' version=17
input: name='onnx::Gemm_0' type=dtype('float32') shape=[4, 3]
input: name='onnx::Mul_1' type=dtype('float32') shape=[4, 1]
input: name='onnx::ReduceSum_2' type=dtype('float32') shape=[4, 2]
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.467, -0.355,  0.536], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.187], dtype=float32)
Constant(value=[1]) -> onnx::ReduceSum_9
  ReduceSum(onnx::ReduceSum_2, onnx::ReduceSum_9, keepdims=1) -> /ReduceSum_output_0
    Mul(onnx::Mul_1, /ReduceSum_output_0) -> /Mul_output_0
Gemm(onnx::Gemm_0, linear.weight, linear.bias, alpha=1.00, beta=1.00, transB=1) -> /linear/Gemm_output_0
  Sigmoid(/linear/Gemm_output_0) -> /Sigmoid_output_0
    Sub(/Sigmoid_output_0, buff) -> /Sub_output_0
      Add(/Sub_output_0, /Mul_output_0) -> 12
output: name='12' type=dtype('float32') shape=[4, 1]
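
Most exporters flatten the list argument ``lx`` into two separate tensor inputs ``lx_0`` and ``lx_1``; only custom-tracing keeps it as a single sequence input and reads its elements with SequenceAt. A minimal feed sketch for the flattened form (the file name is hypothetical):

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("signature_list_fixed_length.onnx")  # hypothetical file name
feeds = {
    "x": np.random.rand(4, 3).astype(np.float32),
    "lx_0": np.random.rand(4, 1).astype(np.float32),  # lx[0]
    "lx_1": np.random.rand(4, 2).astype(np.float32),  # lx[1]
}
print(sess.run(None, feeds)[0].shape)  # expected: (4, 1)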

SignatureListFixedWithNone

forward

def forward(self, lx):
    x = lx[0]
    if lx[1] is not None:
        x += lx[1]
    if lx[2] is not None:
        x += lx[2]
    return x

custom-fallback

FAILED

Input mismatch, inputs[0]=(#3[T1r2,T1r2,None],) but names=['lx_0', 'lx_1'], model=SignatureListFixedWithNone, export='custom-fallback'

custom-dec

FAILED

Input mismatch, inputs[0]=(#3[T1r2,T1r2,None],) but names=['lx_0', 'lx_1'], model=SignatureListFixedWithNone, export='custom-dec'

custom-tracing

FAILED

Unable to create an input 'lx' with type #3[T1r2,T1r2,None]
--DEBUG--
[GraphBuilder-KDC] Message starts, there are 0 initializers, 0 nodes, 0 inputs, 0 outputs.
--SHAPE--
_dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_dimensions_source_flat=None
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes=None
_known_shapes={}
_known_types={}
_known_value_shape={}
_known_constants=[]
_known_ranks={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
    lx -> {getitem_2, getitem_4, getitem_1, getitem_3, getitem}
--TORCH-SHAPES--
    lx: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(strict=True, tracing=True, aten_as_function={'aten.scaled_dot_product_attention.default'})
-- process.graph_module --
GraphModule()



def forward(self, lx):
    getitem = lx[0]
    getitem_1 = lx[1];  getitem_1 = None
    getitem_2 = lx[1]
    add = getitem + getitem_2;  getitem = getitem_2 = None
    getitem_3 = lx[2];  getitem_3 = None
    getitem_4 = lx[2];  lx = None
    add_1 = add + getitem_4;  add = getitem_4 = None
    return add_1

# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
    %lx : [num_users=5] = placeholder[target=lx]
    %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%lx, 0), kwargs = {})
    %getitem_1 : [num_users=0] = call_function[target=operator.getitem](args = (%lx, 1), kwargs = {})
    %getitem_2 : [num_users=1] = call_function[target=operator.getitem](args = (%lx, 1), kwargs = {})
    %add : [num_users=1] = call_function[target=operator.add](args = (%getitem, %getitem_2), kwargs = {})
    %getitem_3 : [num_users=0] = call_function[target=operator.getitem](args = (%lx, 2), kwargs = {})
    %getitem_4 : [num_users=1] = call_function[target=operator.getitem](args = (%lx, 2), kwargs = {})
    %add_1 : [num_users=1] = call_function[target=operator.add](args = (%add, %getitem_4), kwargs = {})
    return add_1
-- process.progress --
node 0/9 target=lx
--
[GraphBuilder-KDC] Message completed, there are 0 initializers, 0 nodes, 0 inputs, 0 outputs.

dynamo-ir

FAILED

Input mismatch, inputs[0]=(#3[T1r2,T1r2,None],) but names=['lx_0', 'lx_1'], model=SignatureListFixedWithNone, export='dynamo-ir'

script

FAILED

Input mismatch, inputs[0]=(#3[T1r2,T1r2,None],) but names=['onnx::Add_0', 'onnx::Add_1'], model=SignatureListFixedWithNone, export='script'
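
Every exporter fails on the None entry: the flattened signatures only declare the two tensor elements (``lx_0``, ``lx_1``), so the three-element list supplied by the harness no longer matches the exported inputs. If such a model is exported anyway, only the non-None tensors should be fed at inference time; a hedged sketch (shapes are assumed, the harness only reports rank-2 float tensors):

import numpy as np

# the None element lx[2] has no corresponding graph input
feeds = {
    "lx_0": np.random.rand(4, 4).astype(np.float32),
    "lx_1": np.random.rand(4, 4).astype(np.float32),
}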

SignatureListVariableLength

forward

def forward(self, x, lx: list):
    t = torch.cat(lx, dim=1).sum(axis=1, keepdim=True)
    return torch.sigmoid(self.linear(x)) - self.buff + t

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1])           -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_onx_transpose_p_linear_weight0' type=float32 shape=(1, 3) -- array([-0.061, -0.335,  0.526], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_transpose_p_linear_weight0,init7_s2_1_-1)##_onx_transpose_p_linear_weight0/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.494], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Concat(lx_0, lx_1, axis=1) -> cat
  ReduceSum(cat, init7_s1_1, keepdims=1) -> sum_1
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, b_buff) -> sub
    Add(sub, sum_1) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1])           -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_onx_transpose_p_linear_weight0' type=float32 shape=(1, 3) -- array([ 0.154,  0.037, -0.227], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_transpose_p_linear_weight0,init7_s2_1_-1)##_onx_transpose_p_linear_weight0/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.397], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Concat(lx_0, lx_1, axis=1) -> cat
  ReduceSum(cat, init7_s1_1, keepdims=1) -> sum_1
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, b_buff) -> sub
    Add(sub, sum_1) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]

custom-tracing

FAILED

Type is unknown for result 'l', known_types={'x': 1}
--DEBUG--
[GraphBuilder-GBG] Message starts, there are 0 initializers, 0 nodes, 2 inputs, 2 outputs.
--SHAPE--
_dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_dimensions_source_flat=None
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes=None
_known_shapes={'x': (4, 3)}
_known_types={'x': 1}
_known_value_shape={}
_known_constants=[]
_known_ranks={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
    cat -> {sum_1}
    lx -> {cat}
    x -> {linear}
--TORCH-SHAPES--
    x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), '')) --- 1:2:(4, 3):
    lx: ('run_node', ('', '')) --- :::
    cat: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(strict=True, tracing=True, aten_as_function={'aten.scaled_dot_product_attention.default'})
-- process.graph_module --
GraphModule(
  (linear): Linear(in_features=3, out_features=1, bias=True)
)



def forward(self, x, lx : list):
    cat = torch.cat(lx, 1);  lx = None
    sum_1 = cat.sum(axis = 1, keepdim = True);  cat = None
    linear = self.linear(x);  x = None
    sigmoid = torch.sigmoid(linear);  linear = None
    buff = self.buff
    sub = sigmoid - buff;  sigmoid = buff = None
    add = sub + sum_1;  sub = sum_1 = None
    return add

# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
    %x : [num_users=1] = placeholder[target=x]
    %lx : list [num_users=1] = placeholder[target=lx]
    %cat : [num_users=1] = call_function[target=torch.cat](args = (%lx, 1), kwargs = {})
    %sum_1 : [num_users=1] = call_method[target=sum](args = (%cat,), kwargs = {axis: 1, keepdim: True})
    %linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
    %sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
    %buff : [num_users=1] = get_attr[target=buff]
    %sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %buff), kwargs = {})
    %add : [num_users=1] = call_function[target=operator.add](args = (%sub, %sum_1), kwargs = {})
    return add
-- process.progress --
node 2/10 target=<built-in method cat of type object at 0x7febd8cf5320>
--
[GraphBuilder-GBG.make_tensor_input] x[1:4x3]
[GraphBuilder-GBG.make_tensor_input] lx[0:]
[GraphBuilder-GBG] Message completed, there are 0 initializers, 0 nodes, 2 inputs, 2 outputs.

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.295, -0.251,  0.408], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.571], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
Concat(lx_0, lx_1, axis=1) -> cat
Constant(value=[1]) -> val_3
  ReduceSum(cat, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
  Sigmoid(linear) -> sigmoid
    Sub(sigmoid, buff) -> sub
    Add(sub, sum_1) -> add
output: name='add' type=dtype('float32') shape=[4, 1]

script

opset: domain='' version=17
input: name='onnx::Gemm_0' type=dtype('float32') shape=[4, 3]
input: name='onnx::Concat_1' type=dtype('float32') shape=[4, 1]
input: name='onnx::Concat_2' type=dtype('float32') shape=[4, 2]
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.346,  0.391, -0.332], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.327], dtype=float32)
Concat(onnx::Concat_1, onnx::Concat_2, axis=1) -> /Concat_output_0
Constant(value=[1]) -> onnx::ReduceSum_7
  ReduceSum(/Concat_output_0, onnx::ReduceSum_7, keepdims=1) -> /ReduceSum_output_0
Gemm(onnx::Gemm_0, linear.weight, linear.bias, alpha=1.00, beta=1.00, transB=1) -> /linear/Gemm_output_0
  Sigmoid(/linear/Gemm_output_0) -> /Sigmoid_output_0
    Sub(/Sigmoid_output_0, buff) -> /Sub_output_0
    Add(/Sub_output_0, /ReduceSum_output_0) -> 12
output: name='12' type=dtype('float32') shape=[4, 1]
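
Although the forward method accepts a variable-length list, the successful exports freeze the length seen at export time: every graph declares exactly ``lx_0`` and ``lx_1`` and concatenates them with a fixed Concat node, so feeding a list of a different length would require a new export. A quick way to check this on the saved model (the path is hypothetical):

import onnx

model = onnx.load("signature_list_variable_length.onnx")  # hypothetical path
print([i.name for i in model.graph.input])  # -> ['x', 'lx_0', 'lx_1']: the length 2 is baked in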

SignatureShapeAsIndex

forward

def forward(self, x, y):
    t = torch.sigmoid(self.linear(x)) + x
    return t[:, : y.shape[1]]

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='y' type=dtype('float32') shape=[4, 2]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2])           -- Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1])           -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_onx_transpose_p_linear_weight0' type=float32 shape=(1, 3) -- array([-0.494,  0.393, -0.464], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_transpose_p_linear_weight0,init7_s2_1_-1)##_onx_transpose_p_linear_weight0/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.323], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
  Sigmoid(linear) -> sigmoid
    Add(sigmoid, x) -> add
      Slice(add, init7_s1_0, init7_s1_2, init7_s1_1) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 2]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='y' type=dtype('float32') shape=[4, 2]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0])           -- Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2])           -- Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1])           -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_onx_transpose_p_linear_weight0' type=float32 shape=(1, 3) -- array([ 0.094, -0.416, -0.57 ], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_transpose_p_linear_weight0,init7_s2_1_-1)##_onx_transpose_p_linear_weight0/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.219], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
  Sigmoid(linear) -> sigmoid
    Add(sigmoid, x) -> add
      Slice(add, init7_s1_0, init7_s1_2, init7_s1_1) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 2]

custom-tracing

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='y' type=dtype('float32') shape=[4, 2]
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.299], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
init: name='getitem_1_axis' type=int64 shape=(2,) -- array([0, 1])    -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_1_start' type=int64 shape=(2,) -- array([0, 0])   -- DynamoInterpreter._getitem_slice.2
init: name='getitem_1_step' type=int64 shape=(2,) -- array([1, 1])    -- DynamoInterpreter._getitem_slice.3
init: name='_onx_concat_init7_s1_40' type=int64 shape=(2,) -- array([4, 2])-- GraphBuilder.constant_folding.from/fold(_onx_unsqueeze_getitem0,init7_s1_4)##init7_s1_4/Opset.make_node.1/Shape##_onx_unsqueeze_getitem0/
init: name='GemmTransposePattern--_sub_Linear__onx_transpose_weight0' type=float32 shape=(1, 3) -- array([-0.432, -0.468,  0.165], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_sub_Linear__onx_transpose_weight0,init7_s2_1_-1)##_sub_Linear__onx_transpose_weight0/GraphBuilder.constant_folding.from/fold(linear.weight)##linear.weight/GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)##init7_s2_1_-1/TransposeEqualReshapePattern.apply.new_shape
Gemm(x, GemmTransposePattern--_sub_Linear__onx_transpose_weight0, linear.bias, transB=1) -> _sub_Linear_linear
  Sigmoid(_sub_Linear_linear) -> sigmoid
    Add(sigmoid, x) -> add
      Slice(add, getitem_1_start, _onx_concat_init7_s1_40, getitem_1_axis, getitem_1_step) -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1']

dynamo-ir

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='y' type=dtype('float32') shape=[4, 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.576,  0.269,  0.155], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.117], dtype=float32)
Constant(value_ints=[0]) -> val_7
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
  Sigmoid(linear) -> sigmoid
    Add(sigmoid, x) -> add
Constant(value=[2]) -> val_11
Constant(value=[1]) -> val_15
Constant(value_ints=[1]) -> val_16
  Slice(add, val_7, val_11, val_15, val_16) -> slice_2
output: name='slice_2' type=dtype('float32') shape=[4, 2]

script

FAILED

Input mismatch, inputs[0]=(T1r2,T1r2) but names=['onnx::Gemm_0'], model=SignatureShapeAsIndex, export='script'
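
Because the export uses static shapes, the slice bound ``y.shape[1]`` is folded into a constant in every successful graph (the end value 2, or [4, 2] for the two-axis slice emitted by custom-tracing). Keeping that bound symbolic would require exporting with a dynamic dimension; a hedged sketch with torch.export, where ``model``, ``x`` and ``y`` stand for the case module and its example inputs and the Dim bounds are arbitrary:

import torch

dy = torch.export.Dim("dy", min=1, max=3)
ep = torch.export.export(model, (x, y), dynamic_shapes=(None, {1: dy}))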

TypeBFloat16

forward

def forward(self, x):
    xb = x.to(torch.bfloat16)
    return (xb + xb).to(torch.float32)

custom-fallback

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 4]
Add(x, x) -> add-x
  Cast(add-x, to=16) -> add
    Cast(add, to=1) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 4]

custom-dec

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 4]
Add(x, x) -> add-x
  Cast(add-x, to=16) -> add
    Cast(add, to=1) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 4]

custom-tracing

opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 4]
Add(x, x) -> add-x
  Cast(add-x, to=16) -> add
    Cast(add, to=1) -> output
output: name='output' type=dtype('float32') shape=[4, 4]

dynamo-ir

FAILED

[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(14) node with name 'node_Add_1'

script

FAILED

[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(14) node with name '/Add'
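
The two failures here are runtime errors, not export errors: dynamo-ir and script presumably emit the Add directly on bfloat16 tensors, and the onnxruntime CPU provider has no bfloat16 kernel for Add (opset 14). The custom exporters sidestep the issue by adding in float32 and only then casting through bfloat16, as their graphs show (Add -> Cast(to=16) -> Cast(to=1)). A hedged equivalent on the PyTorch side, not bit-identical to casting before the addition but matching what the custom exporters emit:

def forward(self, x):
    # compute in float32, then round-trip through bfloat16
    y = x + x
    return y.to(torch.bfloat16).to(torch.float32)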

Summary