Exported ONNX with Static Shapes¶
The following script shows, for many short cases, the ONNX models various
exporters produce for the same original model, using static shapes.
<<<

import inspect
import textwrap
import pandas
from experimental_experiment.torch_interpreter.eval import discover, run_exporter
from experimental_experiment.ext_test_case import unit_test_going
from experimental_experiment.helpers import pretty_onnx

cases = discover()
print()
print(":ref:`Summary <lo-summary-exported-program>`")
print()
sorted_cases = sorted(cases.items())
if unit_test_going():
    sorted_cases = sorted_cases[:3]
for name, cls_model in sorted_cases:
    print(f"* :ref:`{name} <lo-model-case-export-{name}>`")
print()

obs = []
for name, cls_model in sorted(cases.items()):
    print()
    print(f".. _lo-model-case-export-{name}:")
    print()
    print(name)
    print("=" * len(name))
    print()
    print("forward")
    print("+++++++")
    print()
    print("::")
    print()
    print(
        textwrap.indent(textwrap.dedent(inspect.getsource(cls_model.forward)), "    ")
    )
    print()
    for exporter in (
        "custom-fallback",
        "custom-tracing",
        "dynamo-ir",
        "script",
    ):
        expname = exporter.replace("export-", "")
        print()
        print(expname)
        print("+" * len(expname))
        print()
        res = run_exporter(exporter, cls_model, False, quiet=True)
        case_ref = f":ref:`{name} <lo-model-case-export-{name}>`"
        if "exported" in res:
            print("::")
            print()
            print(textwrap.indent(pretty_onnx(res["onnx"]), "    "))
            print()
            obs.append(dict(case=case_ref, error="", exporter=exporter))
        else:
            print("**FAILED**")
            print()
            print("::")
            print()
            print(textwrap.indent(str(res["error"]), "    "))
            print()
            obs.append(dict(case=case_ref, error="FAIL", exporter=exporter))

print()
print(".. _lo-summary-exported-program:")
print()
print("Summary")
print("+++++++")
print()
df = pandas.DataFrame(obs)
piv = df.pivot(index="case", columns="exporter", values="error")
print(piv.to_markdown(tablefmt="rst"))
print()

>>>
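To inspect a single case outside this report, the same helpers can be called
directly. A minimal sketch, assuming discover() returns a mapping from case
name to model class as the loop above suggests:

    from experimental_experiment.torch_interpreter.eval import discover, run_exporter
    from experimental_experiment.helpers import pretty_onnx

    cases = discover()
    # "AtenAsStrided" is the first case listed below
    res = run_exporter("custom-fallback", cases["AtenAsStrided"], False, quiet=True)
    if "exported" in res:
        print(pretty_onnx(res["onnx"]))
    else:
        print("FAILED:", res["error"])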
AtenAsStrided¶
forward¶
def forward(self, x):
    y = torch.as_strided(x, (2, 2, 8, 4), (128, 8, 16, 1))
    return y
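The exported graphs below implement as_strided as a gather over the flattened
input: the 128-element initializer holds the flat offset sum(i_k * stride_k)
of every output element. A small numpy sketch of that index computation (an
illustration, not the exporter's code):

    import numpy as np

    size, stride = (2, 2, 8, 4), (128, 8, 16, 1)
    # flat offset of each output element: sum over axes of index * stride
    idx = sum(np.indices(size)[k] * stride[k] for k in range(4)).reshape(-1)
    x = np.arange(2 * 2 * 8 * 8, dtype=np.float32)
    y = x.reshape(-1)[idx].reshape(size)  # same values as torch.as_strided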
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[2, 2, 8, 8]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape
init: name='init7_s128_' type=int64 shape=(128,) -- Opset.make_node.0
init: name='init7_s4_2_2_8_4' type=int64 shape=(4,) -- array([2, 2, 8, 4])-- Opset.make_node.1/Shape
Reshape(x, init7_s1_-1) -> _onx_reshape0
Gather(_onx_reshape0, init7_s128_) -> _onx_gather0
Reshape(_onx_gather0, init7_s4_2_2_8_4) -> output_0
output: name='output_0' type=dtype('float32') shape=[2, 2, 8, 4]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[2, 2, 8, 8]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape
init: name='init7_s128_' type=int64 shape=(128,) -- Opset.make_node.0
init: name='init7_s4_2_2_8_4' type=int64 shape=(4,) -- array([2, 2, 8, 4])-- Opset.make_node.1/Shape
Reshape(x, init7_s1_-1) -> _onx_reshape0
Gather(_onx_reshape0, init7_s128_) -> _onx_gather0
Reshape(_onx_gather0, init7_s4_2_2_8_4) -> output
output: name='output' type=dtype('float32') shape=[2, 2, 8, 4]
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=[2, 2, 8, 8]
Constant(value_ints=[-1]) -> neg_1
Constant(value=[2, 2, 8, ...) -> val_0
Constant(value=[128, 8, 1...) -> val_1
Constant(value=[4]) -> rank_tensor
Constant(value_int=0) -> indices
SequenceEmpty() -> one_seq
Constant(value_int=4) -> rank_0
Loop(rank_0, , one_seq, indices, body=G1) -> one_seq_16, indices_17
Constant(value_ints=[-1]) -> tmp_18
Reshape(x, tmp_18) -> self_flatten
Constant(value_int=0) -> storage_offset
CastLike(storage_offset, indices_17) -> storage_offset_cast
Add(indices_17, storage_offset_cast) -> indices_19
Gather(self_flatten, indices_19) -> as_strided
output: name='as_strided' type=dtype('float32') shape=[2, 2, 8, 4]
----- subgraph ---- Loop - n6_2 - att.body=G1 -- level=1 -- i,cond_in,one_seq_1,indices_2 -> cond_out,one_seq_15,indices_13
input: name='i' type=dtype('int64') shape=None
input: name='cond_in' type=dtype('bool') shape=None
input: name='one_seq_1' type='NOTENSOR' shape=None
input: name='indices_2' type='NOTENSOR' shape=None
Constant(value_floats=[1.0]) -> tmp_14
SequenceInsert(one_seq_1, tmp_14) -> one_seq_15
Constant(value=4) -> rank_3_cast
Sub(rank_3_cast, i) -> tmp
Constant(value=1) -> int64_1_cast
Sub(tmp, int64_1_cast) -> j
Reshape(j, neg_1) -> j_tensor
Gather(val_0, j_tensor, axis=0) -> size_dim_j
Slice(val_0, j_tensor, rank_tensor) -> size_after_j
Expand(indices_2, size_after_j) -> indices_4
Gather(val_1, j_tensor, axis=0) -> stride_dim_j
Constant(value=0) -> int64_0_cast
Constant(value=1) -> int64_1_5_cast
Range(int64_0_cast, size_dim_j, int64_1_5_cast) -> tmp_6
Mul(tmp_6, stride_dim_j) -> add_value
Constant(value=0) -> int64_0_7_cast
Equal(i, int64_0_7_cast) -> cond
If(cond, then_branch=G2, else_branch=G3) -> shape_11
Reshape(add_value, shape_11) -> add_value_12
Add(indices_4, add_value_12) -> indices_13
Identity(cond_in) -> cond_out
output: name='cond_out' type=dtype('bool') shape=None
output: name='one_seq_15' type='NOTENSOR' shape=None
output: name='indices_13' type='NOTENSOR' shape=None
----- subgraph ---- If - n20 - att.then_branch=G2 -- level=2 -- -> shape
Identity(size_dim_j) -> shape
output: name='shape' type=dtype('int64') shape=[1]
----- subgraph ---- If - n20 - att.else_branch=G3 -- level=2 -- -> shape_10
Cast(size_dim_j, to=1) -> tmp_8
ConcatFromSequence(one_seq_1, axis=0) -> ones
Concat(tmp_8, ones, axis=0) -> shape_9
Cast(shape_9, to=7) -> shape_10
output: name='shape_10' type=dtype('int64') shape=None
----- subgraph ---- If - n20 - att.then_branch=G2 -- level=1 -- -> shape
Identity(size_dim_j) -> shape
output: name='shape' type=dtype('int64') shape=[1]
----- subgraph ---- If - n20 - att.else_branch=G3 -- level=1 -- -> shape_10
Cast(size_dim_j, to=1) -> tmp_8
ConcatFromSequence(one_seq_1, axis=0) -> ones
Concat(tmp_8, ones, axis=0) -> shape_9
Cast(shape_9, to=7) -> shape_10
output: name='shape_10' type=dtype('int64') shape=None
script¶
opset: domain='' version=17
input: name='onnx::Reshape_0' type=dtype('float32') shape=[2, 2, 8, 8]
Constant(value=[-1]) -> /Constant_output_0
Reshape(onnx::Reshape_0, /Constant_output_0, allowzero=0) -> /Reshape_output_0
Constant(value=[[[[0, 1, ...) -> /Constant_1_output_0
Gather(/Reshape_output_0, /Constant_1_output_0) -> 4
output: name='4' type=dtype('float32') shape=[2, 2, 8, 4]
AtenInterpolate¶
forward¶
def forward(self, x):
    y = torch.nn.functional.interpolate(
        x,
        scale_factor=2.0,
        mode="bilinear",
        recompute_scale_factor=False,
    )
    return y
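With scale_factor=2.0 only the spatial dimensions are resized, so a
[2, 2, 3, 4] input becomes [2, 2, 6, 8]; the custom exporters pass the target
sizes to Resize while dynamo-ir and script pass the scales. A quick shape
check:

    import torch

    x = torch.rand(2, 2, 3, 4)
    y = torch.nn.functional.interpolate(
        x, scale_factor=2.0, mode="bilinear", recompute_scale_factor=False
    )
    assert y.shape == (2, 2, 6, 8)  # batch and channel dimensions are untouched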
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[2, 2, 3, 4]
init: name='init7_s2_2_2' type=int64 shape=(2,) -- array([2, 2]) -- _aten_upsample_output_size.batch_channel
init: name='init7_s2_6_8' type=int64 shape=(2,) -- array([6, 8]) -- _aten_upsample_output_size.rsize
Concat(init7_s2_2_2, init7_s2_6_8, axis=0) -> _onx_concat0
Resize(x, , , _onx_concat0, coordinate_transformation_mode=b'pytorch_half_pixel', mode=b'linear', nearest_mode=b'floor') -> output_0
output: name='output_0' type=dtype('float32') shape=[2, 2, 6, 8]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[2, 2, 3, 4]
init: name='init7_s2_2_2' type=int64 shape=(2,) -- array([2, 2]) -- _aten_upsample_output_size.batch_channel
init: name='init7_s2_6_8' type=int64 shape=(2,) -- array([6, 8]) -- _aten_upsample_output_size.rsize
Concat(init7_s2_2_2, init7_s2_6_8, axis=0) -> _onx_concat0
Resize(x, , , _onx_concat0, coordinate_transformation_mode=b'pytorch_half_pixel', mode=b'linear', nearest_mode=b'floor') -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1', 'd_output_2', 'd_output_3']
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[2, 2, 3, 4]
Constant(value_floats=[1.0,1.0,2.0,2.0]) -> val_0
Resize(x, , val_0, keep_aspect_ratio_policy=b'stretch', antialias=0, extrapolation_value=0.00, exclude_outside=0, nearest_mode=b'floor', coordinate_transformation_mode=b'pytorch_half_pixel', cubic_coeff_a=-0.75, mode=b'linear') -> upsample_bilinear2d
output: name='upsample_bilinear2d' type=dtype('float32') shape=[2, 2, 6, 8]
script¶
opset: domain='' version=17
input: name='x' type=dtype('float32') shape=[2, 2, 3, 4]
Constant(value=[1.0, 1.0,...) -> /Constant_output_0
Resize(x, , /Constant_output_0, coordinate_transformation_mode=b'half_pixel', cubic_coeff_a=-0.75, mode=b'linear', nearest_mode=b'floor') -> 5
output: name='5' type=dtype('float32') shape=[2, 2, 6, 8]
AtenNonZero¶
forward¶
def forward(self, x):
    y = torch.nonzero(x)
    return y
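ONNX NonZero returns the indices as a [rank, N] tensor while torch.nonzero
returns [N, rank], which is why every graph below adds Transpose(perm=[1, 0]);
N is only known at runtime, hence the symbolic first output dimension. For
example:

    import torch

    x = torch.tensor([[0.0, 1.0], [2.0, 0.0]])
    print(torch.nonzero(x))  # tensor([[0, 1], [1, 0]]), shape [N, rank] with N=2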
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[3, 4]
NonZero(x) -> _onx_nonzero0
Transpose(_onx_nonzero0, perm=[1,0]) -> output_0
output: name='output_0' type=dtype('int64') shape=['u1', 2]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[3, 4]
NonZero(x) -> _onx_nonzero0
Transpose(_onx_nonzero0, perm=[1,0]) -> output
output: name='output' type=dtype('int64') shape=['d_output_0', 'd_output_1']
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='pkg.onnxscript.torch_lib' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
NonZero(x) -> tmp
Transpose(tmp, perm=[1,0]) -> nonzero
output: name='nonzero' type=dtype('int64') shape=['', 2]
script¶
opset: domain='' version=17
input: name='onnx::NonZero_0' type=dtype('float32') shape=[3, 4]
NonZero(onnx::NonZero_0) -> /NonZero_output_0
Transpose(/NonZero_output_0, perm=[1,0]) -> 2
output: name='2' type=dtype('int64') shape=['Transpose2_dim_0', 2]
AtenNonZeroTuple¶
forward¶
def forward(self, x):
    y = torch.nonzero(x, as_tuple=True)
    return y[0], y[1]
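as_tuple=True returns one 1-D index tensor per dimension, i.e. the columns of
the [N, rank] result; the exporters realize this with either
SplitToSequence/SequenceAt or Split/Squeeze. A quick equivalence check:

    import torch

    x = torch.tensor([[0.0, 1.0], [2.0, 0.0]])
    rows, cols = torch.nonzero(x, as_tuple=True)
    nz = torch.nonzero(x)  # shape [N, rank]
    assert torch.equal(rows, nz[:, 0]) and torch.equal(cols, nz[:, 1])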
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- DynamoInterpreter.getitem.1##shape_type_compute._cast_inputs.1(ge)
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- DynamoInterpreter.getitem.1
NonZero(x) -> _onx_nonzero0
SplitToSequence(_onx_nonzero0, axis=0, keepdims=0) -> nonzero_numpy
SequenceAt(nonzero_numpy, init7_s_0) -> output_0
SequenceAt(nonzero_numpy, init7_s_1) -> output_1
output: name='output_0' type=dtype('int64') shape=['u1']
output: name='output_1' type=dtype('int64') shape=['u1']
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- DynamoInterpreter.getitem.1
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- DynamoInterpreter.getitem.1
NonZero(x) -> _onx_nonzero0
SplitToSequence(_onx_nonzero0, axis=0, keepdims=0) -> nonzero
SequenceAt(nonzero, init7_s_0) -> output_0
SequenceAt(nonzero, init7_s_1) -> output_1
output: name='output_0' type=dtype('int64') shape=['d_output_0_0']
output: name='output_1' type=dtype('int64') shape=['d_output_1_0']
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='pkg.onnxscript.torch_lib' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
Constant(value_ints=[1]) -> unbind_axis
NonZero(x) -> tmp
Transpose(tmp, perm=[1,0]) -> nonzero
Split(nonzero, axis=1, num_outputs=2) -> unbind_split_0, unbind_split_1
Squeeze(unbind_split_0, unbind_axis) -> unbind_split_0_squeeze
Identity(unbind_split_0_squeeze) -> getitem
Squeeze(unbind_split_1, unbind_axis) -> unbind_split_1_squeeze
Identity(unbind_split_1_squeeze) -> getitem_1
output: name='getitem' type=dtype('int64') shape=['']
output: name='getitem_1' type=dtype('int64') shape=['']
script¶
opset: domain='' version=17
input: name='onnx::NonZero_0' type=dtype('float32') shape=[3, 4]
Constant(value=[1, 1]) -> /Constant_output_0
NonZero(onnx::NonZero_0) -> /NonZero_output_0
Transpose(/NonZero_output_0, perm=[1,0]) -> /Transpose_output_0
Split(/Transpose_output_0, /Constant_output_0, axis=1) -> /Split_output_0, /Split_output_1
Constant(value=[1]) -> /Constant_1_output_0
Squeeze(/Split_output_0, /Constant_1_output_0) -> 7
Constant(value=[1]) -> /Constant_2_output_0
Squeeze(/Split_output_1, /Constant_2_output_0) -> 9
output: name='7' type=dtype('int64') shape=['Squeeze7_dim_0']
output: name='9' type=dtype('int64') shape=['Squeeze7_dim_0']
AtenRollPos¶
forward¶
def forward(self, x):
    return torch.roll(x, 1, -1)
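A positive roll along the last axis is two slices concatenated in swapped
order, which is exactly the Slice/Slice/Concat pattern in the graphs below:

    import torch

    x = torch.arange(24.0).reshape(2, 3, 4)
    expected = torch.cat([x[..., -1:], x[..., :-1]], dim=-1)
    assert torch.equal(torch.roll(x, 1, -1), expected)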
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[2, 3, 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_-1, init7_s1_4, init7_s1_-1) -> _onx_slice0
Slice(x, init7_s1_0, init7_s1_-1, init7_s1_-1) -> _onx_slice02
Concat(_onx_slice0, _onx_slice02, axis=-1) -> output_0
output: name='output_0' type=dtype('float32') shape=[2, 3, 4]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[2, 3, 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_-1, init7_s1_4, init7_s1_-1) -> _onx_slice0
Slice(x, init7_s1_0, init7_s1_-1, init7_s1_-1) -> _onx_slice02
Concat(_onx_slice0, _onx_slice02, axis=-1) -> output
output: name='output' type=dtype('float32') shape=[2, 3, 4]
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=[2, 3, 4]
Constant(value_ints=[0]) -> tmp_4
Constant(value=[-1]) -> dim_tensor
Constant(value=[1]) -> shift_tensor
Constant(value=[3]) -> slice_length_3
Slice(x, tmp_4, slice_length_3, dim_tensor) -> suffix
Constant(value=[24]) -> tmp_6
Slice(x, slice_length_3, tmp_6, dim_tensor) -> prefix
Concat(prefix, suffix, axis=-1) -> roll
output: name='roll' type=dtype('float32') shape=[2, 3, 4]
script¶
opset: domain='' version=17
input: name='onnx::Slice_0' type=dtype('float32') shape=[2, 3, 4]
Constant(value=[-1]) -> /Constant_output_0
Constant(value=[-1]) -> /Constant_1_output_0
Constant(value=[922337203...) -> /Constant_2_output_0
Slice(onnx::Slice_0, /Constant_1_output_0, /Constant_2_output_0, /Constant_output_0) -> /Slice_output_0
Constant(value=[-1]) -> /Constant_3_output_0
Constant(value=[0]) -> /Constant_4_output_0
Constant(value=[-1]) -> /Constant_5_output_0
Slice(onnx::Slice_0, /Constant_4_output_0, /Constant_5_output_0, /Constant_3_output_0) -> /Slice_1_output_0
Concat(/Slice_output_0, /Slice_1_output_0, axis=-1) -> 9
output: name='9' type=dtype('float32') shape=[2, 3, 4]
AtenRollRelu¶
forward¶
def forward(self, x):
    return torch.relu(torch.roll(x, -1, -1))
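The negative shift moves the leading slice to the end instead, and Relu is
applied to the concatenation:

    import torch

    x = torch.arange(24.0).reshape(2, 3, 4) - 12.0
    expected = torch.relu(torch.cat([x[..., 1:], x[..., :1]], dim=-1))
    assert torch.equal(torch.relu(torch.roll(x, -1, -1)), expected)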
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[2, 3, 4]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_1, init7_s1_4, init7_s1_-1) -> _onx_slice0
Slice(x, init7_s1_0, init7_s1_1, init7_s1_-1) -> _onx_slice02
Concat(_onx_slice0, _onx_slice02, axis=-1) -> _onx_concat0
Relu(_onx_concat0) -> output_0
output: name='output_0' type=dtype('float32') shape=[2, 3, 4]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[2, 3, 4]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_1, init7_s1_4, init7_s1_-1) -> _onx_slice0
Slice(x, init7_s1_0, init7_s1_1, init7_s1_-1) -> _onx_slice02
Concat(_onx_slice0, _onx_slice02, axis=-1) -> _onx_concat0
Relu(_onx_concat0) -> output
output: name='output' type=dtype('float32') shape=[2, 3, 4]
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=[2, 3, 4]
Constant(value_ints=[0]) -> tmp_4
Constant(value=[-1]) -> dim_tensor
Constant(value=[-1]) -> shift_tensor
Constant(value=[1]) -> slice_length_3
Slice(x, tmp_4, slice_length_3, dim_tensor) -> suffix
Constant(value=[24]) -> tmp_6
Slice(x, slice_length_3, tmp_6, dim_tensor) -> prefix
Concat(prefix, suffix, axis=-1) -> roll
Relu(roll) -> relu
output: name='relu' type=dtype('float32') shape=[2, 3, 4]
script¶
opset: domain='' version=17
input: name='onnx::Slice_0' type=dtype('float32') shape=[2, 3, 4]
Constant(value=[-1]) -> /Constant_output_0
Constant(value=[1]) -> /Constant_1_output_0
Constant(value=[922337203...) -> /Constant_2_output_0
Slice(onnx::Slice_0, /Constant_1_output_0, /Constant_2_output_0, /Constant_output_0) -> /Slice_output_0
Constant(value=[-1]) -> /Constant_3_output_0
Constant(value=[0]) -> /Constant_4_output_0
Constant(value=[1]) -> /Constant_5_output_0
Slice(onnx::Slice_0, /Constant_4_output_0, /Constant_5_output_0, /Constant_3_output_0) -> /Slice_1_output_0
Concat(/Slice_output_0, /Slice_1_output_0, axis=-1) -> /Concat_output_0
Relu(/Concat_output_0) -> 10
output: name='10' type=dtype('float32') shape=[2, 3, 4]
BuildInIsInstance¶
forward¶
def forward(self, x, lx: list | torch.Tensor):
    if isinstance(lx, list):
        t = lx[0] * lx[1].sum(axis=1, keepdim=True)
        return torch.sigmoid(self.linear(x)) - self.buff + t
    return torch.sigmoid(self.linear(x)) - self.buff + lx
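isinstance(lx, list) is resolved at export time: the successful exports keep
only the list branch and flatten the list into two tensor inputs lx_0 and
lx_1. The broadcast works out as follows:

    import torch

    lx = [torch.rand(4, 1), torch.rand(4, 2)]
    t = lx[0] * lx[1].sum(axis=1, keepdim=True)  # (4, 1) * (4, 1) -> (4, 1)
    assert t.shape == (4, 1)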
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.443, 0.327, -0.031], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.457], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, linear.weight, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]
custom-tracing¶
FAILED
Type is unknown for result 'lx', known_types={'x': 1, 'linear.weight': 1, 'linear.bias': 1, '_sub_Linear_input_1': 1, '_sub_Linear__onx_transpose0': 1, '_sub_Linear__onx_matmul0': 1, '_sub_Linear_linear': 1, '_sub_Linear_output': 1, 'linear': 1, 'sigmoid': 1, 'buff': 1, 'sub': 1}
--DEBUG--
[GraphBuilder-LFS] Message starts, there are 3 initializers, 8 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_alias={}
dynamic_shapes=None
_known_value_shape={}
_known_types={'_sub_Linear__onx_matmul0': 1,
'_sub_Linear__onx_transpose0': 1,
'_sub_Linear_input_1': 1,
'_sub_Linear_linear': 1,
'_sub_Linear_output': 1,
'buff': 1,
'linear': 1,
'linear.bias': 1,
'linear.weight': 1,
'sigmoid': 1,
'sub': 1,
'x': 1}
_known_shapes={'_sub_Linear__onx_matmul0': (4, 1),
'_sub_Linear__onx_transpose0': (3, 1),
'_sub_Linear_input_1': (4, 3),
'_sub_Linear_linear': (4, 1),
'_sub_Linear_output': (4, 1),
'buff': (1,),
'linear': (4, 1),
'linear.bias': (1,),
'linear.weight': (1, 3),
'sigmoid': (4, 1),
'sub': (4, 1),
'x': (4, 3)}
_known_constants=['_sub_Linear__onx_transpose0', 'buff', 'linear.bias', 'linear.weight']
_known_ranks={}
--TORCH-USERS--
add -> {output}
buff -> {sub}
linear -> {sigmoid}
lx -> {add}
sigmoid -> {sub}
sub -> {add}
x -> {linear}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), '')) --- 1:2:(4, 3):
lx: ('run_node', ('', '')) --- :::
linear: ('run_node', ('', '')) --- 1:2:(4, 1):
sigmoid: ('run_node', ('', '')) --- 1:2:(4, 1):
buff: ('run_node', ('', '')) --- 1:1:(1,):
sub: ('run_node', ('', '')) --- 1:2:(4, 1):
add: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%lx : list | torch.Tensor [num_users=1] = placeholder[target=lx]
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %buff), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sub, %lx), kwargs = {})
return add
-- process.progress --
node 6/8 target=<built-in function add>
--
[GraphBuilder-LFS.make_tensor_input] x[1:4x3]
[GraphBuilder-LFS.make_tensor_input] lx[0:]
[GraphBuilder-LFS.make_initializer] linear.weight[torch.float32:torch.float32:[0.18872031569480896, -0.22813834249973297, 0.44105055928230286]] - SOURCE: GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
[GraphBuilder-LFS.make_initializer] linear.bias[torch.float32:torch.float32:[-0.26602017879486084]] - SOURCE: GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
[GraphBuilder-LFS.make_initializer] buff[torch.float32:torch.float32:[0.5]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-LFS.make_node] .make_nodes [#:# ] Identity:['x']->['_sub_Linear_input_1']
[GraphBuilder-LFS.make_node] linear [#:# ] Transpose:['linear.weight']->['_sub_Linear__onx_transpose0']
[GraphBuilder-LFS.make_node] Opset [##:# ] MatMul:['_sub_Linear_input_1', '_sub_Linear__onx_transpose0']->['_sub_Linear__onx_matmul0']
[GraphBuilder-LFS.make_node] Opset2 [##:# ] Add:['_sub_Linear__onx_matmul0', 'linear.bias']->['_sub_Linear_linear']
[GraphBuilder-LFS.make_node] .output [#:# ] Identity:['_sub_Linear_linear']->['_sub_Linear_output']
[GraphBuilder-LFS.make_node] .make_nodes2 [#:# ] Identity:['_sub_Linear_output']->['linear']
[GraphBuilder-LFS.make_node] Opset3 [#:# ] Sigmoid:['linear']->['sigmoid']
[GraphBuilder-LFS.make_node] sub [##:# ] Sub:['sigmoid', 'buff']->['sub']
[GraphBuilder-LFS] Message completed, there are 3 initializers, 8 nodes, 2 inputs, 2 outputs..
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='linear.bias' type=float32 shape=(1,) -- array([0.339], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
Constant(value=[1]) -> val_3
ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Constant(value=[[0.486663...) -> t
Gemm(x, t, linear.bias, beta=1.00, transB=0, alpha=1.00, transA=0) -> addmm
Sigmoid(addmm) -> sigmoid
Sub(sigmoid, buff) -> sub
Add(sub, mul) -> add
output: name='add' type=dtype('float32') shape=[4, 1]
script¶
opset: domain='' version=17
input: name='onnx::Gemm_0' type=dtype('float32') shape=[4, 3]
input: name='onnx::Mul_1' type=dtype('float32') shape=[4, 1]
input: name='onnx::ReduceSum_2' type=dtype('float32') shape=[4, 2]
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.3 , 0.118, 0.517], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.087], dtype=float32)
Constant(value=[1]) -> onnx::ReduceSum_6
ReduceSum(onnx::ReduceSum_2, onnx::ReduceSum_6, keepdims=1) -> /ReduceSum_output_0
Mul(onnx::Mul_1, /ReduceSum_output_0) -> /Mul_output_0
Gemm(onnx::Gemm_0, linear.weight, linear.bias, alpha=1.00, beta=1.00, transB=1) -> /linear/Gemm_output_0
Sigmoid(/linear/Gemm_output_0) -> /Sigmoid_output_0
Sub(/Sigmoid_output_0, buff) -> /Sub_output_0
Add(/Sub_output_0, /Mul_output_0) -> 12
output: name='12' type=dtype('float32') shape=[4, 1]
BuildInLen¶
forward¶
def forward(self, x, lx: list):
    t = lx[0] * lx[1].sum(axis=1, keepdim=True)
    if len(lx) > 2:
        t = t + lx[2].sum(axis=1, keepdim=True)
    return torch.sigmoid(self.linear(x)) - self.buff + t
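len(lx) is likewise evaluated while tracing: with two tensors in the list the
len(lx) > 2 branch is dropped, and only custom-tracing fails because symbolic
tracing cannot apply len to a proxy. A plain-Python sketch of the branch the
exporters actually capture:

    import torch

    lx = [torch.rand(4, 1), torch.rand(4, 2)]
    t = lx[0] * lx[1].sum(axis=1, keepdim=True)
    if len(lx) > 2:  # False here, so this branch leaves no trace in the graph
        t = t + lx[2].sum(axis=1, keepdim=True)
    assert t.shape == (4, 1)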
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.376, -0.151, 0.388], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.313], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, linear.weight, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]
custom-tracing¶
FAILED
len(.) expects an integer, len needs to be replaced. You should use _len.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.334], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
Constant(value=[1]) -> val_3
ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Constant(value=[[0.441906...) -> t
Gemm(x, t, linear.bias, beta=1.00, transB=0, alpha=1.00, transA=0) -> addmm
Sigmoid(addmm) -> sigmoid
Sub(sigmoid, buff) -> sub
Add(sub, mul) -> add
output: name='add' type=dtype('float32') shape=[4, 1]
script¶
opset: domain='' version=17
input: name='onnx::Gemm_0' type=dtype('float32') shape=[4, 3]
input: name='onnx::Mul_1' type=dtype('float32') shape=[4, 1]
input: name='onnx::ReduceSum_2' type=dtype('float32') shape=[4, 2]
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.211, -0.33 , 0.426], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.356], dtype=float32)
Constant(value=[1]) -> onnx::ReduceSum_6
ReduceSum(onnx::ReduceSum_2, onnx::ReduceSum_6, keepdims=1) -> /ReduceSum_output_0
Mul(onnx::Mul_1, /ReduceSum_output_0) -> /Mul_output_0
Gemm(onnx::Gemm_0, linear.weight, linear.bias, alpha=1.00, beta=1.00, transB=1) -> /linear/Gemm_output_0
Sigmoid(/linear/Gemm_output_0) -> /Sigmoid_output_0
Sub(/Sigmoid_output_0, buff) -> /Sub_output_0
Add(/Sub_output_0, /Mul_output_0) -> 12
output: name='12' type=dtype('float32') shape=[4, 1]
ComplexPolar¶
forward¶
def forward(self, x, angle):
    return torch.polar(x, angle)
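torch.polar(x, angle) builds x * (cos(angle) + i*sin(angle)); dynamo-ir, the
only exporter that succeeds, stores the real and imaginary parts along a
trailing axis of size 2, hence its [4, 4, 2] output:

    import torch

    x, angle = torch.rand(4, 4), torch.rand(4, 4)
    z = torch.polar(x, angle)
    assert torch.allclose(z.real, x * torch.cos(angle))
    assert torch.allclose(z.imag, x * torch.sin(angle))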
custom-fallback¶
FAILED
[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. Type Error: Type 'tensor(complex64)' of input parameter (_onx_cast02) of operator (Mul) in node (polar5) is invalid.
custom-tracing¶
FAILED
[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. Type Error: Type 'tensor(complex64)' of input parameter (_onx_cast02) of operator (Mul) in node (polar5) is invalid.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='pkg.onnxscript.torch_lib' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 4]
input: name='angle' type=dtype('float32') shape=[4, 4]
Constant(value=[-1]) -> int64_m1_1d
Cos(angle) -> tmp
Mul(x, tmp) -> tmp_0
Unsqueeze(tmp_0, int64_m1_1d) -> real
Sin(angle) -> tmp_1
Mul(x, tmp_1) -> tmp_2
Constant(value=[-1]) -> int64_m1_1d_3
Unsqueeze(tmp_2, int64_m1_1d_3) -> imag
Concat(real, imag, axis=-1) -> polar
output: name='polar' type=dtype('float32') shape=[4, 4, 2]
script¶
FAILED
Exporting the operator 'aten::polar' to ONNX opset version 17 is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: https://github.com/pytorch/pytorch/issues.
ControlFlowCond¶
forward¶
def forward(self, x):
    def true_fn(x):
        return torch.sin(x)

    def false_fn(x):
        return torch.cos(x)

    return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
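In eager mode torch.cond simply dispatches on the predicate; the exporters
that succeed map it to an ONNX If with one subgraph per branch. A small eager
check (a sketch, assuming eager torch.cond semantics):

    import torch

    x = torch.rand(5, 3) - 0.5

    def true_fn(x):
        return torch.sin(x)

    def false_fn(x):
        return torch.cos(x)

    y = torch.cond(x.sum() > 0, true_fn, false_fn, [x])
    assert torch.allclose(y, torch.sin(x) if x.sum() > 0 else torch.cos(x))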
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[5, 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - aten_cond - att.else_branch=G1 -- level=1 -- -> cond#0
false_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - aten_cond - att.then_branch=G2 -- level=1 -- -> cond#0
true_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'x'
Sin(x) -> output_0
output: name='output_0' type=? shape=?
----- function name=false_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions' version=1
input: 'x'
Cos(x) -> output_0
output: name='output_0' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.torch.__subgraph__' version=1
input: name='x' type=dtype('float32') shape=[5, 3]
Constant(value=0.0) -> scalar_tensor_default
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, scalar_tensor_default) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4 - att.then_branch=G1 -- level=1 -- -> sin_true_graph_0
Sin(x) -> sin_true_graph_0
output: name='sin_true_graph_0' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4 - att.else_branch=G2 -- level=1 -- -> cos_false_graph_0
Cos(x) -> cos_false_graph_0
output: name='cos_false_graph_0' type=dtype('float32') shape=[5, 3]
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
ControlFlowCond2Inputs¶
forward¶
def forward(self, x, y):
    def true_fn(x, y):
        return torch.sin(x), torch.cos(x) + y

    def false_fn(x, y):
        return torch.cos(x), torch.sin(x) + y

    return torch.cond(x.sum() > 0, true_fn, false_fn, [x, y])
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[5, 3]
input: name='y' type=dtype('float32') shape=[5, 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=[5, 3]
output: name='output_1' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - aten_cond - att.else_branch=G1 -- level=1 -- -> cond#0,cond#1
false_graph_0[local_functions](x, y) -> cond#0, cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- subgraph ---- If - aten_cond - att.then_branch=G2 -- level=1 -- -> cond#0,cond#1
true_graph_0[local_functions](x, y) -> cond#0, cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'x'
input: 'y'
Cos(x) -> cos
Add(cos, y) -> output_1
Sin(x) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
----- function name=false_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions' version=1
input: 'x'
input: 'y'
Cos(x) -> output_0
Sin(x) -> sin
Add(sin, y) -> output_1
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and summit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'IndexError'>: tuple index out of range
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 1), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
ControlFlowCond2Outputs¶
forward¶
def forward(self, x):
    def true_fn(x):
        return torch.sin(x), torch.cos(x)

    def false_fn(x):
        return torch.cos(x), torch.sin(x)

    return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[5, 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=[5, 3]
output: name='output_1' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - aten_cond - att.else_branch=G1 -- level=1 -- -> cond#0,cond#1
false_graph_0[local_functions](x) -> cond#0, cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- subgraph ---- If - aten_cond - att.then_branch=G2 -- level=1 -- -> cond#0,cond#1
true_graph_0[local_functions](x) -> cond#0, cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'x'
Cos(x) -> output_1
Sin(x) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
----- function name=false_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions' version=1
input: 'x'
Cos(x) -> output_0
Sin(x) -> output_1
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and summit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'IndexError'>: tuple index out of range
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 1), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
ControlFlowCondConstant¶
forward¶
def forward(self, x):
    def true_fn(x):
        return torch.sin(x) - torch.ones(x.shape, dtype=x.dtype)

    def false_fn(x):
        return torch.cos(x) + torch.ones((1, 1024), dtype=x.dtype)

    return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[1024, 1024]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=[1024, 1024]
----- subgraph ---- If - aten_cond - att.else_branch=G1 -- level=1 -- -> cond#0
false_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - aten_cond - att.then_branch=G2 -- level=1 -- -> cond#0
true_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'x'
Constant(value=[1024, 102...) -> init7_s2_1024_1024
ConstantOfShape(init7_s2_1024_1024, value=[1.0]) -> ones
Sin(x) -> sin
Sub(sin, ones) -> output_0
output: name='output_0' type=? shape=?
----- function name=false_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions' version=1
input: 'x'
Constant(value=[1, 1024]) -> init7_s2_1_1024
ConstantOfShape(init7_s2_1_1024, value=[1.0]) -> ones
Cos(x) -> cos
Add(cos, ones) -> output_0
output: name='output_0' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.torch.__subgraph__' version=1
input: name='x' type=dtype('float32') shape=[1024, 1024]
Constant(value=0.0) -> scalar_tensor_default
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, scalar_tensor_default) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=[1024, 1024]
----- subgraph ---- If - node_If_4 - att.then_branch=G1 -- level=1 -- -> sub_true_graph_0
Constant(value=[1024, 102...) -> val_1
Sin(x) -> sin
Constant(value=1.0) -> val_3
Expand(val_3, val_1) -> ones
Sub(sin, ones) -> sub_true_graph_0
output: name='sub_true_graph_0' type=dtype('float32') shape=[1024, 1024]
----- subgraph ---- If - node_If_4 - att.else_branch=G2 -- level=1 -- -> add_false_graph_0
Constant(value=[[1.0, 1.0...) -> ones_2
Cos(x) -> cos
Add(cos, ones_2) -> add_false_graph_0
output: name='add_false_graph_0' type=dtype('float32') shape=[1024, 1024]
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
ControlFlowCondNestedModule¶
forward¶
def forward(self, x):
    def true_fn(x):
        return self.submodule(x)

    def false_fn(x):
        return x - self.weight

    y = torch.cond(x.sum() > 0, true_fn, false_fn, [x])
    return y
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('int64') shape=[2]
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- shape_type_compute._cast_inputs.1(gt_Scalar)
init: name='submodule.weight' type=float32 shape=(1,) -- array([100.], dtype=float32)-- DynamoInterpret.placeholder.1/P(submodule.weight)
init: name='weight' type=float32 shape=(1,) -- array([42.], dtype=float32)-- DynamoInterpret.placeholder.1/P(weight)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init7_s_0) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=[2]
----- subgraph ---- If - aten_cond - att.else_branch=G1 -- level=1 -- -> cond#0
false_graph_0[local_functions](x, submodule.weight, weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - aten_cond - att.then_branch=G2 -- level=1 -- -> cond#0
true_graph_0[local_functions](x, submodule.weight, weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions.0
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'p_submodule_weight'
input: 'x'
Cast(x, to=1) -> _onx_cast0
Mul(_onx_cast0, p_submodule_weight) -> output_0
output: name='output_0' type=? shape=?
----- function name=false_graph_0 domain=local_functions.0
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
input: 'p_submodule_weight'
input: 'x'
Cast(x, to=1) -> _onx_cast0
Div(_onx_cast0, p_submodule_weight) -> output_0
output: name='output_0' type=? shape=?
----- function name=true_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
input: 'x'
input: 'p_submodule_weight'
input: 'p_weight'
Abs(x) -> abs_1
ReduceSum(abs_1, keepdims=0) -> sum_1
Constant(value=100) -> init7_s_100
Greater(sum_1, init7_s_100) -> gt
If(gt, else_branch=G3, then_branch=G4) -> output_0
output: name='output_0' type=? shape=?
----- subgraph ---- If - aten_cond - att.else_branch=G3 -- level=1 -- -> cond#0
false_graph_0[local_functions.0](p_submodule_weight, x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - aten_cond - att.then_branch=G4 -- level=1 -- -> cond#0
true_graph_0[local_functions.0](p_submodule_weight, x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=false_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: 'x'
input: 'p_submodule_weight'
input: 'p_weight'
Cast(x, to=1) -> _onx_cast0
Sub(_onx_cast0, p_weight) -> output_0
output: name='output_0' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.torch.__subgraph__' version=1
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('int64') shape=[2]
init: name='weight' type=float32 shape=(1,) -- array([42.], dtype=float32)
init: name='submodule.weight' type=float32 shape=(1,) -- array([100.], dtype=float32)
Constant(value=0) -> val_0
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, val_0) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=[2]
----- subgraph ---- If - node_If_3 - att.then_branch=G1 -- level=1 -- -> getitem_true_graph_0
Abs(x) -> abs_1
ReduceSum(abs_1, noop_with_empty_axes=0, keepdims=0) -> sum_1_2
Constant(value=100) -> val_0_2
Greater(sum_1_2, val_0_2) -> gt_2
If(gt_2, then_branch=G3, else_branch=G4) -> getitem_true_graph_0
output: name='getitem_true_graph_0' type='NOTENSOR' shape=None
----- subgraph ---- If - node_If_4 - att.then_branch=G3 -- level=2 -- -> mul_true_graph_0__true_graph_0
Cast(x, to=1) -> convert_element_type_default
Mul(convert_element_type_default, submodule.weight) -> mul_true_graph_0__true_graph_0
output: name='mul_true_graph_0__true_graph_0' type=dtype('float32') shape=[2]
----- subgraph ---- If - node_If_4 - att.else_branch=G4 -- level=2 -- -> div_true_graph_0__false_graph_0
Cast(x, to=1) -> convert_element_type_default_2
Div(convert_element_type_default_2, submodule.weight) -> div_true_graph_0__false_graph_0
output: name='div_true_graph_0__false_graph_0' type=dtype('float32') shape=[2]
----- subgraph ---- If - node_If_4 - att.then_branch=G3 -- level=1 -- -> mul_true_graph_0__true_graph_0
Cast(x, to=1) -> convert_element_type_default
Mul(convert_element_type_default, submodule.weight) -> mul_true_graph_0__true_graph_0
output: name='mul_true_graph_0__true_graph_0' type=dtype('float32') shape=[2]
----- subgraph ---- If - node_If_4 - att.else_branch=G4 -- level=1 -- -> div_true_graph_0__false_graph_0
Cast(x, to=1) -> convert_element_type_default_2
Div(convert_element_type_default_2, submodule.weight) -> div_true_graph_0__false_graph_0
output: name='div_true_graph_0__false_graph_0' type=dtype('float32') shape=[2]
----- subgraph ---- If - node_If_3 - att.else_branch=G2 -- level=1 -- -> sub_false_graph_0
Cast(x, to=1) -> convert_element_type_default_3
Sub(convert_element_type_default_3, weight) -> sub_false_graph_0
output: name='sub_false_graph_0' type=dtype('float32') shape=[2]
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
ControlFlowNestCond¶
forward¶
def forward(self, x):
    def true_fn2(x):
        def true_fn1(x):
            return torch.sin(x)

        def false_fn1(x):
            return torch.cos(x)

        return torch.cond(x.sum() < 0, true_fn1, false_fn1, [x])

    def false_fn2(x):
        return -x

    return torch.cond(x.sum() > 0, true_fn2, false_fn2, [x])
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[5, 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - aten_cond - att.else_branch=G1 -- level=1 -- -> cond#0
false_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - aten_cond - att.then_branch=G2 -- level=1 -- -> cond#0
true_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions.0
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'x'
Sin(x) -> output_0
output: name='output_0' type=? shape=?
----- function name=false_graph_0 domain=local_functions.0
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
input: 'x'
Cos(x) -> output_0
output: name='output_0' type=? shape=?
----- function name=true_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
input: 'x'
Constant(value=0.0) -> init1_s_
ReduceSum(x, keepdims=0) -> sum_1
Less(sum_1, init1_s_) -> lt
If(lt, else_branch=G3, then_branch=G4) -> output_0
output: name='output_0' type=? shape=?
----- subgraph ---- If - aten_cond - att.else_branch=G3 -- level=1 -- -> cond#0
false_graph_0[local_functions.0](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - aten_cond - att.then_branch=G4 -- level=1 -- -> cond#0
true_graph_0[local_functions.0](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=false_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: 'x'
Neg(x) -> output_0
output: name='output_0' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.torch.__subgraph__' version=1
input: name='x' type=dtype('float32') shape=[5, 3]
Constant(value=0.0) -> scalar_tensor_default
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, scalar_tensor_default) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4 - att.then_branch=G1 -- level=1 -- -> getitem_true_graph_0
Constant(value=0.0) -> scalar_tensor_default_2
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1_2
Less(sum_1_2, scalar_tensor_default_2) -> lt
If(lt, then_branch=G3, else_branch=G4) -> getitem_true_graph_0
output: name='getitem_true_graph_0' type='NOTENSOR' shape=None
----- subgraph ---- If - node_If_4_2 - att.then_branch=G3 -- level=2 -- -> sin_true_graph_0__true_graph_0
Sin(x) -> sin_true_graph_0__true_graph_0
output: name='sin_true_graph_0__true_graph_0' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4_2 - att.else_branch=G4 -- level=2 -- -> cos_true_graph_0__false_graph_0
Cos(x) -> cos_true_graph_0__false_graph_0
output: name='cos_true_graph_0__false_graph_0' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4_2 - att.then_branch=G3 -- level=1 -- -> sin_true_graph_0__true_graph_0
Sin(x) -> sin_true_graph_0__true_graph_0
output: name='sin_true_graph_0__true_graph_0' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4_2 - att.else_branch=G4 -- level=1 -- -> cos_true_graph_0__false_graph_0
Cos(x) -> cos_true_graph_0__false_graph_0
output: name='cos_true_graph_0__false_graph_0' type=dtype('float32') shape=[5, 3]
----- subgraph ---- If - node_If_4 - att.else_branch=G2 -- level=1 -- -> neg_false_graph_0
Neg(x) -> neg_false_graph_0
output: name='neg_false_graph_0' type=dtype('float32') shape=[5, 3]
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
ControlFlowScan¶
forward¶
def forward(self, x):
    init = torch.zeros_like(x[0])
    carry, out = torch.ops.higher_order.scan(
        ControlFlowScan.add, [init], [x], dim=0, reverse=False, additional_inputs=[]
    )
    return carry
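scan threads a carry through the rows of x; the combine function's body in the
graph below returns the running sum as both the carry and the per-step output,
so the returned carry equals x.sum(dim=0). A plain-Python sketch of the same
computation, assuming that combine function:

    import torch

    x = torch.rand(3, 3)
    carry, outs = torch.zeros_like(x[0]), []
    for row in x:  # scan over dim=0, reverse=False
        carry = carry + row
        outs.append(carry)
    assert torch.allclose(carry, x.sum(dim=0))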
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[3, 3]
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
ConstantOfShape(init7_s1_3, value=[0.0]) -> zeros_like
Scan(zeros_like, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> output_0, scan#1
output: name='output_0' type=dtype('float32') shape=[3]
----- subgraph ---- Scan - aten_scan - att.body=G1 -- level=1 -- init_0_zeros_like,scan_0_x -> output_0,output_1
input: name='init_0_zeros_like' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_zeros_like, scan_0_x) -> output_0, output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
Add(arg0_1, arg1_1) -> output_0
Identity(output_0) -> output_1
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
Unable to symbolically trace HigherOrderOperators
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and summit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7fa8cc5714f0>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like], [%x], 0, False, []), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)
ControlFlowScan2Carried¶
forward¶
def forward(self, x):
    init1 = torch.zeros_like(x[0])
    init2 = torch.ones_like(x[0])
    carry1, carry2, out1, out2 = torch.ops.higher_order.scan(
        ControlFlowScan2Carried.add,
        [init1, init2],
        [x, x * 2],
        dim=0,
        reverse=False,
        additional_inputs=[],
    )
    return carry1, carry2, out1, out2
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_' type=float32 shape=() -- array([2.], dtype=float32)-- shape_type_compute._cast_inputs.1(mul_Tensor)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
ConstantOfShape(init7_s1_4, value=[1.0]) -> ones_like
ConstantOfShape(init7_s1_4, value=[0.0]) -> zeros_like
Reshape(init1_s_, init7_s1_1) -> _onx_reshape0
Mul(x, _onx_reshape0) -> _onx_mul0
Scan(zeros_like, ones_like, x, _onx_mul0, body=G1, num_scan_inputs=2, scan_input_directions=[0,0], scan_output_axes=[0,0], scan_output_directions=[0,0]) -> output_0, output_1, output_2, output_3
output: name='output_0' type=dtype('float32') shape=[4]
output: name='output_1' type=dtype('float32') shape=[4]
output: name='output_2' type=dtype('float32') shape=[3, 4]
output: name='output_3' type=dtype('float32') shape=[3, 4]
----- subgraph ---- Scan - aten_scan - att.body=G1 -- level=1 -- init_0_zeros_like,init_1_ones_like,scan_0_x,scan_1_mul -> output_0,output_1,output_2,output_3
input: name='init_0_zeros_like' type='NOTENSOR' shape=None
input: name='init_1_ones_like' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
input: name='scan_1_mul' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_zeros_like, init_1_ones_like, scan_0_x, scan_1_mul) -> output_0, output_1, output_2, output_3
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
output: name='output_2' type='NOTENSOR' shape=None
output: name='output_3' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
input: 'arg2_1'
input: 'arg3_1'
Add(arg0_1, arg2_1) -> output_0
Identity(output_0) -> output_2
Mul(arg1_1, arg3_1) -> output_1
Identity(output_1) -> output_3
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
output: name='output_2' type=? shape=?
output: name='output_3' type=? shape=?
custom-tracing¶
FAILED
Unable to symbolically trace HigherOrderOperators
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and submit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7fa8cc5714f0>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=4] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like, %ones_like], [%x, %mul], 0, False, []), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)
ControlFlowScanCDist¶
forward¶
def forward(self, x):
carry, out = torch.ops.higher_order.scan(
ControlFlowScanCDist.dist,
[x],
[x],
dim=0,
reverse=False,
additional_inputs=[],
)
return out
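The combine body below (Sub, Mul, ReduceSum, Sqrt) measures the Euclidean distance between the current row and every row of x, so the stacked scan output is the pairwise distance matrix of x. A quick equivalence sketch against torch.cdist:

<<<
import torch

x = torch.randn(3, 4)
# one scan step: distances from one row to all rows of x
dist = torch.stack([((x - row.reshape(1, -1)) ** 2).sum(1).sqrt() for row in x.unbind(0)])
assert torch.allclose(dist, torch.cdist(x, x), atol=1e-6)
>>>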
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[3, 4]
Scan(x, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=[3, 3]
----- subgraph ---- Scan - aten_scan - att.body=G1 -- level=1 -- init_0_x,scan_0_x -> output_0,output_1
input: name='init_0_x' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_x, scan_0_x) -> output_0, output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
Constant(value=[1, -1]) -> init7_s2_1_-1
Reshape(arg1_1, init7_s2_1_-1) -> reshape
Sub(arg0_1, reshape) -> sub
Mul(sub, sub) -> mul
Constant(value=[1]) -> init7_s1_1
ReduceSum(mul, init7_s1_1, keepdims=0) -> sum_1
Sqrt(sum_1) -> output_1
Identity(arg0_1) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
Unable to symbolically trace HigherOrderOperators
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and submit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7fa8cc5714f0>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%x], [%x], 0, False, []), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)
ControlFlowScanCDist2¶
forward¶
def forward(self, x):
z = torch.tensor([0], dtype=torch.float32)
y = x.clone()
out = torch.ops.higher_order.scan(
ControlFlowScanCDist2.dist,
[z],
[x],
dim=0,
reverse=False,
additional_inputs=[y],
)
return out[1]
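`additional_inputs` closes over tensors without scanning them: the exporter turns `y` into a hidden input of the Scan body, visible below as `hidden_input_scan_0_clone` passed as the third argument of `scan_combine_graph_0`. A sketch of the combine signature this implies (hypothetical names):

<<<
import torch

def dist(carry, row, y):
    # additional_inputs are appended after the carried and scanned arguments
    d = ((y - row.reshape(1, -1)) ** 2).sum(1).sqrt()
    return carry, d
>>>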
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='c_lifted_tensor_0' type=float32 shape=(1,) -- array([0.], dtype=float32)-- DynamoInterpret.placeholder.0
Identity(x) -> hidden_input_scan_0_clone
Scan(c_lifted_tensor_0, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=[3, 3]
----- subgraph ---- Scan - aten_scan - att.body=G1 -- level=1 -- init_0_detach_,scan_0_x -> output_0,output_1
input: name='init_0_detach_' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_detach_, scan_0_x, hidden_input_scan_0_clone) -> output_0, output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
input: 'arg2_1'
Constant(value=[1, -1]) -> init7_s2_1_-1
Reshape(arg1_1, init7_s2_1_-1) -> reshape
Sub(arg2_1, reshape) -> sub
Mul(sub, sub) -> mul
Constant(value=[1]) -> init7_s1_1
ReduceSum(mul, init7_s1_1, keepdims=0) -> sum_1
Sqrt(sum_1) -> output_1
Identity(arg0_1) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
[CustomProxy(clone)] can only be of (<class 'torch.Tensor'>, <class 'int'>, <class 'torch.SymInt'>) but got (<class 'experimental_experiment.torch_interpreter.tracing.CustomProxy'>,)
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and submit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7fa8cc5714f0>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%lift_fresh_copy], [%x], 0, False, [%clone]), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)
ControlFlowScanCDistXY¶
forward¶
def forward(self, x, y):
carry, out = torch.ops.higher_order.scan(
ControlFlowScanCDistXY.dist,
[y],
[x],
dim=0,
reverse=False,
additional_inputs=[],
)
return out
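Here the carry is y and the scan walks the rows of x, so the stacked output is the cross distance matrix between x and y, shape [3, 5]. A small equivalence sketch:

<<<
import torch

x, y = torch.randn(3, 4), torch.randn(5, 4)
out = torch.stack([((y - row.reshape(1, -1)) ** 2).sum(1).sqrt() for row in x.unbind(0)])
assert out.shape == (3, 5)
assert torch.allclose(out, torch.cdist(x, y), atol=1e-6)
>>>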
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[3, 4]
input: name='y' type=dtype('float32') shape=[5, 4]
Scan(y, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=[3, 5]
----- subgraph ---- Scan - aten_scan - att.body=G1 -- level=1 -- init_0_y,scan_0_x -> output_0,output_1
input: name='init_0_y' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_y, scan_0_x) -> output_0, output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
Constant(value=[1, -1]) -> init7_s2_1_-1
Reshape(arg1_1, init7_s2_1_-1) -> reshape
Sub(arg0_1, reshape) -> sub
Mul(sub, sub) -> mul
Constant(value=[1]) -> init7_s1_1
ReduceSum(mul, init7_s1_1, keepdims=0) -> sum_1
Sqrt(sum_1) -> output_1
Identity(arg0_1) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
Unable to symbolically trace HigherOrderOperators
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and submit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7fa8cc5714f0>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%y], [%x], 0, False, []), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)
CreateFromShape¶
forward¶
def forward(self, x):
y = torch.ones((x.shape[0], x.shape[1] + 1))
return y
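Because this page exports with static shapes, `x.shape[0]` and `x.shape[1] + 1` specialize to the literals 4 and 5: custom-fallback bakes them into `init7_s2_4_5` and dynamo-ir folds the whole call into a Constant. A sketch of that specialization, assuming no dynamic_shapes are passed:

<<<
import torch

class CreateFromShape(torch.nn.Module):
    def forward(self, x):
        return torch.ones((x.shape[0], x.shape[1] + 1))

ep = torch.export.export(CreateFromShape(), (torch.randn(4, 4),))
# with static shapes the graph records torch.ones((4, 5));
# passing dynamic_shapes=... would keep the sizes symbolic
>>>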
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[4, 4]
init: name='init7_s2_4_5' type=int64 shape=(2,) -- array([4, 5]) -- Opset.make_node.1/Shape
ConstantOfShape(init7_s2_4_5, value=[1.0]) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 5]
custom-tracing¶
FAILED
ones(): argument 'size' (position 1) must be tuple of ints, but found element of type CustomProxy at pos 0
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 4]
Constant(value=[[1.0, 1.0...) -> ones
output: name='ones' type=dtype('float32') shape=[4, 5]
script¶
FAILED
Input mismatch, inputs[0]=(T1r2,) but names=[], model=CreateFromShape, export='script'
CreateFromShapeThroughFunction¶
forward¶
def forward(self, x):
dy1 = CreateFromShapeThroughFunction.add_one(x.shape[1])
y = torch.ones((x.shape[0], dy1))
return y
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[4, 4]
init: name='init7_s2_4_5' type=int64 shape=(2,) -- array([4, 5]) -- Opset.make_node.1/Shape
ConstantOfShape(init7_s2_4_5, value=[1.0]) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 5]
custom-tracing¶
FAILED
ones(): argument 'size' (position 1) must be tuple of ints, but found element of type CustomProxy at pos 0
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 4]
Constant(value=[[1.0, 1.0...) -> ones
output: name='ones' type=dtype('float32') shape=[4, 5]
script¶
FAILED
Input mismatch, inputs[0]=(T1r2,) but names=[], model=CreateFromShapeThroughFunction, export='script'
CropLastDimensionWithTensorContent¶
forward¶
def forward(self, x, shape):
return x[..., : shape[0]]
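`shape[0]` reads the *content* of a tensor, so the slice bound becomes a data-dependent (unbacked) symbol, the `u0` appearing in the errors below, and every export path trips over it. One commonly suggested workaround, sketched here and not verified against these exporters, is to extract the value and constrain it with `torch._check`:

<<<
import torch

def forward(x, shape):
    end = shape[0].item()      # becomes an unbacked SymInt (u0) under export
    torch._check(end >= 0)     # gives the tracer the bound it cannot guess
    return x[..., :end]
>>>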
custom-fallback¶
FAILED
None of the following options [ExportOptions(), ExportOptions(strict=False), ExportOptions(decomposition_table='default'), ExportOptions(strict=False, decomposition_table='default'), ExportOptions(dynamo=True), ExportOptions(decomposition_table='default', dynamo=True), ExportOptions(jit=True)] worked, args=(T1r3,T7r1), kwargs=None, exception=
-----
[(ExportOptions(),
Unsupported('Dynamic slicing on data-dependent value is not supported\n\nfrom user code:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 699, in forward\n return x[..., : shape[0]]\n\nSet TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information\n')),
(ExportOptions(strict=False),
GuardOnDataDependentSymNode('Could not extract specialized integer from data-dependent expression u0 (unhinted: u0). (Size-like symbols: none)\n\nCaused by: (_export/non_strict_utils.py:581 in __torch_function__)\nFor more information, run with TORCH_LOGS="dynamic"\nFor extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"\nIf you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\nFor more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing\n\nFor C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\n\nThe following call raised this error:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 699, in forward\n return x[..., : shape[0]]\n')),
(ExportOptions(decomposition_table='default'),
Unsupported('Dynamic slicing on data-dependent value is not supported\n\nfrom user code:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 699, in forward\n return x[..., : shape[0]]\n\nSet TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information\n')),
(ExportOptions(strict=False, decomposition_table='default'),
GuardOnDataDependentSymNode('Could not extract specialized integer from data-dependent expression u0 (unhinted: u0). (Size-like symbols: none)\n\nCaused by: (_export/non_strict_utils.py:581 in __torch_function__)\nFor more information, run with TORCH_LOGS="dynamic"\nFor extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"\nIf you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\nFor more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing\n\nFor C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\n\nThe following call raised this error:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 699, in forward\n return x[..., : shape[0]]\n')),
(ExportOptions(dynamo=True),
Unsupported('Dynamic slicing on data-dependent value is not supported\n\nfrom user code:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 699, in forward\n return x[..., : shape[0]]\n\nSet TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information\n')),
(ExportOptions(decomposition_table='default', dynamo=True),
Unsupported('Dynamic slicing on data-dependent value is not supported\n\nfrom user code:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 699, in forward\n return x[..., : shape[0]]\n\nSet TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information\n')),
(ExportOptions(jit=True),
GuardOnDataDependentSymNode('Could not guard on data-dependent expression u0 < 0 (unhinted: u0 < 0). (Size-like symbols: none)\n\nCaused by: (_decomp/decompositions.py:733 in slice_forward)\nFor more information, run with TORCH_LOGS="dynamic"\nFor extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"\nIf you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\nFor more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing\n\nFor C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\n\nThe following call raised this error:\n File "<string>", line 1, in <lambda>\n\n\nWhile executing %slice_tensor : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 2, 0, %_local_scalar_dense_default, 1), kwargs = {})\nOriginal traceback:\nNone'))]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[3, 4, 4]
input: name='shape' type=dtype('int64') shape=[1]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='getitem_1_axis' type=int64 shape=(1,) -- array([-1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_1_step' type=int64 shape=(1,) -- array([1]) -- DynamoInterpreter._getitem_slice.3
Gather(shape, init7_s1_0) -> _onx_gather0
Squeeze(_onx_gather0, init7_s1_0) -> getitem
Unsqueeze(getitem, init7_s1_0) -> _onx_unsqueeze0
Slice(x, init7_s1_0, _onx_unsqueeze0, getitem_1_axis, getitem_1_step) -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1', 'd_output_2']
dynamo-ir¶
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'torch.fx.experimental.symbolic_shapes.GuardOnDataDependentSymNode'>: Could not extract specialized integer from data-dependent expression u0 (unhinted: u0). (Size-like symbols: none)
Caused by: (_export/non_strict_utils.py:581 in __torch_function__)
For more information, run with TORCH_LOGS="dynamic"
For extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"
If you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1
For more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing
For C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1
The following call raised this error:
File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 699, in forward
return x[..., : shape[0]]
(Refer to the full stack trace above for more information.)
script¶
opset: domain='' version=17
input: name='onnx::Slice_0' type=dtype('float32') shape=[3, 4, 4]
input: name='onnx::Gather_1' type=dtype('int64') shape=[1]
Constant(value=0) -> /Constant_output_0
Gather(onnx::Gather_1, /Constant_output_0, axis=0) -> /Gather_output_0
Constant(value=[2]) -> /Constant_1_output_0
Constant(value=[0]) -> /Constant_2_output_0
Constant(value=[0]) -> /Constant_3_output_0
Unsqueeze(/Gather_output_0, /Constant_3_output_0) -> /Unsqueeze_output_0
Constant(value=[1]) -> /Constant_4_output_0
Slice(onnx::Slice_0, /Constant_2_output_0, /Unsqueeze_output_0, /Constant_1_output_0, /Constant_4_output_0) -> 14
output: name='14' type=dtype('float32') shape=[3, 4, 'Slice14_dim_2']
CropLastDimensionWithTensorShape¶
forward¶
def forward(self, x, y):
return x[..., : y.shape[0]]
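By contrast, `y.shape[0]` is static metadata rather than tensor data, so under a static export it is the plain integer 2 and the slice bound folds to a constant (the `init7_s1_2` / `ends=[2]` initializers below). A one-line illustration:

<<<
import torch

y = torch.randn(2)
assert isinstance(y.shape[0], int) and y.shape[0] == 2  # metadata, not data
>>>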
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[3, 4, 4]
input: name='y' type=dtype('float32') shape=[2]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
Slice(x, init7_s1_0, init7_s1_2, init7_s1_2) -> output_0
output: name='output_0' type=dtype('float32') shape=[3, 4, 2]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[3, 4, 4]
input: name='y' type=dtype('float32') shape=[2]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='getitem_1_axis' type=int64 shape=(1,) -- array([-1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_1_step' type=int64 shape=(1,) -- array([1]) -- DynamoInterpreter._getitem_slice.3
Shape(y) -> getattr_1
Gather(getattr_1, init7_s1_0) -> _onx_gather0
Squeeze(_onx_gather0, init7_s1_0) -> getitem
Unsqueeze(getitem, init7_s1_0) -> _onx_unsqueeze0
Slice(x, init7_s1_0, _onx_unsqueeze0, getitem_1_axis, getitem_1_step) -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1', 'd_output_2']
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4, 4]
input: name='y' type=dtype('float32') shape=[2]
Constant(value_ints=[1]) -> val_11
Constant(value=[0]) -> val_3
Constant(value=[2]) -> val_7
Constant(value=[2]) -> val_10
Slice(x, val_3, val_7, val_10, val_11) -> slice_1
output: name='slice_1' type=dtype('float32') shape=[3, 4, 2]
script¶
FAILED
Input mismatch, inputs[0]=(T1r3,T1r1) but names=['onnx::Slice_0'], model=CropLastDimensionWithTensorShape, export='script'
InplaceAdd¶
forward¶
def forward(self, x):
x += self.bias
return x
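All four exporters functionalize the mutation: `x += self.bias` becomes a plain out-of-place Add in every graph below. A sketch of why that rewrite is value-preserving:

<<<
import torch

x, bias = torch.randn(3, 4), torch.ones(1, 4)
out_of_place = x + bias   # what the exported graphs compute
x += bias                 # the original mutating form
assert torch.equal(x, out_of_place)
>>>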
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=[3, 4]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.get_attr.0
Add(x, bias) -> output
output: name='output' type=dtype('float32') shape=[3, 4]
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add
output: name='add' type=dtype('float32') shape=[3, 4]
script¶
opset: domain='' version=17
input: name='onnx::Add_0' type=dtype('float32') shape=[3, 4]
Constant(value=[[1.0, 1.0...) -> /Constant_output_0
Add(onnx::Add_0, /Constant_output_0) -> 2
output: name='2' type=dtype('float32') shape=[3, 4]
InplaceAdd_¶
forward¶
def forward(self, x):
x.add_(self.bias)
return x
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=[3, 4]
custom-tracing¶
FAILED
Unable to interpret method 'aten_meth_add_', args=(x, bias), kwargs={}, dispatcher=None
--DEBUG--
[GraphBuilder-AYQ] Message starts, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_alias={}
dynamic_shapes=None
_known_value_shape={}
_known_types={'bias': 1, 'x': 1}
_known_shapes={'bias': (1, 4), 'x': (3, 4)}
_known_constants=['bias']
_known_ranks={}
--TORCH-USERS--
add_ -> {output}
bias -> {add_}
x -> {add_}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([3, 4])), '')) --- 1:2:(3, 4):
bias: ('run_node', ('', '')) --- 1:2:(1, 4):
add_: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%bias : [num_users=1] = get_attr[target=bias]
%add_ : [num_users=1] = call_method[target=add_](args = (%x, %bias), kwargs = {})
return add_
-- process.progress --
node 2/4 target=add_
--
[GraphBuilder-AYQ.make_tensor_input] x[1:3x4]
[GraphBuilder-AYQ.make_initializer] bias[torch.float32:torch.float32:[1.0, 1.0, 1.0, 1.0]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-AYQ] Message completed, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add
output: name='add' type=dtype('float32') shape=[3, 4]
script¶
opset: domain='' version=17
input: name='onnx::Add_0' type=dtype('float32') shape=[3, 4]
Constant(value=[[1.0, 1.0...) -> /Constant_output_0
Add(onnx::Add_0, /Constant_output_0) -> 2
output: name='2' type=dtype('float32') shape=[3, 4]
InplaceAdd_Mul¶
forward¶
def forward(self, x):
x.add_(self.bias)
return x * 2
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init1_s_' type=float32 shape=() -- array([2.], dtype=float32)-- shape_type_compute._cast_inputs.1(mul_Tensor)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
Add(x, c_bias) -> add_
Reshape(init1_s_, init7_s1_1) -> _onx_reshape0
Mul(add_, _onx_reshape0) -> output_0
output: name='output_0' type=dtype('float32') shape=[3, 4]
custom-tracing¶
FAILED
Unable to interpret method 'aten_meth_add_', args=(x, bias), kwargs={}, dispatcher=None
--DEBUG--
[GraphBuilder-AGM] Message starts, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_alias={}
dynamic_shapes=None
_known_value_shape={}
_known_types={'bias': 1, 'x': 1}
_known_shapes={'bias': (1, 4), 'x': (3, 4)}
_known_constants=['bias']
_known_ranks={}
--TORCH-USERS--
add_ -> {mul}
bias -> {add_}
x -> {add_}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([3, 4])), '')) --- 1:2:(3, 4):
bias: ('run_node', ('', '')) --- 1:2:(1, 4):
add_: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%bias : [num_users=1] = get_attr[target=bias]
%add_ : [num_users=1] = call_method[target=add_](args = (%x, %bias), kwargs = {})
%mul : [num_users=1] = call_function[target=operator.mul](args = (%add_, 2), kwargs = {})
return mul
-- process.progress --
node 2/5 target=add_
--
[GraphBuilder-AGM.make_tensor_input] x[1:3x4]
[GraphBuilder-AGM.make_initializer] bias[torch.float32:torch.float32:[1.0, 1.0, 1.0, 1.0]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-AGM] Message completed, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add
Constant(value=2.0) -> scalar_tensor_default
Mul(add, scalar_tensor_default) -> mul
output: name='mul' type=dtype('float32') shape=[3, 4]
script¶
opset: domain='' version=17
input: name='onnx::Add_0' type=dtype('float32') shape=[3, 4]
Constant(value=[[1.0, 1.0...) -> /Constant_output_0
Add(onnx::Add_0, /Constant_output_0) -> /Add_output_0
Constant(value=2.0) -> /Constant_1_output_0
Mul(/Add_output_0, /Constant_1_output_0) -> 4
output: name='4' type=dtype('float32') shape=[3, 4]
InplaceCloneAdd¶
forward¶
def forward(self, x):
x = x.clone()
x.add_(self.bias)
return x
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=[3, 4]
custom-tracing¶
FAILED
Unable to interpret method 'aten_meth_add_', args=(clone, bias), kwargs={}, dispatcher=None
--DEBUG--
[GraphBuilder-PWA] Message starts, there are 1 initializers, 1 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_alias={}
dynamic_shapes=None
_known_value_shape={}
_known_types={'bias': 1, 'clone': 1, 'x': 1}
_known_shapes={'bias': (1, 4), 'clone': (3, 4), 'x': (3, 4)}
_known_constants=['bias']
_known_ranks={}
--TORCH-USERS--
add_ -> {output}
bias -> {add_}
clone -> {add_}
x -> {clone}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([3, 4])), '')) --- 1:2:(3, 4):
clone: ('run_node', ('', '')) --- 1:2:(3, 4):
bias: ('run_node', ('', '')) --- 1:2:(1, 4):
add_: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%clone : [num_users=1] = call_method[target=clone](args = (%x,), kwargs = {})
%bias : [num_users=1] = get_attr[target=bias]
%add_ : [num_users=1] = call_method[target=add_](args = (%clone, %bias), kwargs = {})
return add_
-- process.progress --
node 3/5 target=add_
--
[GraphBuilder-PWA.make_tensor_input] x[1:3x4]
[GraphBuilder-PWA.make_initializer] bias[torch.float32:torch.float32:[1.0, 1.0, 1.0, 1.0]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-PWA.make_node] .clone [#:# ] Identity:['x']->['clone']
[GraphBuilder-PWA] Message completed, there are 1 initializers, 1 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[3, 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add
output: name='add' type=dtype('float32') shape=[3, 4]
script¶
opset: domain='' version=17
input: name='onnx::Add_0' type=dtype('float32') shape=[3, 4]
Constant(value=[[1.0, 1.0...) -> /Constant_output_0
Add(onnx::Add_0, /Constant_output_0) -> 2
output: name='2' type=dtype('float32') shape=[3, 4]
InplaceSetItemEllipsis_1¶
forward¶
def forward(self, index, update):
copy = self.params.clone()
copy[..., index] = update
return copy
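The advanced-index assignment decomposes to `aten.index_put_` with `None` standing in for the elided leading dimensions, as the dynamo-ir error below reports (`index_put_(clone, [None, None, index], update)`). An eager equivalence sketch:

<<<
import torch

params = torch.zeros(1, 8192, 4)
index = torch.tensor([0, 1, 2, 3])
update = torch.rand(8192, 4)

a = params.clone()
a[..., index] = update                        # the original assignment
b = params.clone()
b.index_put_((None, None, index), update)     # the reported decomposition
assert torch.equal(a, b)
>>>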
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='index' type=dtype('int64') shape=[4]
input: name='update' type=dtype('float32') shape=[8192, 4]
init: name='c_params' type=float32 shape=(1, 8192, 4) -- DynamoInterpret.placeholder.0
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
init: name='init7_s3_-1_1_1' type=int64 shape=(3,) -- array([-1, 1, 1])-- Opset.make_node.1/Shape
init: name='init7_s8192_' type=int64 shape=(8192,) -- Opset.make_node.0
init: name='init7_s3_1_-1_1' type=int64 shape=(3,) -- array([ 1, -1, 1])-- Opset.make_node.1/Shape
init: name='init7_s3_1_1_-1' type=int64 shape=(3,) -- array([ 1, 1, -1])-- Opset.make_node.1/Shape
init: name='init7_s3_1_8192_4' type=int64 shape=(3,) -- array([ 1, 8192, 4])-- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_32768' type=int64 shape=(1,) -- array([32768]) -- Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s32768_' type=int64 shape=(32768,) -- Opset.make_node.0
Expand(update, init7_s3_1_8192_4) -> _onx_expand0
Reshape(_onx_expand0, init7_s1_-1) -> _onx_reshape05
Reshape(init7_s1_0, init7_s3_-1_1_1) -> _onx_reshape0
Mul(_onx_reshape0, init7_s1_32768) -> _onx_mul0
Reshape(init7_s8192_, init7_s3_1_-1_1) -> _onx_reshape02
Mul(_onx_reshape02, init7_s1_4) -> _onx_mul02
Reshape(index, init7_s3_1_1_-1) -> _onx_reshape03
Add(_onx_mul0, _onx_reshape03) -> add-_onx_mul0
Add(add-_onx_mul0, _onx_mul02) -> _onx_add02
Reshape(_onx_add02, init7_s1_-1) -> _onx_reshape04
GatherElements(init7_s32768_, _onx_reshape04) -> _onx_gatherelements0
Reshape(c_params, init7_s1_-1) -> _onx_reshape06
ScatterElements(_onx_reshape06, _onx_gatherelements0, _onx_reshape05) -> _onx_scatterelements0
Reshape(_onx_scatterelements0, init7_s3_1_8192_4) -> output_0
output: name='output_0' type=dtype('float32') shape=[1, 8192, 4]
custom-tracing¶
FAILED
setitem not implemented for indices=(Ellipsis, 'index')
--DEBUG--
[GraphBuilder-ZBK] Message starts, there are 1 initializers, 0 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_alias={}
dynamic_shapes=None
_known_value_shape={}
_known_types={'_tensor_constant0': 1, 'index': 7, 'update': 1}
_known_shapes={'_tensor_constant0': (1, 8192, 4), 'index': (4,), 'update': (8192, 4)}
_known_constants=['_tensor_constant0']
_known_ranks={}
--TORCH-USERS--
_tensor_constant0 -> {setitem}
index -> {setitem}
setitem -> {output}
update -> {setitem}
--TORCH-SHAPES--
index: ('run_node', (('example_value', torch.int64, torch.Size([4])), '')) --- 7:1:(4,):
update: ('run_node', (('example_value', torch.float32, torch.Size([8192, 4])), '')) --- 1:2:(8192, 4):
_tensor_constant0: ('run_node', ('', '')) --- 1:3:(1, 8192, 4):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%index : [num_users=1] = placeholder[target=index]
%update : [num_users=1] = placeholder[target=update]
%_tensor_constant0 : [num_users=1] = get_attr[target=_tensor_constant0]
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%_tensor_constant0, (Ellipsis, %index), %update), kwargs = {})
return setitem
-- process.progress --
node 3/5 target=<built-in function setitem>
--
[GraphBuilder-ZBK.make_tensor_input] index[7:4]
[GraphBuilder-ZBK.make_tensor_input] update[1:8192x4]
[GraphBuilder-ZBK.make_initializer] _tensor_constant0[torch.float32:torch.float32] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-ZBK] Message completed, there are 1 initializers, 0 nodes, 2 inputs, 2 outputs.
dynamo-ir¶
FAILED
Failed to decompose the FX graph for ONNX compatibility. This is step 2/3 of exporting the model to ONNX. Next steps:
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'RuntimeError'>: false INTERNAL ASSERT FAILED at "/pytorch/build/aten/src/ATen/RegisterFunctionalization_1.cpp":5941, please report a bug to PyTorch. mutating a non-functional tensor with a functional tensor is not allowed. Please ensure that all of your inputs are wrapped inside of a functionalize() call.
While executing %index_put_ : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%clone, [None, None, %index], %update), kwargs = {})
Original traceback:
File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 116, in forward
copy[..., index] = update
(Refer to the full stack trace above for more information.)
script¶
FAILED
[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. In Node, ("/Add", Add, "", -1) : ("","",) -> ("/Add_output_0",) , Error Node (/Add)'s input 0 is marked single but has an empty string in the graph
InplaceSetItemEllipsis_2¶
forward¶
def forward(self, index, update):
copy = self.params.clone()
copy[..., index] = update
return copy
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='index' type=dtype('int64') shape=[4]
input: name='update' type=dtype('float32') shape=[8192, 4]
init: name='c_params' type=float32 shape=(1, 8192, 6) -- DynamoInterpret.placeholder.0
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
init: name='init7_s3_-1_1_1' type=int64 shape=(3,) -- array([-1, 1, 1])-- Opset.make_node.1/Shape
init: name='init7_s8192_' type=int64 shape=(8192,) -- Opset.make_node.0
init: name='init7_s3_1_-1_1' type=int64 shape=(3,) -- array([ 1, -1, 1])-- Opset.make_node.1/Shape
init: name='init7_s3_1_1_-1' type=int64 shape=(3,) -- array([ 1, 1, -1])-- Opset.make_node.1/Shape
init: name='init7_s3_1_8192_4' type=int64 shape=(3,) -- array([ 1, 8192, 4])-- Opset.make_node.1/Shape
init: name='init7_s1_49152' type=int64 shape=(1,) -- array([49152]) -- Opset.make_node.1/Shape
init: name='init7_s1_6' type=int64 shape=(1,) -- array([6]) -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s49152_' type=int64 shape=(49152,) -- Opset.make_node.0
init: name='init7_s3_1_8192_6' type=int64 shape=(3,) -- array([ 1, 8192, 6])-- Opset.make_node.1/Shape
Expand(update, init7_s3_1_8192_4) -> _onx_expand0
Reshape(_onx_expand0, init7_s1_-1) -> _onx_reshape05
Reshape(init7_s1_0, init7_s3_-1_1_1) -> _onx_reshape0
Mul(_onx_reshape0, init7_s1_49152) -> _onx_mul0
Reshape(init7_s8192_, init7_s3_1_-1_1) -> _onx_reshape02
Mul(_onx_reshape02, init7_s1_6) -> _onx_mul02
Reshape(index, init7_s3_1_1_-1) -> _onx_reshape03
Add(_onx_mul0, _onx_reshape03) -> add-_onx_mul0
Add(add-_onx_mul0, _onx_mul02) -> _onx_add02
Reshape(_onx_add02, init7_s1_-1) -> _onx_reshape04
GatherElements(init7_s49152_, _onx_reshape04) -> _onx_gatherelements0
Reshape(c_params, init7_s1_-1) -> _onx_reshape06
ScatterElements(_onx_reshape06, _onx_gatherelements0, _onx_reshape05) -> _onx_scatterelements0
Reshape(_onx_scatterelements0, init7_s3_1_8192_6) -> output_0
output: name='output_0' type=dtype('float32') shape=[1, 8192, 6]
custom-tracing¶
FAILED
setitem not implemented for indices=(Ellipsis, 'index')
--DEBUG--
[GraphBuilder-EWU] Message starts, there are 1 initializers, 0 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_alias={}
dynamic_shapes=None
_known_value_shape={}
_known_types={'_tensor_constant0': 1, 'index': 7, 'update': 1}
_known_shapes={'_tensor_constant0': (1, 8192, 6), 'index': (4,), 'update': (8192, 4)}
_known_constants=['_tensor_constant0']
_known_ranks={}
--TORCH-USERS--
_tensor_constant0 -> {setitem}
index -> {setitem}
setitem -> {output}
update -> {setitem}
--TORCH-SHAPES--
index: ('run_node', (('example_value', torch.int64, torch.Size([4])), '')) --- 7:1:(4,):
update: ('run_node', (('example_value', torch.float32, torch.Size([8192, 4])), '')) --- 1:2:(8192, 4):
_tensor_constant0: ('run_node', ('', '')) --- 1:3:(1, 8192, 6):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%index : [num_users=1] = placeholder[target=index]
%update : [num_users=1] = placeholder[target=update]
%_tensor_constant0 : [num_users=1] = get_attr[target=_tensor_constant0]
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%_tensor_constant0, (Ellipsis, %index), %update), kwargs = {})
return setitem
-- process.progress --
node 3/5 target=<built-in function setitem>
--
[GraphBuilder-EWU.make_tensor_input] index[7:4]
[GraphBuilder-EWU.make_tensor_input] update[1:8192x4]
[GraphBuilder-EWU.make_initializer] _tensor_constant0[torch.float32:torch.float32] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-EWU] Message completed, there are 1 initializers, 0 nodes, 2 inputs, 2 outputs.
dynamo-ir¶
FAILED
Failed to decompose the FX graph for ONNX compatibility. This is step 2/3 of exporting the model to ONNX. Next steps:
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'RuntimeError'>: false INTERNAL ASSERT FAILED at "/pytorch/build/aten/src/ATen/RegisterFunctionalization_1.cpp":5941, please report a bug to PyTorch. mutating a non-functional tensor with a functional tensor is not allowed. Please ensure that all of your inputs are wrapped inside of a functionalize() call.
While executing %index_put_ : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%clone, [None, None, %index], %update), kwargs = {})
Original traceback:
File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 134, in forward
copy[..., index] = update
(Refer to the full stack trace above for more information.)
script¶
FAILED
[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. In Node, ("/Add", Add, "", -1) : ("","",) -> ("/Add_output_0",) , Error Node (/Add)'s input 0 is marked single but has an empty string in the graph
InplaceSetItemMask¶
forward¶
def forward(self, x):
mask = x.to(bool)
x[mask] = 2
return x
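Writing through a boolean mask is lowered to an element-wise Where: custom-fallback below emits `Cast(to=9)` followed by `Where(to, 2.0, x)`. An eager equivalence sketch:

<<<
import torch

x = torch.randn(2, 3, 3)
mask = x.to(bool)
a = x.clone()
a[mask] = 2                                   # the original mask assignment
b = torch.where(mask, torch.tensor(2.0), x)   # what the exported graph computes
assert torch.equal(a, b)
>>>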
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[2, 3, 3]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([2.], dtype=float32)-- DynamoInterpret.placeholder.0
Cast(x, to=9) -> to
Where(to, c_lifted_tensor_0, x) -> output_0
output: name='output_0' type=dtype('float32') shape=[2, 3, 3]
custom-tracing¶
FAILED
setitem not implemented for indices=to
--DEBUG--
[GraphBuilder-ROQ] Message starts, there are 0 initializers, 1 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_alias={}
dynamic_shapes=None
_known_value_shape={}
_known_types={'to': 9, 'x': 1}
_known_shapes={'to': (2, 3, 3), 'x': (2, 3, 3)}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
setitem -> {output}
to -> {setitem}
x -> {setitem, to}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([2, 3, 3])), '')) --- 1:3:(2, 3, 3):
to: ('run_node', ('', '')) --- 9:3:(2, 3, 3):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=2] = placeholder[target=x]
%to : [num_users=1] = call_method[target=to](args = (%x, torch.bool), kwargs = {})
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%x, %to, 2), kwargs = {})
return setitem
-- process.progress --
node 2/4 target=<built-in function setitem>
--
[GraphBuilder-ROQ.make_tensor_input] x[1:2x3x3]
[GraphBuilder-ROQ.make_node] .to [#:# ] Cast>9:['x']->['to']
[GraphBuilder-ROQ] Message completed, there are 0 initializers, 1 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
FAILED
[ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Non-zero status code returned while running If node. Name:'n7_2' Status Message: Non-zero status code returned while running ScatterElements node. Name:'n15_n0_7' Status Message: Indices and updates must have the same rank
script¶
opset: domain='' version=17
input: name='onnx::Cast_0' type=dtype('float32') shape=[2, 3, 3]
Cast(onnx::Cast_0, to=9) -> /Cast_output_0
Cast(/Cast_output_0, to=9) -> /Cast_1_output_0
Constant(value=2.0) -> /Constant_output_0
Where(/Cast_1_output_0, /Constant_output_0, onnx::Cast_0) -> 4
output: name='4' type=dtype('float32') shape=[2, 3, 3]
InplaceSetItemSquare¶
forward¶
def forward(self, x):
x[:2, :3] = 1
return x
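The slice assignment is decomposed into nested `slice_scatter` calls, one per sliced axis; the graphs below realize each of them with Transpose/ScatterND pairs. An eager sketch of that decomposition using `torch.slice_scatter`:

<<<
import torch

x = torch.arange(25.0).reshape(5, 5)
a = x.clone()
a[:2, :3] = 1                                              # the original mutation
inner = torch.slice_scatter(x[:2], torch.ones(2, 3), dim=1, start=0, end=3)
b = torch.slice_scatter(x, inner, dim=0, start=0, end=2)   # nested scatter form
assert torch.equal(a, b)
>>>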
custom-fallback¶
- /home/xadupre/vv/this312/lib/python3.12/site-packages/torch/export/_unlift.py:75: UserWarning: Attempted to insert a get_attr Node with no underlying reference in the owning GraphModule! Call GraphModule.add_submodule to add the necessary submodule, GraphModule.add_parameter to add the necessary Parameter, or nn.Module.register_buffer to add the necessary buffer
getattr_node = gm.graph.get_attr(lifted_node)
- /home/xadupre/vv/this312/lib/python3.12/site-packages/torch/fx/graph.py:1801: UserWarning: Node lifted_tensor_0 target lifted_tensor_0 lifted_tensor_0 of does not reference an nn.Module, nn.Parameter, or buffer, which is what 'get_attr' Nodes typically target
warnings.warn(
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[5, 5]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s3_0_1_2' type=int64 shape=(3,) -- array([0, 1, 2]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s2_0_1' type=int64 shape=(2,) -- array([0, 1]) -- Opset.make_node.1/Shape
Concat(init7_s1_0, init7_s1_1, axis=0) -> SliceSlicePattern_init7_s1_1_axis
Concat(init7_s1_0, init7_s1_0, axis=0) -> SliceSlicePattern_init7_s1_0_start
Concat(init7_s1_2, init7_s1_3, axis=0) -> SliceSlicePattern_init7_s1_3_end
Slice(x, SliceSlicePattern_init7_s1_0_start, SliceSlicePattern_init7_s1_3_end, SliceSlicePattern_init7_s1_1_axis) -> slice_2
Shape(slice_2) -> _onx_shape0
Expand(c_lifted_tensor_0, _onx_shape0) -> fill
Transpose(fill, perm=[1,0]) -> _onx_transpose02
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> _onx_transpose0
Reshape(init7_s3_0_1_2, init7_s2_-1_1) -> _onx_reshape0
ScatterND(_onx_transpose0, _onx_reshape0, _onx_transpose02) -> _onx_scatternd0
Transpose(_onx_scatternd0, perm=[1,0]) -> slice_scatter
Reshape(init7_s2_0_1, init7_s2_-1_1) -> _onx_reshape02
ScatterND(x, _onx_reshape02, slice_scatter) -> output_0
Identity(output_0) -> output_1
output: name='output_1' type=dtype('float32') shape=[5, 5]
custom-tracing¶
FAILED
setitem not implemented for indices=(slice(None, 2, None), slice(None, 3, None))
--DEBUG--
[GraphBuilder-GUA] Message starts, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_alias={}
dynamic_shapes=None
_known_value_shape={}
_known_types={'x': 1}
_known_shapes={'x': (5, 5)}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
setitem -> {output}
x -> {setitem}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([5, 5])), '')) --- 1:2:(5, 5):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%x, (slice(None, 2, None), slice(None, 3, None)), 1), kwargs = {})
return setitem
-- process.progress --
node 1/3 target=<built-in function setitem>
--
[GraphBuilder-GUA.make_tensor_input] x[1:5x5]
[GraphBuilder-GUA] Message completed, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=[5, 5]
Constant(value=[[0], [1],...) -> val_43
Constant(value=[0]) -> val_26
Constant(value=[2]) -> val_29
Constant(value=[0]) -> val_32
Constant(value_ints=[1]) -> val_33
Slice(x, val_26, val_29, val_32, val_33) -> slice_3
Transpose(slice_3, perm=[1,0]) -> val_45
Constant(value=[[1.0, 1.0...) -> val_44
ScatterND(val_45, val_43, val_44, reduction=b'none') -> val_46
Transpose(val_46, perm=[1,0]) -> slice_scatter
Constant(value=[[0], [1]]) -> val_55
ScatterND(x, val_55, slice_scatter, reduction=b'none') -> slice_scatter_1
output: name='slice_scatter_1' type=dtype('float32') shape=[5, 5]
script¶
opset: domain='' version=17
input: name='onnx::Slice_0' type=dtype('float32') shape=[5, 5]
Constant(value=[0]) -> /Constant_output_0
Constant(value=[0]) -> /Constant_1_output_0
Constant(value=[2]) -> /Constant_2_output_0
Constant(value=[1]) -> /Constant_3_output_0
Slice(onnx::Slice_0, /Constant_1_output_0, /Constant_2_output_0, /Constant_output_0, /Constant_3_output_0) -> /Slice_output_0
Constant(value=[1]) -> /Constant_4_output_0
Constant(value=[0]) -> /Constant_5_output_0
Constant(value=[3]) -> /Constant_6_output_0
Constant(value=[1]) -> /Constant_7_output_0
Slice(/Slice_output_0, /Constant_5_output_0, /Constant_6_output_0, /Constant_4_output_0, /Constant_7_output_0) -> /Slice_1_output_0
Shape(/Slice_1_output_0) -> /Shape_output_0
Constant(value=[[1.0]]) -> /Constant_8_output_0
Expand(/Constant_8_output_0, /Shape_output_0) -> /Expand_output_0
Constant(value=[0, 1, 2]) -> /Constant_9_output_0
Constant(value=[[0], [1]]) -> onnx::Expand_27
Constant(value=[2, 3]) -> /Constant_10_output_0
Constant(value=[2]) -> /Constant_11_output_0
ConstantOfShape(/Constant_11_output_0, value=[1]) -> /ConstantOfShape_output_0
Constant(value=-1) -> /Constant_12_output_0
Mul(/ConstantOfShape_output_0, /Constant_12_output_0) -> /Mul_output_0
Equal(/Constant_10_output_0, /Mul_output_0) -> /Equal_output_0
Where(/Equal_output_0, /ConstantOfShape_output_0, /Constant_10_output_0) -> /Where_output_0
Expand(onnx::Expand_27, /Where_output_0) -> /Expand_1_output_0
Constant(value=[-1]) -> /Constant_13_output_0
Unsqueeze(/Expand_1_output_0, /Constant_13_output_0) -> /Unsqueeze_output_0
Constant(value=[2]) -> /Constant_14_output_0
ConstantOfShape(/Constant_14_output_0, value=[1]) -> /ConstantOfShape_1_output_0
Constant(value=-1) -> /Constant_15_output_0
Mul(/ConstantOfShape_1_output_0, /Constant_15_output_0) -> /Mul_1_output_0
Equal(/Constant_10_output_0, /Mul_1_output_0) -> /Equal_1_output_0
Where(/Equal_1_output_0, /ConstantOfShape_1_output_0, /Constant_10_output_0) -> /Where_1_output_0
Expand(/Constant_9_output_0, /Where_1_output_0) -> /Expand_2_output_0
Constant(value=[-1]) -> /Constant_16_output_0
Unsqueeze(/Expand_2_output_0, /Constant_16_output_0) -> /Unsqueeze_1_output_0
Concat(/Unsqueeze_output_0, /Unsqueeze_1_output_0, axis=-1) -> /Concat_output_0
Shape(onnx::Slice_0) -> /Shape_1_output_0
Constant(value=[0]) -> /Constant_17_output_0
Constant(value=[2]) -> /Constant_18_output_0
Constant(value=[922337203...) -> /Constant_19_output_0
Slice(/Shape_1_output_0, /Constant_18_output_0, /Constant_19_output_0, /Constant_17_output_0) -> /Slice_2_output_0
Concat(/Constant_10_output_0, /Slice_2_output_0, axis=0) -> /Concat_1_output_0
Reshape(/Expand_output_0, /Concat_1_output_0, allowzero=0) -> /Reshape_output_0
ScatterND(onnx::Slice_0, /Concat_output_0, /Reshape_output_0) -> 58
output: name='58' type=dtype('float32') shape=[5, 5]
InplaceSetItemSquareAdd¶
forward¶
def forward(self, x):
x[:2, :3] = 1
return x + 2
custom-fallback¶
- /home/xadupre/vv/this312/lib/python3.12/site-packages/torch/export/_unlift.py:75: UserWarning: Attempted to insert a get_attr Node with no underlying reference in the owning GraphModule! Call GraphModule.add_submodule to add the necessary submodule, GraphModule.add_parameter to add the necessary Parameter, or nn.Module.register_buffer to add the necessary buffer
getattr_node = gm.graph.get_attr(lifted_node)
- /home/xadupre/vv/this312/lib/python3.12/site-packages/torch/fx/graph.py:1801: UserWarning: Node lifted_tensor_0 target lifted_tensor_0 lifted_tensor_0 of does not reference an nn.Module, nn.Parameter, or buffer, which is what 'get_attr' Nodes typically target
warnings.warn(
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[5, 5]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s3_0_1_2' type=int64 shape=(3,) -- array([0, 1, 2]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s2_0_1' type=int64 shape=(2,) -- array([0, 1]) -- Opset.make_node.1/Shape
init: name='init1_s_' type=float32 shape=() -- array([2.], dtype=float32)-- shape_type_compute._cast_inputs.0
Concat(init7_s1_0, init7_s1_1, axis=0) -> SliceSlicePattern_init7_s1_1_axis
Concat(init7_s1_0, init7_s1_0, axis=0) -> SliceSlicePattern_init7_s1_0_start
Concat(init7_s1_2, init7_s1_3, axis=0) -> SliceSlicePattern_init7_s1_3_end
Slice(x, SliceSlicePattern_init7_s1_0_start, SliceSlicePattern_init7_s1_3_end, SliceSlicePattern_init7_s1_1_axis) -> slice_2
Shape(slice_2) -> _onx_shape0
Expand(c_lifted_tensor_0, _onx_shape0) -> fill
Transpose(fill, perm=[1,0]) -> _onx_transpose02
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> _onx_transpose0
Reshape(init7_s3_0_1_2, init7_s2_-1_1) -> _onx_reshape0
ScatterND(_onx_transpose0, _onx_reshape0, _onx_transpose02) -> _onx_scatternd0
Transpose(_onx_scatternd0, perm=[1,0]) -> slice_scatter
Reshape(init7_s2_0_1, init7_s2_-1_1) -> _onx_reshape02
ScatterND(x, _onx_reshape02, slice_scatter) -> output_0
Reshape(init1_s_, init7_s1_1) -> _onx_reshape03
Add(output_0, _onx_reshape03) -> output_1
output: name='output_1' type=dtype('float32') shape=[5, 5]
custom-tracing¶
FAILED
setitem not implemented for indices=(slice(None, 2, None), slice(None, 3, None))
--DEBUG--
[GraphBuilder-FGQ] Message starts, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_alias={}
dynamic_shapes=None
_known_value_shape={}
_known_types={'x': 1}
_known_shapes={'x': (5, 5)}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
setitem -> {add}
x -> {setitem}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([5, 5])), '')) --- 1:2:(5, 5):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%x, (slice(None, 2, None), slice(None, 3, None)), 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%setitem, 2), kwargs = {})
return add
-- process.progress --
node 1/4 target=<built-in function setitem>
--
[GraphBuilder-FGQ.make_tensor_input] x[1:5x5]
[GraphBuilder-FGQ] Message completed, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=[5, 5]
Constant(value=[[0], [1],...) -> val_43
Constant(value=[0]) -> val_26
Constant(value=[2]) -> val_29
Constant(value=[0]) -> val_32
Constant(value_ints=[1]) -> val_33
Slice(x, val_26, val_29, val_32, val_33) -> slice_3
Transpose(slice_3, perm=[1,0]) -> val_45
Constant(value=[[1.0, 1.0...) -> val_44
ScatterND(val_45, val_43, val_44, reduction=b'none') -> val_46
Transpose(val_46, perm=[1,0]) -> slice_scatter
Constant(value=[[0], [1]]) -> val_55
ScatterND(x, val_55, slice_scatter, reduction=b'none') -> slice_scatter_1
Constant(value=2.0) -> scalar_tensor_default
Add(slice_scatter_1, scalar_tensor_default) -> add
output: name='add' type=dtype('float32') shape=[5, 5]
script¶
opset: domain='' version=17
input: name='onnx::Slice_0' type=dtype('float32') shape=[5, 5]
Constant(value=[0]) -> /Constant_output_0
Constant(value=[0]) -> /Constant_1_output_0
Constant(value=[2]) -> /Constant_2_output_0
Constant(value=[1]) -> /Constant_3_output_0
Slice(onnx::Slice_0, /Constant_1_output_0, /Constant_2_output_0, /Constant_output_0, /Constant_3_output_0) -> /Slice_output_0
Constant(value=[1]) -> /Constant_4_output_0
Constant(value=[0]) -> /Constant_5_output_0
Constant(value=[3]) -> /Constant_6_output_0
Constant(value=[1]) -> /Constant_7_output_0
Slice(/Slice_output_0, /Constant_5_output_0, /Constant_6_output_0, /Constant_4_output_0, /Constant_7_output_0) -> /Slice_1_output_0
Shape(/Slice_1_output_0) -> /Shape_output_0
Constant(value=[[1.0]]) -> /Constant_8_output_0
Expand(/Constant_8_output_0, /Shape_output_0) -> /Expand_output_0
Constant(value=[0, 1, 2]) -> /Constant_9_output_0
Constant(value=[[0], [1]]) -> onnx::Expand_27
Constant(value=[2, 3]) -> /Constant_10_output_0
Constant(value=[2]) -> /Constant_11_output_0
ConstantOfShape(/Constant_11_output_0, value=[1]) -> /ConstantOfShape_output_0
Constant(value=-1) -> /Constant_12_output_0
Mul(/ConstantOfShape_output_0, /Constant_12_output_0) -> /Mul_output_0
Equal(/Constant_10_output_0, /Mul_output_0) -> /Equal_output_0
Where(/Equal_output_0, /ConstantOfShape_output_0, /Constant_10_output_0) -> /Where_output_0
Expand(onnx::Expand_27, /Where_output_0) -> /Expand_1_output_0
Constant(value=[-1]) -> /Constant_13_output_0
Unsqueeze(/Expand_1_output_0, /Constant_13_output_0) -> /Unsqueeze_output_0
Constant(value=[2]) -> /Constant_14_output_0
ConstantOfShape(/Constant_14_output_0, value=[1]) -> /ConstantOfShape_1_output_0
Constant(value=-1) -> /Constant_15_output_0
Mul(/ConstantOfShape_1_output_0, /Constant_15_output_0) -> /Mul_1_output_0
Equal(/Constant_10_output_0, /Mul_1_output_0) -> /Equal_1_output_0
Where(/Equal_1_output_0, /ConstantOfShape_1_output_0, /Constant_10_output_0) -> /Where_1_output_0
Expand(/Constant_9_output_0, /Where_1_output_0) -> /Expand_2_output_0
Constant(value=[-1]) -> /Constant_16_output_0
Unsqueeze(/Expand_2_output_0, /Constant_16_output_0) -> /Unsqueeze_1_output_0
Concat(/Unsqueeze_output_0, /Unsqueeze_1_output_0, axis=-1) -> /Concat_output_0
Shape(onnx::Slice_0) -> /Shape_1_output_0
Constant(value=[0]) -> /Constant_17_output_0
Constant(value=[2]) -> /Constant_18_output_0
Constant(value=[922337203...) -> /Constant_19_output_0
Slice(/Shape_1_output_0, /Constant_18_output_0, /Constant_19_output_0, /Constant_17_output_0) -> /Slice_2_output_0
Concat(/Constant_10_output_0, /Slice_2_output_0, axis=0) -> /Concat_1_output_0
Reshape(/Expand_output_0, /Concat_1_output_0, allowzero=0) -> /Reshape_output_0
ScatterND(onnx::Slice_0, /Concat_output_0, /Reshape_output_0) -> /ScatterND_output_0
Constant(value=2.0) -> /Constant_20_output_0
Add(/ScatterND_output_0, /Constant_20_output_0) -> 60
output: name='60' type=dtype('float32') shape=[5, 5]
InplaceSetItemSquareAdd2¶
forward¶
def forward(self, x):
x[:2, :3] = 1
return x + 2, x + 3
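Both returned expressions read the mutated x, so the graphs below feed the same slice_scatter result into both Add nodes. A tiny eager-mode check of that sharing:

<<<
import torch

x = torch.zeros(5, 5)
x[:2, :3] = 1
a, b = x + 2, x + 3  # both additions see the updated block
assert a[0, 0] == 3 and b[0, 0] == 4
>>>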
custom-fallback¶
- /home/xadupre/vv/this312/lib/python3.12/site-packages/torch/export/_unlift.py:75: UserWarning: Attempted to insert a get_attr Node with no underlying reference in the owning GraphModule! Call GraphModule.add_submodule to add the necessary submodule, GraphModule.add_parameter to add the necessary Parameter, or nn.Module.register_buffer to add the necessary buffer
getattr_node = gm.graph.get_attr(lifted_node)
- /home/xadupre/vv/this312/lib/python3.12/site-packages/torch/fx/graph.py:1801: UserWarning: Node lifted_tensor_0 target lifted_tensor_0 lifted_tensor_0 of does not reference an nn.Module, nn.Parameter, or buffer, which is what 'get_attr' Nodes typically target
warnings.warn(
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[5, 5]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s3_0_1_2' type=int64 shape=(3,) -- array([0, 1, 2]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s2_0_1' type=int64 shape=(2,) -- array([0, 1]) -- Opset.make_node.1/Shape
init: name='init1_s_' type=float32 shape=() -- array([2.], dtype=float32)-- shape_type_compute._cast_inputs.0
init: name='init1_s_2' type=float32 shape=() -- array([3.], dtype=float32)-- shape_type_compute._cast_inputs.0
Concat(init7_s1_0, init7_s1_1, axis=0) -> SliceSlicePattern_init7_s1_1_axis
Concat(init7_s1_0, init7_s1_0, axis=0) -> SliceSlicePattern_init7_s1_0_start
Concat(init7_s1_2, init7_s1_3, axis=0) -> SliceSlicePattern_init7_s1_3_end
Slice(x, SliceSlicePattern_init7_s1_0_start, SliceSlicePattern_init7_s1_3_end, SliceSlicePattern_init7_s1_1_axis) -> slice_2
Shape(slice_2) -> _onx_shape0
Expand(c_lifted_tensor_0, _onx_shape0) -> fill
Transpose(fill, perm=[1,0]) -> _onx_transpose02
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> _onx_transpose0
Reshape(init7_s3_0_1_2, init7_s2_-1_1) -> _onx_reshape0
ScatterND(_onx_transpose0, _onx_reshape0, _onx_transpose02) -> _onx_scatternd0
Transpose(_onx_scatternd0, perm=[1,0]) -> slice_scatter
Reshape(init7_s2_0_1, init7_s2_-1_1) -> _onx_reshape02
ScatterND(x, _onx_reshape02, slice_scatter) -> output_0
Reshape(init1_s_, init7_s1_1) -> _onx_reshape03
Add(output_0, _onx_reshape03) -> output_1
Reshape(init1_s_2, init7_s1_1) -> _onx_reshape04
Add(output_0, _onx_reshape04) -> output_2
output: name='output_1' type=dtype('float32') shape=[5, 5]
output: name='output_2' type=dtype('float32') shape=[5, 5]
custom-tracing¶
FAILED
setitem not implemented for indices=(slice(None, 2, None), slice(None, 3, None))
--DEBUG--
[GraphBuilder-LDO] Message starts, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_alias={}
dynamic_shapes=None
_known_value_shape={}
_known_types={'x': 1}
_known_shapes={'x': (5, 5)}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
setitem -> {add, add_1}
x -> {setitem}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([5, 5])), '')) --- 1:2:(5, 5):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%setitem : [num_users=2] = call_function[target=operator.setitem](args = (%x, (slice(None, 2, None), slice(None, 3, None)), 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%setitem, 2), kwargs = {})
%add_1 : [num_users=1] = call_function[target=operator.add](args = (%setitem, 3), kwargs = {})
return (add, add_1)
-- process.progress --
node 1/5 target=<built-in function setitem>
--
[GraphBuilder-LDO.make_tensor_input] x[1:5x5]
[GraphBuilder-LDO] Message completed, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=[5, 5]
Constant(value=[[0], [1],...) -> val_43
Constant(value=[0]) -> val_26
Constant(value=[2]) -> val_29
Constant(value=[0]) -> val_32
Constant(value_ints=[1]) -> val_33
Slice(x, val_26, val_29, val_32, val_33) -> slice_3
Transpose(slice_3, perm=[1,0]) -> val_45
Constant(value=[[1.0, 1.0...) -> val_44
ScatterND(val_45, val_43, val_44, reduction=b'none') -> val_46
Transpose(val_46, perm=[1,0]) -> slice_scatter
Constant(value=[[0], [1]]) -> val_55
ScatterND(x, val_55, slice_scatter, reduction=b'none') -> slice_scatter_1
Constant(value=2.0) -> scalar_tensor_default
Add(slice_scatter_1, scalar_tensor_default) -> add
Constant(value=3.0) -> scalar_tensor_default_1
Add(slice_scatter_1, scalar_tensor_default_1) -> add_1
output: name='add' type=dtype('float32') shape=[5, 5]
output: name='add_1' type=dtype('float32') shape=[5, 5]
script¶
opset: domain='' version=17
input: name='onnx::Slice_0' type=dtype('float32') shape=[5, 5]
Constant(value=[0]) -> /Constant_output_0
Constant(value=[0]) -> /Constant_1_output_0
Constant(value=[2]) -> /Constant_2_output_0
Constant(value=[1]) -> /Constant_3_output_0
Slice(onnx::Slice_0, /Constant_1_output_0, /Constant_2_output_0, /Constant_output_0, /Constant_3_output_0) -> /Slice_output_0
Constant(value=[1]) -> /Constant_4_output_0
Constant(value=[0]) -> /Constant_5_output_0
Constant(value=[3]) -> /Constant_6_output_0
Constant(value=[1]) -> /Constant_7_output_0
Slice(/Slice_output_0, /Constant_5_output_0, /Constant_6_output_0, /Constant_4_output_0, /Constant_7_output_0) -> /Slice_1_output_0
Shape(/Slice_1_output_0) -> /Shape_output_0
Constant(value=[[1.0]]) -> /Constant_8_output_0
Expand(/Constant_8_output_0, /Shape_output_0) -> /Expand_output_0
Constant(value=[0, 1, 2]) -> /Constant_9_output_0
Constant(value=[[0], [1]]) -> onnx::Expand_27
Constant(value=[2, 3]) -> /Constant_10_output_0
Constant(value=[2]) -> /Constant_11_output_0
ConstantOfShape(/Constant_11_output_0, value=[1]) -> /ConstantOfShape_output_0
Constant(value=-1) -> /Constant_12_output_0
Mul(/ConstantOfShape_output_0, /Constant_12_output_0) -> /Mul_output_0
Equal(/Constant_10_output_0, /Mul_output_0) -> /Equal_output_0
Where(/Equal_output_0, /ConstantOfShape_output_0, /Constant_10_output_0) -> /Where_output_0
Expand(onnx::Expand_27, /Where_output_0) -> /Expand_1_output_0
Constant(value=[-1]) -> /Constant_13_output_0
Unsqueeze(/Expand_1_output_0, /Constant_13_output_0) -> /Unsqueeze_output_0
Constant(value=[2]) -> /Constant_14_output_0
ConstantOfShape(/Constant_14_output_0, value=[1]) -> /ConstantOfShape_1_output_0
Constant(value=-1) -> /Constant_15_output_0
Mul(/ConstantOfShape_1_output_0, /Constant_15_output_0) -> /Mul_1_output_0
Equal(/Constant_10_output_0, /Mul_1_output_0) -> /Equal_1_output_0
Where(/Equal_1_output_0, /ConstantOfShape_1_output_0, /Constant_10_output_0) -> /Where_1_output_0
Expand(/Constant_9_output_0, /Where_1_output_0) -> /Expand_2_output_0
Constant(value=[-1]) -> /Constant_16_output_0
Unsqueeze(/Expand_2_output_0, /Constant_16_output_0) -> /Unsqueeze_1_output_0
Concat(/Unsqueeze_output_0, /Unsqueeze_1_output_0, axis=-1) -> /Concat_output_0
Shape(onnx::Slice_0) -> /Shape_1_output_0
Constant(value=[0]) -> /Constant_17_output_0
Constant(value=[2]) -> /Constant_18_output_0
Constant(value=[922337203...) -> /Constant_19_output_0
Slice(/Shape_1_output_0, /Constant_18_output_0, /Constant_19_output_0, /Constant_17_output_0) -> /Slice_2_output_0
Concat(/Constant_10_output_0, /Slice_2_output_0, axis=0) -> /Concat_1_output_0
Reshape(/Expand_output_0, /Concat_1_output_0, allowzero=0) -> /Reshape_output_0
ScatterND(onnx::Slice_0, /Concat_output_0, /Reshape_output_0) -> /ScatterND_output_0
Constant(value=2.0) -> /Constant_20_output_0
Add(/ScatterND_output_0, /Constant_20_output_0) -> 60
Constant(value=3.0) -> /Constant_21_output_0
Add(/ScatterND_output_0, /Constant_21_output_0) -> 62
output: name='60' type=dtype('float32') shape=[5, 5]
output: name='62' type=dtype('float32') shape=[5, 5]
SignatureFloat1¶
forward¶
def forward(self, x, alpha: float = 2.0):
return torch.sigmoid(self.linear(x)) - self.buff * alpha
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='alpha' type=dtype('float32') shape=[1]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init1_s_' type=float32 shape=() -- array([1.5], dtype=float32)-- shape_type_compute._cast_inputs.1(mul_Tensor)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([0.113, 0.051, 0.52 ], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.549], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, linear.weight, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Reshape(init1_s_, init7_s1_1) -> _onx_reshape0
Mul(b_buff, _onx_reshape0) -> _onx_mul0
Sub(sigmoid, _onx_mul0) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]
custom-tracing¶
FAILED
Unable to interpret method 'aten_meth_mul', args=(buff, alpha), kwargs={}, dispatcher=None
--DEBUG--
[GraphBuilder-QGU] Message starts, there are 3 initializers, 7 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_alias={}
dynamic_shapes=None
_known_value_shape={}
_known_types={'_sub_Linear__onx_matmul0': 1,
'_sub_Linear__onx_transpose0': 1,
'_sub_Linear_input_1': 1,
'_sub_Linear_linear': 1,
'_sub_Linear_output': 1,
'alpha': 1,
'buff': 1,
'linear': 1,
'linear.bias': 1,
'linear.weight': 1,
'sigmoid': 1,
'x': 1}
_known_shapes={'_sub_Linear__onx_matmul0': (4, 1),
'_sub_Linear__onx_transpose0': (3, 1),
'_sub_Linear_input_1': (4, 3),
'_sub_Linear_linear': (4, 1),
'_sub_Linear_output': (4, 1),
'alpha': (1,),
'buff': (1,),
'linear': (4, 1),
'linear.bias': (1,),
'linear.weight': (1, 3),
'sigmoid': (4, 1),
'x': (4, 3)}
_known_constants=['_sub_Linear__onx_transpose0', 'buff', 'linear.bias', 'linear.weight']
_known_ranks={}
--TORCH-USERS--
alpha -> {mul}
buff -> {mul}
linear -> {sigmoid}
mul -> {sub}
sigmoid -> {sub}
x -> {linear}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), '')) --- 1:2:(4, 3):
alpha: ('run_node', ('', '')) --- 1:1:(1,):
linear: ('run_node', ('', '')) --- 1:2:(4, 1):
sigmoid: ('run_node', ('', '')) --- 1:2:(4, 1):
buff: ('run_node', ('', '')) --- 1:1:(1,):
mul: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%alpha : float [num_users=1] = placeholder[target=alpha](default=2.0)
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%mul : [num_users=1] = call_method[target=mul](args = (%buff, %alpha), kwargs = {})
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %mul), kwargs = {})
return sub
-- process.progress --
node 5/8 target=mul
--
[GraphBuilder-QGU.make_tensor_input] x[1:4x3]
[GraphBuilder-QGU.make_tensor_input] alpha[1:1]
[GraphBuilder-QGU.make_initializer] linear.weight[torch.float32:torch.float32:[-0.015062670223414898, 0.3196617662906647, 0.5069112181663513]] - SOURCE: GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
[GraphBuilder-QGU.make_initializer] linear.bias[torch.float32:torch.float32:[0.262298583984375]] - SOURCE: GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
[GraphBuilder-QGU.make_initializer] buff[torch.float32:torch.float32:[0.5]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-QGU.make_node] .make_nodes [#:# ] Identity:['x']->['_sub_Linear_input_1']
[GraphBuilder-QGU.make_node] linear [#:# ] Transpose:['linear.weight']->['_sub_Linear__onx_transpose0']
[GraphBuilder-QGU.make_node] Opset [##:# ] MatMul:['_sub_Linear_input_1', '_sub_Linear__onx_transpose0']->['_sub_Linear__onx_matmul0']
[GraphBuilder-QGU.make_node] Opset2 [##:# ] Add:['_sub_Linear__onx_matmul0', 'linear.bias']->['_sub_Linear_linear']
[GraphBuilder-QGU.make_node] .output [#:# ] Identity:['_sub_Linear_linear']->['_sub_Linear_output']
[GraphBuilder-QGU.make_node] .make_nodes2 [#:# ] Identity:['_sub_Linear_output']->['linear']
[GraphBuilder-QGU.make_node] Opset3 [#:# ] Sigmoid:['linear']->['sigmoid']
[GraphBuilder-QGU] Message completed, there are 3 initializers, 7 nodes, 2 inputs, 2 outputs.
dynamo-ir¶
FAILED
Input mismatch, inputs[0]=(T1r2,float) but names=['x'], model=SignatureFloat1, export='dynamo-ir'
script¶
FAILED
[ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Unexpected input data type. Actual: (tensor(float)) , expected: (tensor(double))
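Both failures trace back to alpha being a Python float: dynamo-ir rejects the extra non-tensor input, and the TorchScript exporter types a Python float as a double tensor, so the float32 value fed at runtime is refused. A hedged workaround sketch, passing the scalar as a tensor (the wrapper below is illustrative, not part of the test suite):

<<<
import torch

class SignatureFloat1AsTensor(torch.nn.Module):
    # illustrative rewrite: alpha becomes a regular tensor input,
    # so every exporter sees only tensors in the signature
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(3, 1)
        self.register_buffer("buff", torch.tensor([0.5]))

    def forward(self, x, alpha):
        return torch.sigmoid(self.linear(x)) - self.buff * alpha

ep = torch.export.export(
    SignatureFloat1AsTensor(), (torch.randn(4, 3), torch.tensor(2.0))
)
>>>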
SignatureInt1¶
forward¶
def forward(self, x, i: int = 2):
return torch.sigmoid(self.linear(x)) - self.buff + x[:, i : i + 1]
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='i' type=dtype('int64') shape=[1]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.08 , -0.448, 0.258], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.028], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, linear.weight, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
Slice(x, init7_s1_1, init7_s1_2, init7_s1_1) -> slice_2
Add(sub, slice_2) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='i' type=dtype('int64') shape=[1]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.452, -0.168, -0.495], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.232], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.get_attr.0
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- shape_type_compute._cast_inputs.1(add)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='getitem_axis' type=int64 shape=(2,) -- array([0, 1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
init: name='getitem_step' type=int64 shape=(2,) -- array([1, 1]) -- DynamoInterpreter._getitem_slice.3
Concat(init7_s1_0, i, axis=0) -> _onx_concat02
Gemm(x, linear.weight, linear.bias, transB=1) -> _sub_Linear_linear
Sigmoid(_sub_Linear_linear) -> sigmoid
Sub(sigmoid, buff) -> sub
Reshape(init7_s_1, init7_s1_1) -> _onx_reshape0
Add(i, _onx_reshape0) -> _onx_add0
Concat(init7_s1_4, _onx_add0, axis=0) -> _onx_concat0
Slice(x, _onx_concat02, _onx_concat0, getitem_axis, getitem_step) -> getitem
Add(sub, getitem) -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1']
dynamo-ir¶
FAILED
Input mismatch, inputs[0]=(T1r2,int) but names=['x'], model=SignatureInt1, export='dynamo-ir'
script¶
FAILED
[ONNXRuntimeError] : 1 : FAIL : Non-zero status code returned while running Slice node. Name:'/Slice' Status Message: slice.cc:195 FillVectorsFromInput Starts must be a 1-D array
SignatureInt2¶
forward¶
def forward(self, x, i: int = 2):
return torch.sigmoid(self.linear(x)) - self.buff + x[:, i]
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='i' type=dtype('int64') shape=[1]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- Opset.make_node.1/Shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.283, 0.411, 0.159], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.344], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gather(x, init7_s_1, axis=1) -> select
Gemm(x, linear.weight, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
Add(sub, select) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 4]
custom-tracing¶
FAILED
One index is given as an integer i but this requires to append a node 'Squeeze' after this one and this is not yet implemented. You can replace the integer by `i:i+1`
--DEBUG--
[GraphBuilder-UBO] Message starts, there are 4 initializers, 8 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_alias={}
dynamic_shapes=None
_known_value_shape={}
_known_types={'_sub_Linear__onx_matmul0': 1,
'_sub_Linear__onx_transpose0': 1,
'_sub_Linear_input_1': 1,
'_sub_Linear_linear': 1,
'_sub_Linear_output': 1,
'buff': 1,
'getitem_axis': 7,
'i': 7,
'linear': 1,
'linear.bias': 1,
'linear.weight': 1,
'sigmoid': 1,
'sub': 1,
'x': 1}
_known_shapes={'_sub_Linear__onx_matmul0': (4, 1),
'_sub_Linear__onx_transpose0': (3, 1),
'_sub_Linear_input_1': (4, 3),
'_sub_Linear_linear': (4, 1),
'_sub_Linear_output': (4, 1),
'buff': (1,),
'getitem_axis': (2,),
'i': (1,),
'linear': (4, 1),
'linear.bias': (1,),
'linear.weight': (1, 3),
'sigmoid': (4, 1),
'sub': (4, 1),
'x': (4, 3)}
_known_constants=['_sub_Linear__onx_transpose0',
'buff',
'getitem_axis',
'linear.bias',
'linear.weight']
_known_ranks={}
--TORCH-USERS--
buff -> {sub}
getitem -> {add}
i -> {getitem}
linear -> {sigmoid}
sigmoid -> {sub}
sub -> {add}
x -> {linear, getitem}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), '')) --- 1:2:(4, 3):
i: ('run_node', ('', '')) --- 7:1:(1,):
linear: ('run_node', ('', '')) --- 1:2:(4, 1):
sigmoid: ('run_node', ('', '')) --- 1:2:(4, 1):
buff: ('run_node', ('', '')) --- 1:1:(1,):
sub: ('run_node', ('', '')) --- 1:2:(4, 1):
getitem: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=2] = placeholder[target=x]
%i : int [num_users=1] = placeholder[target=i](default=2)
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %buff), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%x, (slice(None, None, None), %i)), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sub, %getitem), kwargs = {})
return add
-- process.progress --
node 6/9 target=<built-in function getitem>
--
[GraphBuilder-UBO.make_tensor_input] x[1:4x3]
[GraphBuilder-UBO.make_tensor_input] i[7:1]
[GraphBuilder-UBO.make_initializer] linear.weight[torch.float32:torch.float32:[-0.44255390763282776, 0.37168198823928833, -0.3502069413661957]] - SOURCE: GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
[GraphBuilder-UBO.make_initializer] linear.bias[torch.float32:torch.float32:[0.5427876114845276]] - SOURCE: GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
[GraphBuilder-UBO.make_initializer] buff[torch.float32:torch.float32:[0.5]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-UBO.make_initializer] getitem_axis[int64:int64:[0, 1]] - SOURCE: DynamoInterpreter._getitem_slice.axis.1
[GraphBuilder-UBO.make_node] .make_nodes [#:# ] Identity:['x']->['_sub_Linear_input_1']
[GraphBuilder-UBO.make_node] linear [#:# ] Transpose:['linear.weight']->['_sub_Linear__onx_transpose0']
[GraphBuilder-UBO.make_node] Opset [##:# ] MatMul:['_sub_Linear_input_1', '_sub_Linear__onx_transpose0']->['_sub_Linear__onx_matmul0']
[GraphBuilder-UBO.make_node] Opset2 [##:# ] Add:['_sub_Linear__onx_matmul0', 'linear.bias']->['_sub_Linear_linear']
[GraphBuilder-UBO.make_node] .output [#:# ] Identity:['_sub_Linear_linear']->['_sub_Linear_output']
[GraphBuilder-UBO.make_node] .make_nodes2 [#:# ] Identity:['_sub_Linear_output']->['linear']
[GraphBuilder-UBO.make_node] Opset3 [#:# ] Sigmoid:['linear']->['sigmoid']
[GraphBuilder-UBO.make_node] sub [##:# ] Sub:['sigmoid', 'buff']->['sub']
[GraphBuilder-UBO] Message completed, there are 4 initializers, 8 nodes, 2 inputs, 2 outputs.
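The error above also names the workaround: index with a length-1 slice instead of an integer and squeeze the resulting axis, which the tracer can already translate (compare SignatureInt1). A one-line check of the equivalence:

<<<
import torch

x = torch.randn(4, 3)
i = 2
assert torch.equal(x[:, i], x[:, i : i + 1].squeeze(1))
>>>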
dynamo-ir¶
FAILED
Input mismatch, inputs[0]=(T1r2,int) but names=['x'], model=SignatureInt2, export='dynamo-ir'
script¶
opset: domain='' version=17
input: name='onnx::Gemm_0' type=dtype('float32') shape=[4, 3]
input: name='onnx::Gather_1' type=dtype('int64') shape=None
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.454, -0.57 , -0.019], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.154], dtype=float32)
Gather(onnx::Gemm_0, onnx::Gather_1, axis=1) -> /Gather_output_0
Gemm(onnx::Gemm_0, linear.weight, linear.bias, alpha=1.00, beta=1.00, transB=1) -> /linear/Gemm_output_0
Sigmoid(/linear/Gemm_output_0) -> /Sigmoid_output_0
Sub(/Sigmoid_output_0, buff) -> /Sub_output_0
Add(/Sub_output_0, /Gather_output_0) -> 9
output: name='9' type=dtype('float32') shape=[4, 4]
SignatureListFixedLength¶
forward¶
def forward(self, x, lx: list):
return (
torch.sigmoid(self.linear(x)) - self.buff + lx[0] * lx[1].sum(axis=1, keepdim=True)
)
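All exporters except custom-tracing flatten the list into one tensor input per element (lx_0, lx_1 below); custom-tracing keeps an ONNX sequence and reads it with SequenceAt. A minimal sketch of how torch.export flattens such an input (the module is an illustrative stand-in for the case above):

<<<
import torch

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(3, 1)
        self.register_buffer("buff", torch.tensor([0.5]))

    def forward(self, x, lx: list):
        return (
            torch.sigmoid(self.linear(x))
            - self.buff
            + lx[0] * lx[1].sum(axis=1, keepdim=True)
        )

ep = torch.export.export(
    M(), (torch.randn(4, 3), [torch.randn(4, 1), torch.randn(4, 2)])
)
# the list is flattened: the signature carries x plus one input per element
print(ep.graph_signature)
>>>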
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.309, -0.131, 0.478], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.189], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, linear.weight, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx' type='NOTENSOR' shape=None
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.21 , -0.209, -0.159], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.308], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.get_attr.0
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- DynamoInterpreter.getitem.1
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- DynamoInterpreter.getitem.1
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
Gemm(x, linear.weight, linear.bias, transB=1) -> _sub_Linear_linear
Sigmoid(_sub_Linear_linear) -> sigmoid
Sub(sigmoid, buff) -> sub
SequenceAt(lx, init7_s_0) -> getitem
SequenceAt(lx, init7_s_1) -> getitem_1
ReduceSum(getitem_1, init7_s1_1, keepdims=1) -> sum_1
Mul(getitem, sum_1) -> mul
Add(sub, mul) -> output
output: name='output' type=dtype('float32') shape=[4, 1]
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='linear.bias' type=float32 shape=(1,) -- array([0.256], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
Constant(value=[[0.183093...) -> t
Gemm(x, t, linear.bias, beta=1.00, transB=0, alpha=1.00, transA=0) -> addmm
Sigmoid(addmm) -> sigmoid
Sub(sigmoid, buff) -> sub
Constant(value=[1]) -> val_3
ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> add
output: name='add' type=dtype('float32') shape=[4, 1]
script¶
opset: domain='' version=17
input: name='onnx::Gemm_0' type=dtype('float32') shape=[4, 3]
input: name='onnx::Mul_1' type=dtype('float32') shape=[4, 1]
input: name='onnx::ReduceSum_2' type=dtype('float32') shape=[4, 2]
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.285, -0.179, 0.475], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.059], dtype=float32)
Constant(value=[1]) -> onnx::ReduceSum_9
ReduceSum(onnx::ReduceSum_2, onnx::ReduceSum_9, keepdims=1) -> /ReduceSum_output_0
Mul(onnx::Mul_1, /ReduceSum_output_0) -> /Mul_output_0
Gemm(onnx::Gemm_0, linear.weight, linear.bias, alpha=1.00, beta=1.00, transB=1) -> /linear/Gemm_output_0
Sigmoid(/linear/Gemm_output_0) -> /Sigmoid_output_0
Sub(/Sigmoid_output_0, buff) -> /Sub_output_0
Add(/Sub_output_0, /Mul_output_0) -> 12
output: name='12' type=dtype('float32') shape=[4, 1]
SignatureListFixedWithNone¶
forward¶
def forward(self, lx):
print(lx)
print(lx[1])
x = lx[0]
if lx[1] is not None:
x += lx[1]
if lx[2] is not None:
x += lx[2]
return x
custom-fallback¶
FAILED
Input mismatch, inputs[0]=(#3[T1r2,T1r2,None],) but names=['lx_0', 'lx_1'], model=SignatureListFixedWithNone, export='custom-fallback'
custom-tracing¶
CustomProxy(lx)
CustomProxy(getitem)
FAILED
Unable to create an input 'lx' with type #3[T1r2,T1r2,None]
--DEBUG--
[GraphBuilder-FZK] Message starts, there are 0 initializers, 0 nodes, 0 inputs, 0 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_alias={}
dynamic_shapes=None
_known_value_shape={}
_known_types={}
_known_shapes={}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
lx -> {getitem_1, getitem_5, getitem_4, getitem, getitem_2, getitem_3}
--TORCH-SHAPES--
lx: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%lx : [num_users=6] = placeholder[target=lx]
%getitem : [num_users=0] = call_function[target=operator.getitem](args = (%lx, 1), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%lx, 0), kwargs = {})
%getitem_2 : [num_users=0] = call_function[target=operator.getitem](args = (%lx, 1), kwargs = {})
%getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%lx, 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%getitem_1, %getitem_3), kwargs = {})
%getitem_4 : [num_users=0] = call_function[target=operator.getitem](args = (%lx, 2), kwargs = {})
%getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%lx, 2), kwargs = {})
%add_1 : [num_users=1] = call_function[target=operator.add](args = (%add, %getitem_5), kwargs = {})
return add_1
-- process.progress --
node 0/10 target=lx
--
[GraphBuilder-FZK] Message completed, there are 0 initializers, 0 nodes, 0 inputs, 0 outputs.
dynamo-ir¶
FAILED
Input mismatch, inputs[0]=(#3[T1r2,T1r2,None],) but names=['lx_0', 'lx_1'], model=SignatureListFixedWithNone, export='dynamo-ir'
script¶
FAILED
Input mismatch, inputs[0]=(#3[T1r2,T1r2,None],) but names=['inp.1', 'inp'], model=SignatureListFixedWithNone, export='script'
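Every exporter flattens the list to tensor-only inputs, so the None entry has no slot in the signature and the three-element example no longer matches. A hedged workaround is to keep None out of the exported signature, e.g. with a wrapper (illustrative, assuming the optional entry stays absent at runtime):

<<<
import torch

class DropNone(torch.nn.Module):
    # illustrative wrapper: the exported signature only carries the two
    # real tensors; the None entry is reinserted inside forward
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, lx0, lx1):
        return self.model([lx0, lx1, None])
>>>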
SignatureListVariableLength¶
forward¶
def forward(self, x, lx: list):
t = torch.cat(lx, dim=1).sum(axis=1, keepdim=True)
return torch.sigmoid(self.linear(x)) - self.buff + t
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.234, 0.473, -0.379], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.4], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Concat(lx_0, lx_1, axis=1) -> cat
ReduceSum(cat, init7_s1_1, keepdims=1) -> sum_1
Gemm(x, linear.weight, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
Add(sub, sum_1) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 1]
custom-tracing¶
FAILED
Type is unknown for result 'l', known_types={'x': 1}
--DEBUG--
[GraphBuilder-CKM] Message starts, there are 0 initializers, 0 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
dynamic_objects_rev=
dynamic_dimensions_source={}
dynamic_alias={}
dynamic_shapes=None
_known_value_shape={}
_known_types={'x': 1}
_known_shapes={'x': (4, 3)}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
cat -> {sum_1}
lx -> {cat}
x -> {linear}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), '')) --- 1:2:(4, 3):
lx: ('run_node', ('', '')) --- :::
cat: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%lx : list [num_users=1] = placeholder[target=lx]
%cat : [num_users=1] = call_function[target=torch.cat](args = (%lx, 1), kwargs = {})
%sum_1 : [num_users=1] = call_method[target=sum](args = (%cat,), kwargs = {axis: 1, keepdim: True})
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %buff), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sub, %sum_1), kwargs = {})
return add
-- process.progress --
node 2/10 target=<built-in method cat of type object at 0x7fa9d1ef7ba0>
--
[GraphBuilder-CKM.make_tensor_input] x[1:4x3]
[GraphBuilder-CKM.make_tensor_input] lx[0:]
[GraphBuilder-CKM] Message completed, there are 0 initializers, 0 nodes, 2 inputs, 2 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='lx_0' type=dtype('float32') shape=[4, 1]
input: name='lx_1' type=dtype('float32') shape=[4, 2]
init: name='linear.bias' type=float32 shape=(1,) -- array([0.258], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
Concat(lx_0, lx_1, axis=1) -> cat
Constant(value=[1]) -> val_3
ReduceSum(cat, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Constant(value=[[0.061144...) -> t
Gemm(x, t, linear.bias, beta=1.00, transB=0, alpha=1.00, transA=0) -> addmm
Sigmoid(addmm) -> sigmoid
Sub(sigmoid, buff) -> sub
Add(sub, sum_1) -> add
output: name='add' type=dtype('float32') shape=[4, 1]
script¶
opset: domain='' version=17
input: name='onnx::Gemm_0' type=dtype('float32') shape=[4, 3]
input: name='onnx::Concat_1' type=dtype('float32') shape=[4, 1]
input: name='onnx::Concat_2' type=dtype('float32') shape=[4, 2]
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.45 , -0.575, 0.551], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.343], dtype=float32)
Concat(onnx::Concat_1, onnx::Concat_2, axis=1) -> /Concat_output_0
Constant(value=[1]) -> onnx::ReduceSum_7
ReduceSum(/Concat_output_0, onnx::ReduceSum_7, keepdims=1) -> /ReduceSum_output_0
Gemm(onnx::Gemm_0, linear.weight, linear.bias, alpha=1.00, beta=1.00, transB=1) -> /linear/Gemm_output_0
Sigmoid(/linear/Gemm_output_0) -> /Sigmoid_output_0
Sub(/Sigmoid_output_0, buff) -> /Sub_output_0
Add(/Sub_output_0, /ReduceSum_output_0) -> 12
output: name='12' type=dtype('float32') shape=[4, 1]
SignatureShapeAsIndex¶
forward¶
def forward(self, x, y):
t = torch.sigmoid(self.linear(x)) + x
return t[:, : y.shape[1]]
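y.shape[1] is static here, so custom-fallback and dynamo-ir fold the slice end into the constant 2, while custom-tracing keeps the Shape/Gather chain and loses the static output shape. The slice itself is the usual narrow pattern:

<<<
import torch

t = torch.randn(4, 3)
y = torch.randn(4, 2)
# slicing with a shape entry equals narrow with a static length
assert torch.equal(t[:, : y.shape[1]], torch.narrow(t, 1, 0, y.shape[1]))
>>>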
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='y' type=dtype('float32') shape=[4, 2]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.269, -0.268, -0.107], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.276], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, linear.weight, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Add(sigmoid, x) -> add
Slice(add, init7_s1_0, init7_s1_2, init7_s1_1) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 2]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='y' type=dtype('float32') shape=[4, 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.046, 0.272, -0.34 ], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.022], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='getitem_1_axis' type=int64 shape=(2,) -- array([0, 1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='getitem_1_start' type=int64 shape=(2,) -- array([0, 0]) -- DynamoInterpreter._getitem_slice.2
init: name='getitem_1_step' type=int64 shape=(2,) -- array([1, 1]) -- DynamoInterpreter._getitem_slice.3
Gemm(x, linear.weight, linear.bias, transB=1) -> _sub_Linear_linear
Sigmoid(_sub_Linear_linear) -> sigmoid
Add(sigmoid, x) -> add
Shape(y) -> getattr_1
Gather(getattr_1, init7_s1_1) -> _onx_gather0
Squeeze(_onx_gather0, init7_s1_0) -> getitem
Unsqueeze(getitem, init7_s1_0) -> _onx_unsqueeze0
Concat(init7_s1_4, _onx_unsqueeze0, axis=0) -> _onx_concat0
Slice(add, getitem_1_start, _onx_concat0, getitem_1_axis, getitem_1_step) -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1']
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=[4, 3]
input: name='y' type=dtype('float32') shape=[4, 2]
init: name='linear.bias' type=float32 shape=(1,) -- array([0.063], dtype=float32)
Constant(value_ints=[1]) -> val_23
Constant(value=[[0.370703...) -> t
Gemm(x, t, linear.bias, beta=1.00, transB=0, alpha=1.00, transA=0) -> addmm
Sigmoid(addmm) -> sigmoid
Add(sigmoid, x) -> add
Constant(value=[0]) -> val_14
Constant(value=[2]) -> val_18
Constant(value=[1]) -> val_22
Slice(add, val_14, val_18, val_22, val_23) -> slice_2
output: name='slice_2' type=dtype('float32') shape=[4, 2]
script¶
FAILED
Input mismatch, inputs[0]=(T1r2,T1r2) but names=['onnx::Gemm_0'], model=SignatureShapeAsIndex, export='script'
TypeBFloat16¶
forward¶
def forward(self, x):
xb = x.to(torch.bfloat16)
return (xb + xb).to(torch.float32)
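onnxruntime's CPU provider has no bfloat16 Add kernel, which explains the two failures below; the custom exporters sidestep it by adding in float32 and casting afterwards (Cast(..., to=16) then to=1). An eager-mode check that the reordering is harmless here (for doubling, both orders round identically on normal values):

<<<
import torch

x = torch.randn(4, 4)
ref = (x.to(torch.bfloat16) + x.to(torch.bfloat16)).to(torch.float32)
alt = (x + x).to(torch.bfloat16).to(torch.float32)  # what the custom graphs compute
assert torch.equal(ref, alt)
>>>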
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[4, 4]
Add(x, x) -> add-x
Cast(add-x, to=16) -> add
Cast(add, to=1) -> output_0
output: name='output_0' type=dtype('float32') shape=[4, 4]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=[4, 4]
Add(x, x) -> add-x
Cast(add-x, to=16) -> add
Cast(add, to=1) -> output
output: name='output' type=dtype('float32') shape=[4, 4]
dynamo-ir¶
FAILED
[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(14) node with name 'node_Add_1'
script¶
FAILED
[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(14) node with name '/Add'
Summary¶
[Summary pivot table: case × exporter (custom-fallback, custom-tracing, dynamo-ir, script). The case labels did not survive extraction, leaving only unattributable FAIL markers; see each case's section above for its per-exporter result.]