Exported into ONNX with Dynamic Shapes¶
The following script shows the exported program for many short cases
and various ways to retrieve the torch.fx.Graph equivalent to the original model.
The tested scenarios are described at Tested Scenarios.
<<<
import inspect
import textwrap
import pandas
from experimental_experiment.torch_interpreter.eval import discover, run_exporter
from experimental_experiment.ext_test_case import unit_test_going
from experimental_experiment.helpers import pretty_onnx

cases = discover()
print()
print(":ref:`Summary <lod-summary-exported-program>`")
print()
sorted_cases = sorted(cases.items())
if unit_test_going():
    sorted_cases = sorted_cases[:3]
for name, cls_model in sorted_cases:
    print(f"* :ref:`{name} <lod-model-case-export-{name}>`")
print()

obs = []
for name, cls_model in sorted(cases.items()):
    print()
    print(f".. _lod-model-case-export-{name}:")
    print()
    print(name)
    print("=" * len(name))
    print()
    print("forward")
    print("+++++++")
    print()
    print("::")
    print()
    print(
        textwrap.indent(textwrap.dedent(inspect.getsource(cls_model.forward)), " ")
    )
    print()

    for exporter in (
        "custom-fallback",
        "custom-dec",
        "custom-tracing",
        "dynamo-ir",
        "script",
    ):
        expname = exporter.replace("export-", "")
        print()
        print(expname)
        print("+" * len(expname))
        print()
        res = run_exporter(exporter, cls_model, True, quiet=True)
        case_ref = f":ref:`{name} <lod-model-case-export-{name}>`"
        if "exported" in res:
            print("::")
            print()
            print(textwrap.indent(pretty_onnx(res["onnx"]), " "))
            print()
            obs.append(dict(case=case_ref, error="", exporter=exporter))
        else:
            print("**FAILED**")
            print()
            print("::")
            print()
            print(textwrap.indent(str(res["error"]), " "))
            print()
            obs.append(dict(case=case_ref, error="FAIL", exporter=exporter))

print()
print(".. _lod-summary-exported-program:")
print()
print("Summary")
print("+++++++")
print()
df = pandas.DataFrame(obs)
piv = df.pivot(index="case", columns="exporter", values="error")
print(piv.to_markdown(tablefmt="rst"))
print()
>>>
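The loop above drives every discovered case through every exporter. To inspect a single
case in isolation, a minimal sketch along the same lines (assuming the same ``discover``
and ``run_exporter`` helpers used above; ``AtenInterpolate`` is simply one of the
discovered case names) could look like this:

    from experimental_experiment.torch_interpreter.eval import discover, run_exporter
    from experimental_experiment.helpers import pretty_onnx

    cases = discover()                    # maps a case name to its model class
    cls_model = cases["AtenInterpolate"]  # any discovered case name works here
    res = run_exporter("dynamo-ir", cls_model, True, quiet=True)
    if "exported" in res:
        print(pretty_onnx(res["onnx"]))   # pretty-print the exported ONNX graph
    else:
        print("FAILED:", res["error"])    # the exporter failed, show the error

The dictionary returned by ``run_exporter`` is the same one the script inspects:
a successful export exposes the ONNX model, a failure exposes the error.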
AtenAsStrided¶
forward¶
def forward(self, x):
    y = torch.as_strided(x, (2, 2, 8, 4), (128, 8, 16, 1))
    return y
custom-fallback¶
FAILED
The implementation is still incorrect, x='x', shape=('batch', 2, 8, 8), size=[2, 2, 8, 4], stride=[128, 8, 16, 1], storage_offset=None
--DEBUG--
[GraphBuilder-YDK] Message starts, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
--CONSTRAINTS--
batch = {'s0'}
s0 = {'batch'}
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
s0 = 's0'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s0': 'batch'}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_types={'x': 1}
_known_shapes={'x': ('batch', 2, 8, 8)}
_known_value_shape={}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
as_strided -> {output}
x -> {as_strided}
--TORCH-SHAPES--
x: ('run_node', ('', ('val', torch.float32, torch.Size([s0, 2, 8, 8])))) --- 1:4:('batch', 2, 8, 8):
as_strided: ('run_node', ('', ('val', torch.float32, torch.Size([2, 2, 8, 4])))) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(fallback=True)
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%as_strided : [num_users=1] = call_function[target=torch.ops.aten.as_strided.default](args = (%x, [2, 2, 8, 4], [128, 8, 16, 1]), kwargs = {})
return (as_strided,)
-- process.progress --
node 1/3 target=aten.as_strided.default
--
[GraphBuilder-YDK.make_tensor_input] x[1:batchx2x8x8]
[GraphBuilder-YDK] Message completed, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
custom-dec¶
FAILED
The implementation is still incorrect, x='x', shape=('batch', 2, 8, 8), size=[2, 2, 8, 4], stride=[128, 8, 16, 1], storage_offset=None
--DEBUG--
[GraphBuilder-BRS] Message starts, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
--CONSTRAINTS--
batch = {'s0'}
s0 = {'batch'}
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
s0 = 's0'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s0': 'batch'}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_types={'x': 1}
_known_shapes={'x': ('batch', 2, 8, 8)}
_known_value_shape={}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
as_strided -> {output}
x -> {as_strided}
--TORCH-SHAPES--
x: ('run_node', ('', ('val', torch.float32, torch.Size([s0, 2, 8, 8])))) --- 1:4:('batch', 2, 8, 8):
as_strided: ('run_node', ('', ('val', torch.float32, torch.Size([2, 2, 8, 4])))) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(decomposition_table='default')
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%as_strided : [num_users=1] = call_function[target=torch.ops.aten.as_strided.default](args = (%x, [2, 2, 8, 4], [128, 8, 16, 1]), kwargs = {})
return (as_strided,)
-- process.progress --
node 1/3 target=aten.as_strided.default
--
[GraphBuilder-BRS.make_tensor_input] x[1:batchx2x8x8]
[GraphBuilder-BRS] Message completed, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
custom-tracing¶
FAILED
The implementation is still incorrect, x='x', shape=('batch', 2, 8, 8), size=(2, 2, 8, 4), stride=(128, 8, 16, 1), storage_offset=None
--DEBUG--
[GraphBuilder-JMU] Message starts, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_types={'x': 1}
_known_shapes={'x': ('batch', 2, 8, 8)}
_known_value_shape={}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
as_strided -> {output}
x -> {as_strided}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([2, 2, 8, 8])), '')) --- 1:4:('batch', 2, 8, 8):
as_strided: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(tracing=True)
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%as_strided : [num_users=1] = call_function[target=torch.as_strided](args = (%x, (2, 2, 8, 4), (128, 8, 16, 1)), kwargs = {})
return as_strided
-- process.progress --
node 1/3 target=<built-in method as_strided of type object at 0x7fd5086f6ec0>
--
[GraphBuilder-JMU.make_tensor_input] x[1:batchx2x8x8]
[GraphBuilder-JMU] Message completed, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=['s0', 2, 8, 8]
Constant(value_ints=[-1]) -> neg_1
Constant(value=[2, 2, 8, ...) -> val_0
Constant(value=[128, 8, 1...) -> val_1
Constant(value=[4]) -> rank_tensor
Constant(value_int=0) -> indices
SequenceEmpty() -> one_seq
Constant(value_int=4) -> rank_0
Loop(rank_0, , one_seq, indices, body=G1) -> one_seq_16, indices_17
Constant(value_ints=[-1]) -> tmp_18
Reshape(x, tmp_18) -> self_flatten
Constant(value_int=0) -> storage_offset
CastLike(storage_offset, indices_17) -> storage_offset_cast
Add(indices_17, storage_offset_cast) -> indices_19
Gather(self_flatten, indices_19) -> as_strided
output: name='as_strided' type=dtype('float32') shape=[2, 2, 8, 4]
----- subgraph ---- Loop - n6_2 - att.body=G1 -- level=1 -- i,cond_in,one_seq_1,indices_2 -> cond_out,one_seq_15,indices_13
input: name='i' type=dtype('int64') shape=None
input: name='cond_in' type=dtype('bool') shape=None
input: name='one_seq_1' type='NOTENSOR' shape=None
input: name='indices_2' type='NOTENSOR' shape=None
Constant(value_floats=[1.0]) -> tmp_14
SequenceInsert(one_seq_1, tmp_14) -> one_seq_15
Constant(value=4) -> rank_3_cast
Sub(rank_3_cast, i) -> tmp
Constant(value=1) -> int64_1_cast
Sub(tmp, int64_1_cast) -> j
Reshape(j, neg_1) -> j_tensor
Gather(val_0, j_tensor, axis=0) -> size_dim_j
Slice(val_0, j_tensor, rank_tensor) -> size_after_j
Expand(indices_2, size_after_j) -> indices_4
Gather(val_1, j_tensor, axis=0) -> stride_dim_j
Constant(value=0) -> int64_0_cast
Constant(value=1) -> int64_1_5_cast
Range(int64_0_cast, size_dim_j, int64_1_5_cast) -> tmp_6
Mul(tmp_6, stride_dim_j) -> add_value
Constant(value=0) -> int64_0_7_cast
Equal(i, int64_0_7_cast) -> cond
If(cond, then_branch=G2, else_branch=G3) -> shape_11
Reshape(add_value, shape_11) -> add_value_12
Add(indices_4, add_value_12) -> indices_13
Identity(cond_in) -> cond_out
output: name='cond_out' type=dtype('bool') shape=None
output: name='one_seq_15' type='NOTENSOR' shape=None
output: name='indices_13' type='NOTENSOR' shape=None
----- subgraph ---- If - n20 - att.then_branch=G2 -- level=2 -- -> shape
Identity(size_dim_j) -> shape
output: name='shape' type=dtype('int64') shape=[1]
----- subgraph ---- If - n20 - att.else_branch=G3 -- level=2 -- -> shape_10
Cast(size_dim_j, to=1) -> tmp_8
ConcatFromSequence(one_seq_1, axis=0) -> ones
Concat(tmp_8, ones, axis=0) -> shape_9
Cast(shape_9, to=7) -> shape_10
output: name='shape_10' type=dtype('int64') shape=None
----- subgraph ---- If - n20 - att.then_branch=G2 -- level=1 -- -> shape
Identity(size_dim_j) -> shape
output: name='shape' type=dtype('int64') shape=[1]
----- subgraph ---- If - n20 - att.else_branch=G3 -- level=1 -- -> shape_10
Cast(size_dim_j, to=1) -> tmp_8
ConcatFromSequence(one_seq_1, axis=0) -> ones
Concat(tmp_8, ones, axis=0) -> shape_9
Cast(shape_9, to=7) -> shape_10
output: name='shape_10' type=dtype('int64') shape=None
script¶
FAILED
number of input names provided (2) exceeded number of inputs (1)
AtenInterpolate¶
forward¶
def forward(self, x):
    y = torch.nn.functional.interpolate(
        x,
        scale_factor=2.0,
        mode="bilinear",
        recompute_scale_factor=False,
    )
    return y
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 2, 3, 4]
init: name='init7_s2_6_8' type=int64 shape=(2,) -- array([6, 8]) -- _aten_upsample_output_size.rsize
Shape(x, end=2, start=0) -> _shape_x0
Concat(_shape_x0, init7_s2_6_8, axis=0) -> _onx_concat__shape_x00
Resize(x, , , _onx_concat__shape_x00, coordinate_transformation_mode=b'pytorch_half_pixel', mode=b'linear', nearest_mode=b'floor') -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 2, 6, 8]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 2, 3, 4]
init: name='init7_s2_6_8' type=int64 shape=(2,) -- array([6, 8]) -- _aten_upsample_output_size.rsize
Shape(x, end=2, start=0) -> _shape_x0
Concat(_shape_x0, init7_s2_6_8, axis=0) -> _onx_concat__shape_x00
Resize(x, , , _onx_concat__shape_x00, coordinate_transformation_mode=b'pytorch_half_pixel', mode=b'linear', nearest_mode=b'floor') -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 2, 6, 8]
custom-tracing¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 2, 3, 4]
init: name='init7_s2_6_8' type=int64 shape=(2,) -- array([6, 8]) -- _aten_upsample_output_size.rsize
Shape(x, end=2, start=0) -> _shape_x0
Concat(_shape_x0, init7_s2_6_8, axis=0) -> _onx_concat__shape_x00
Resize(x, , , _onx_concat__shape_x00, coordinate_transformation_mode=b'pytorch_half_pixel', mode=b'linear', nearest_mode=b'floor') -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1', 'd_output_2', 'd_output_3']
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 2, 3, 4]
Constant(value_floats=[1.0,1.0,2.0,2.0]) -> val_0
Resize(x, , val_0, keep_aspect_ratio_policy=b'stretch', antialias=0, extrapolation_value=0.00, exclude_outside=0, nearest_mode=b'floor', coordinate_transformation_mode=b'pytorch_half_pixel', cubic_coeff_a=-0.75, mode=b'linear') -> upsample_bilinear2d
output: name='upsample_bilinear2d' type=dtype('float32') shape=['s0', 2, 6, 8]
script¶
FAILED
number of input names provided (2) exceeded number of inputs (1)
AtenNonZero¶
forward¶
def forward(self, x):
    y = torch.nonzero(x)
    return y
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
NonZero(x) -> _onx_nonzero_x0
Transpose(_onx_nonzero_x0, perm=[1,0]) -> output_0
output: name='output_0' type=dtype('int64') shape=['NEWDIM_nonzero', 2]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
NonZero(x) -> _onx_nonzero_x0
Transpose(_onx_nonzero_x0, perm=[1,0]) -> output_0
output: name='output_0' type=dtype('int64') shape=['NEWDIM_nonzero', 2]
custom-tracing¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
NonZero(x) -> _onx_nonzero_x0
Transpose(_onx_nonzero_x0, perm=[1,0]) -> output
output: name='output' type=dtype('int64') shape=['NEWDIM_nonzero', 2]
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='pkg.onnxscript.torch_lib' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 4]
NonZero(x) -> tmp
Transpose(tmp, perm=[1,0]) -> nonzero
output: name='nonzero' type=dtype('int64') shape=['u0', 2]
script¶
FAILED
number of input names provided (3) exceeded number of inputs (1)
AtenNonZeroTuple¶
forward¶
def forward(self, x):
    y = torch.nonzero(x, as_tuple=True)
    return y[0], y[1]
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
NonZero(x) -> _onx_nonzero_x0
Split(_onx_nonzero_x0, num_outputs=2) -> _onx_split_nonzero_x00, _onx_split_nonzero_x01
Reshape(_onx_split_nonzero_x00, init7_s1_-1) -> output_0
Reshape(_onx_split_nonzero_x01, init7_s1_-1) -> output_1
output: name='output_0' type=dtype('int64') shape=['u1']
output: name='output_1' type=dtype('int64') shape=['u1']
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
NonZero(x) -> _onx_nonzero_x0
Split(_onx_nonzero_x0, num_outputs=2) -> _onx_split_nonzero_x00, _onx_split_nonzero_x01
Reshape(_onx_split_nonzero_x00, init7_s1_-1) -> output_0
Reshape(_onx_split_nonzero_x01, init7_s1_-1) -> output_1
output: name='output_0' type=dtype('int64') shape=['u3']
output: name='output_1' type=dtype('int64') shape=['u3']
custom-tracing¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
NonZero(x) -> _onx_nonzero_x0
Split(_onx_nonzero_x0, num_outputs=2) -> _onx_split_nonzero_x00, _onx_split_nonzero_x01
Reshape(_onx_split_nonzero_x00, init7_s1_-1) -> output_0
Reshape(_onx_split_nonzero_x01, init7_s1_-1) -> output_1
output: name='output_0' type=dtype('int64') shape=['d_output_0_0']
output: name='output_1' type=dtype('int64') shape=['d_output_1_0']
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='pkg.onnxscript.torch_lib' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 4]
Constant(value_ints=[1]) -> unbind_axis
NonZero(x) -> tmp
Transpose(tmp, perm=[1,0]) -> nonzero
Split(nonzero, axis=1, num_outputs=2) -> unbind_split_0, unbind_split_1
Squeeze(unbind_split_0, unbind_axis) -> unbind_split_0_squeeze
Identity(unbind_split_0_squeeze) -> getitem
Squeeze(unbind_split_1, unbind_axis) -> unbind_split_1_squeeze
Identity(unbind_split_1_squeeze) -> getitem_1
output: name='getitem' type=dtype('int64') shape=['u0']
output: name='getitem_1' type=dtype('int64') shape=['u0']
script¶
FAILED
number of input names provided (3) exceeded number of inputs (1)
AtenRollPos¶
forward¶
def forward(self, x):
    return torch.roll(x, 1, -1)
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_-1, init7_s1_4, init7_s1_-1) -> _onx_slice_x0
Slice(x, init7_s1_0, init7_s1_-1, init7_s1_-1) -> _onx_slice_x02
Concat(_onx_slice_x0, _onx_slice_x02, axis=-1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3, 4]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_-1, init7_s1_4, init7_s1_-1) -> _onx_slice_x0
Slice(x, init7_s1_0, init7_s1_-1, init7_s1_-1) -> _onx_slice_x02
Concat(_onx_slice_x0, _onx_slice_x02, axis=-1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3, 4]
custom-tracing¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_-1, init7_s1_4, init7_s1_-1) -> _onx_slice_x0
Slice(x, init7_s1_0, init7_s1_-1, init7_s1_-1) -> _onx_slice_x02
Concat(_onx_slice_x0, _onx_slice_x02, axis=-1) -> output
output: name='output' type=dtype('float32') shape=['batch', 3, 4]
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=['s0', 3, 4]
Constant(value_ints=[-1]) -> neg_1
Constant(value=[-1]) -> dim_tensor
Constant(value=[1]) -> shift_tensor
Constant(value=[3]) -> slice_length_3
Constant(value_ints=[0]) -> tmp_4
Slice(x, tmp_4, slice_length_3, dim_tensor) -> suffix
Size(x) -> tmp_5
Reshape(tmp_5, neg_1) -> tmp_6
Slice(x, slice_length_3, tmp_6, dim_tensor) -> prefix
Concat(prefix, suffix, axis=-1) -> roll
output: name='roll' type=dtype('float32') shape=['s0', 3, 4]
script¶
FAILED
number of input names provided (2) exceeded number of inputs (1)
AtenRollRelu¶
forward¶
def forward(self, x):
    return torch.relu(torch.roll(x, -1, -1))
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_1, init7_s1_4, init7_s1_-1) -> _onx_slice_x0
Slice(x, init7_s1_0, init7_s1_1, init7_s1_-1) -> _onx_slice_x02
Concat(_onx_slice_x0, _onx_slice_x02, axis=-1) -> _onx_concat_slice_x00
Relu(_onx_concat_slice_x00) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3, 4]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_1, init7_s1_4, init7_s1_-1) -> _onx_slice_x0
Slice(x, init7_s1_0, init7_s1_1, init7_s1_-1) -> _onx_slice_x02
Concat(_onx_slice_x0, _onx_slice_x02, axis=-1) -> _onx_concat_slice_x00
Relu(_onx_concat_slice_x00) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3, 4]
custom-tracing¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_1, init7_s1_4, init7_s1_-1) -> _onx_slice_x0
Slice(x, init7_s1_0, init7_s1_1, init7_s1_-1) -> _onx_slice_x02
Concat(_onx_slice_x0, _onx_slice_x02, axis=-1) -> _onx_concat_slice_x00
Relu(_onx_concat_slice_x00) -> output
output: name='output' type=dtype('float32') shape=['batch', 3, 4]
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=['s0', 3, 4]
Constant(value_ints=[-1]) -> neg_1
Constant(value=[-1]) -> dim_tensor
Constant(value=[-1]) -> shift_tensor
Constant(value=[1]) -> slice_length_3
Constant(value_ints=[0]) -> tmp_4
Slice(x, tmp_4, slice_length_3, dim_tensor) -> suffix
Size(x) -> tmp_5
Reshape(tmp_5, neg_1) -> tmp_6
Slice(x, slice_length_3, tmp_6, dim_tensor) -> prefix
Concat(prefix, suffix, axis=-1) -> roll
Relu(roll) -> relu
output: name='relu' type=dtype('float32') shape=['s0', 3, 4]
script¶
FAILED
number of input names provided (2) exceeded number of inputs (1)
BuildInIsInstance¶
forward¶
def forward(self, x, lx: list | torch.Tensor):
    if isinstance(lx, list):
        t = lx[0] * lx[1].sum(axis=1, keepdim=True)
        return torch.sigmoid(self.linear(x)) - self.buff + t
    return torch.sigmoid(self.linear(x)) - self.buff + lx
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='init7_s2_1_-1' type=int64 shape=(2,) -- array([ 1, -1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.431, 0.023, -0.047], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.475], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Reshape(linear.weight, init7_s2_-1_1) -> _onx_transpose_p_linear_weight0
Reshape(_onx_transpose_p_linear_weight0, init7_s2_1_-1) -> GemmTransposePattern--_onx_transpose_p_linear_weight0
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='init7_s2_1_-1' type=int64 shape=(2,) -- array([ 1, -1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([0.192, 0.37 , 0.174], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.424], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul_1
Reshape(linear.weight, init7_s2_-1_1) -> _onx_transpose_p_linear_weight0
Reshape(_onx_transpose_p_linear_weight0, init7_s2_1_-1) -> GemmTransposePattern--_onx_transpose_p_linear_weight0
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub_4
Add(sub_4, mul_1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-tracing¶
FAILED
Type is unknown for result 'lx', known_types={'x': 1, 'linear.weight': 1, 'linear.bias': 1, '_sub_Linear_input_1': 1, '_sub_Linear__onx_transpose_weight0': 1, '_sub_Linear__onx_matmul_input_10': 1, '_sub_Linear_linear': 1, '_sub_Linear_output': 1, 'linear': 1, 'sigmoid': 1, 'buff': 1, 'sub': 1}
--DEBUG--
[GraphBuilder-OSU] Message starts, there are 3 initializers, 8 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'},
{'axis': 0, 'input_name': ('lx', 0)},
{'axis': 0, 'input_name': ('lx', 1)}]}
dynamic_dimensions_source_flat=['x', ('lx', 0), ('lx', 1)]
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes={'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>},
{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}],
'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_types={'_sub_Linear__onx_matmul_input_10': 1,
'_sub_Linear__onx_transpose_weight0': 1,
'_sub_Linear_input_1': 1,
'_sub_Linear_linear': 1,
'_sub_Linear_output': 1,
'buff': 1,
'linear': 1,
'linear.bias': 1,
'linear.weight': 1,
'sigmoid': 1,
'sub': 1,
'x': 1}
_known_shapes={'_sub_Linear__onx_matmul_input_10': ('batch', 1),
'_sub_Linear__onx_transpose_weight0': (3, 1),
'_sub_Linear_input_1': ('batch', 3),
'_sub_Linear_linear': ('batch', 1),
'_sub_Linear_output': ('batch', 1),
'buff': (1,),
'linear': ('batch', 1),
'linear.bias': (1,),
'linear.weight': (1, 3),
'sigmoid': ('batch', 1),
'sub': ('batch', 1),
'x': ('batch', 3)}
_known_value_shape={}
_known_constants=['_sub_Linear__onx_transpose_weight0', 'buff', 'linear.bias', 'linear.weight']
_known_ranks={}
--TORCH-USERS--
add -> {output}
buff -> {sub}
linear -> {sigmoid}
lx -> {add}
sigmoid -> {sub}
sub -> {add}
x -> {linear}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), '')) --- 1:2:('batch', 3):
lx: ('run_node', ('', '')) --- :::
linear: ('run_node', ('', '')) --- 1:2:('batch', 1):
sigmoid: ('run_node', ('', '')) --- 1:2:('batch', 1):
buff: ('run_node', ('', '')) --- 1:1:(1,):
sub: ('run_node', ('', '')) --- 1:2:('batch', 1):
add: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(tracing=True)
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%lx : list | torch.Tensor [num_users=1] = placeholder[target=lx]
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %buff), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sub, %lx), kwargs = {})
return add
-- process.progress --
node 6/8 target=<built-in function add>
--
[GraphBuilder-OSU.make_tensor_input] x[1:batchx3]
[GraphBuilder-OSU.make_tensor_input] lx[0:]
[GraphBuilder-OSU.make_initializer] linear.weight[torch.float32:torch.float32:[-0.369383841753006, 0.3992883265018463, -0.3257561922073364]] - SOURCE: GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
[GraphBuilder-OSU.make_initializer] linear.bias[torch.float32:torch.float32:[-0.14722837507724762]] - SOURCE: GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
[GraphBuilder-OSU.make_initializer] buff[torch.float32:torch.float32:[0.5]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-OSU.make_node] .make_nodes [#:# ] Identity:['x']->['_sub_Linear_input_1']
[GraphBuilder-OSU.make_node] linear [#:# ] Transpose:['linear.weight']->['_sub_Linear__onx_transpose_weight0']
[GraphBuilder-OSU.make_node] Opset [##:# ] MatMul:['_sub_Linear_input_1', '_sub_Linear__onx_transpose_weight0']->['_sub_Linear__onx_matmul_input_10']
[GraphBuilder-OSU.make_node] Opset2 [##:# ] Add:['_sub_Linear__onx_matmul_input_10', 'linear.bias']->['_sub_Linear_linear']
[GraphBuilder-OSU.make_node] .output [#:# ] Identity:['_sub_Linear_linear']->['_sub_Linear_output']
[GraphBuilder-OSU.make_node] .make_nodes2 [#:# ] Identity:['_sub_Linear_output']->['linear']
[GraphBuilder-OSU.make_node] sigmoid [#:# ] Sigmoid:['linear']->['sigmoid']
[GraphBuilder-OSU.make_node] sub [##:# ] Sub:['sigmoid', 'buff']->['sub']
[GraphBuilder-OSU] Message completed, there are 3 initializers, 8 nodes, 2 inputs, 2 outputs..
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 3]
input: name='lx_0' type=dtype('float32') shape=['s0', 1]
input: name='lx_1' type=dtype('float32') shape=['s0', 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([0.062, 0.492, 0.576], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.56], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
Constant(value=[1]) -> val_3
ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul_1
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, buff) -> sub_4
Add(sub_4, mul_1) -> add_15
output: name='add_15' type=dtype('float32') shape=['s0', 1]
script¶
FAILED
unable to convert dynamic shapes {'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, 'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}
BuildInLen¶
forward¶
def forward(self, x, lx: list):
    t = lx[0] * lx[1].sum(axis=1, keepdim=True)
    if len(lx) > 2:
        t = t + lx[2].sum(axis=1, keepdim=True)
    return torch.sigmoid(self.linear(x)) - self.buff + t
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='init7_s2_1_-1' type=int64 shape=(2,) -- array([ 1, -1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.355, 0.159, -0.052], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.511], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Reshape(linear.weight, init7_s2_-1_1) -> _onx_transpose_p_linear_weight0
Reshape(_onx_transpose_p_linear_weight0, init7_s2_1_-1) -> GemmTransposePattern--_onx_transpose_p_linear_weight0
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='init7_s2_1_-1' type=int64 shape=(2,) -- array([ 1, -1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.304, -0.505, -0.106], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.286], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul_1
Reshape(linear.weight, init7_s2_-1_1) -> _onx_transpose_p_linear_weight0
Reshape(_onx_transpose_p_linear_weight0, init7_s2_1_-1) -> GemmTransposePattern--_onx_transpose_p_linear_weight0
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub_4
Add(sub_4, mul_1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-tracing¶
FAILED
len(.) expects an integer, len needs to be replaced. You should use _len.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 3]
input: name='lx_0' type=dtype('float32') shape=['s0', 1]
input: name='lx_1' type=dtype('float32') shape=['s0', 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.152, -0.416, 0.442], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.312], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
Constant(value=[1]) -> val_3
ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul_1
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, buff) -> sub_4
Add(sub_4, mul_1) -> add_15
output: name='add_15' type=dtype('float32') shape=['s0', 1]
script¶
FAILED
unable to convert dynamic shapes {'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, 'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}
ComplexPolar¶
forward¶
def forward(self, x, angle):
    return torch.polar(x, angle)
custom-fallback¶
FAILED
[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. Type Error: Type 'tensor(complex64)' of input parameter (_onx_cast_sin_angle00) of operator (Mul) in node (polar5) is invalid.
custom-dec¶
FAILED
[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. Type Error: Type 'tensor(complex64)' of input parameter (_onx_cast_sin_angle00) of operator (Mul) in node (polar5) is invalid.
custom-tracing¶
FAILED
[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. Type Error: Type 'tensor(complex64)' of input parameter (_onx_cast_sin_angle00) of operator (Mul) in node (polar5) is invalid.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='pkg.onnxscript.torch_lib' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 4]
input: name='angle' type=dtype('float32') shape=['s0', 4]
Constant(value=[-1]) -> int64_m1_1d
Cos(angle) -> tmp
Mul(x, tmp) -> tmp_0
Unsqueeze(tmp_0, int64_m1_1d) -> real
Sin(angle) -> tmp_1
Mul(x, tmp_1) -> tmp_2
Constant(value=[-1]) -> int64_m1_1d_3
Unsqueeze(tmp_2, int64_m1_1d_3) -> imag
Concat(real, imag, axis=-1) -> polar
output: name='polar' type=dtype('float32') shape=['s0', 4, 2]
script¶
FAILED
Exporting the operator 'aten::polar' to ONNX opset version 17 is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: https://github.com/pytorch/pytorch/issues.
ControlFlowCond¶
forward¶
def forward(self, x):
    def true_fn(x):
        return torch.sin(x)

    def false_fn(x):
        return torch.cos(x)

    return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0
false_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0
true_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'x'
Sin(x) -> output_0
output: name='output_0' type=? shape=?
----- function name=false_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions' version=1
input: 'x'
Cos(x) -> output_0
output: name='output_0' type=? shape=?
custom-dec¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0
false_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0
true_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'x'
Sin(x) -> output_0
output: name='output_0' type=? shape=?
----- function name=false_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions' version=1
input: 'x'
Cos(x) -> output_0
output: name='output_0' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
FAILED
no dynamic shape
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
ControlFlowCond2Inputs¶
forward¶
def forward(self, x, y):
    def true_fn(x, y):
        return torch.sin(x), torch.cos(x) + y

    def false_fn(x, y):
        return torch.cos(x), torch.sin(x) + y

    return torch.cond(x.sum() > 0, true_fn, false_fn, [x, y])
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=['batch', 3]
output: name='output_1' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0,cond#1
false_graph_0[local_functions](x, y) -> cond#0, cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0,cond#1
true_graph_0[local_functions](x, y) -> cond#0, cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'x'
input: 'y'
Cos(x) -> cos
Add(cos, y) -> output_1
Sin(x) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
----- function name=false_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions' version=1
input: 'x'
input: 'y'
Cos(x) -> output_0
Sin(x) -> sin
Add(sin, y) -> output_1
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-dec¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=['batch', 3]
output: name='output_1' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0,cond#1
false_graph_0[local_functions](x, y) -> cond#0, cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0,cond#1
true_graph_0[local_functions](x, y) -> cond#0, cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'x'
input: 'y'
Cos(x) -> cos
Add(cos, y) -> output_1
Sin(x) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
----- function name=false_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions' version=1
input: 'x'
input: 'y'
Cos(x) -> output_0
Sin(x) -> sin
Add(sin, y) -> output_1
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
FAILED
no dynamic shape
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
ControlFlowCond2Outputs¶
forward¶
def forward(self, x):
    def true_fn(x):
        return torch.sin(x), torch.cos(x)

    def false_fn(x):
        return torch.cos(x), torch.sin(x)

    return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=['batch', 3]
output: name='output_1' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0,cond#1
false_graph_0[local_functions](x) -> cond#0, cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0,cond#1
true_graph_0[local_functions](x) -> cond#0, cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'x'
Cos(x) -> output_1
Sin(x) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
----- function name=false_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions' version=1
input: 'x'
Cos(x) -> output_0
Sin(x) -> output_1
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-dec¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=['batch', 3]
output: name='output_1' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0,cond#1
false_graph_0[local_functions](x) -> cond#0, cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0,cond#1
true_graph_0[local_functions](x) -> cond#0, cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'x'
Cos(x) -> output_1
Sin(x) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
----- function name=false_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions' version=1
input: 'x'
Cos(x) -> output_0
Sin(x) -> output_1
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
FAILED
no dynamic shape
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
ControlFlowCondConstant¶
forward¶
def forward(self, x):
    def true_fn(x):
        return torch.sin(x) - torch.ones(x.shape, dtype=x.dtype)

    def false_fn(x):
        return torch.cos(x) + torch.ones((1, 1024), dtype=x.dtype)

    return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 1024]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1024]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0
false_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0
true_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'x'
Constant(value=[1024]) -> init7_s1_1024
Sin(x) -> sin
Shape(x, end=1, start=0) -> _shape_x0
Concat(_shape_x0, init7_s1_1024, axis=0) -> _onx_concat_unsqueeze_sym_size_int00
ConstantOfShape(_onx_concat_unsqueeze_sym_size_int00, value=[1.0]) -> ones
Sub(sin, ones) -> output_0
output: name='output_0' type=? shape=?
----- function name=false_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions' version=1
input: 'x'
Constant(value=[1, 1024]) -> init7_s2_1_1024
ConstantOfShape(init7_s2_1_1024, value=[1.0]) -> ones
Cos(x) -> cos
Add(cos, ones) -> output_0
output: name='output_0' type=? shape=?
custom-dec¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 1024]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1024]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0
false_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0
true_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'x'
Constant(value=[1024]) -> init7_s1_1024
Shape(x, end=1, start=0) -> _shape_x0
Concat(_shape_x0, init7_s1_1024, axis=0) -> _onx_concat_unsqueeze_sym_size_int00
ConstantOfShape(_onx_concat_unsqueeze_sym_size_int00, value=[1.0]) -> ones
Sin(x) -> sin
Sub(sin, ones) -> output_0
output: name='output_0' type=? shape=?
----- function name=false_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions' version=1
input: 'x'
Constant(value=[1, 1024]) -> init7_s2_1_1024
ConstantOfShape(init7_s2_1_1024, value=[1.0]) -> ones
Cos(x) -> cos
Add(cos, ones) -> output_0
output: name='output_0' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.torch.__subgraph__' version=1
input: name='x' type=dtype('float32') shape=['s0', 1024]
Constant(value=0.0) -> scalar_tensor_default
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, scalar_tensor_default) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=['s0', 1024]
----- subgraph ---- If - node_If_4 - att.then_branch=G1 -- level=1 -- -> sub_3_true_graph_0
Constant(value=[-1]) -> val_1
Shape(x, end=1, start=0) -> val_0_2
Squeeze(val_0_2) -> sym_size_int
Reshape(sym_size_int, val_1, allowzero=0) -> val_2
Sin(x) -> sin
Constant(value=[1024]) -> val_3
Concat(val_2, val_3, axis=0) -> val_4
Constant(value=1.0) -> val_7
Expand(val_7, val_4) -> ones
Sub(sin, ones) -> sub_3_true_graph_0
output: name='sub_3_true_graph_0' type=dtype('float32') shape=['', 1024]
----- subgraph ---- If - node_If_4 - att.else_branch=G2 -- level=1 -- -> add_6_false_graph_0
Constant(value=[[1.0, 1.0...) -> ones_2
Cos(x) -> cos
Add(cos, ones_2) -> add_6_false_graph_0
output: name='add_6_false_graph_0' type=dtype('float32') shape=['s0', 1024]
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
ControlFlowCondNestedModule¶
forward¶
def forward(self, x):
    def true_fn(x):
        return self.submodule(x)

    def false_fn(x):
        return x - self.weight

    y = torch.cond(x.sum() > 0, true_fn, false_fn, [x])
    return y
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: name='x' type=dtype('int64') shape=['batch']
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- shape_type_compute._cast_inputs.1(gt_Scalar)
init: name='submodule.weight' type=float32 shape=(1,) -- array([100.], dtype=float32)-- DynamoInterpret.placeholder.1/P(submodule.weight)
init: name='weight' type=float32 shape=(1,) -- array([42.], dtype=float32)-- DynamoInterpret.placeholder.1/P(weight)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init7_s_0) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch']
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0
false_graph_0[local_functions](x, submodule.weight, weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0
true_graph_0[local_functions](x, submodule.weight, weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions.0
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'p_submodule_weight'
input: 'x'
Cast(x, to=1) -> _onx_cast_x0
Mul(_onx_cast_x0, p_submodule_weight) -> output_0
output: name='output_0' type=? shape=?
----- function name=false_graph_0 domain=local_functions.0
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
input: 'p_submodule_weight'
input: 'x'
Cast(x, to=1) -> _onx_cast_x0
Div(_onx_cast_x0, p_submodule_weight) -> output_0
output: name='output_0' type=? shape=?
----- function name=true_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
input: 'x'
input: 'p_submodule_weight'
input: 'p_weight'
Abs(x) -> abs_1
ReduceSum(abs_1, keepdims=0) -> sum_1
Constant(value=100) -> init7_s_100
Greater(sum_1, init7_s_100) -> gt
If(gt, else_branch=G3, then_branch=G4) -> output_0
output: name='output_0' type=? shape=?
----- subgraph ---- If - cond - att.else_branch=G3 -- level=1 -- -> cond#0
false_graph_0[local_functions.0](p_submodule_weight, x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G4 -- level=1 -- -> cond#0
true_graph_0[local_functions.0](p_submodule_weight, x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=false_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: 'x'
input: 'p_submodule_weight'
input: 'p_weight'
Cast(x, to=1) -> _onx_cast_x0
Sub(_onx_cast_x0, p_weight) -> output_0
output: name='output_0' type=? shape=?
custom-dec¶
opset: domain='' version=18
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: name='x' type=dtype('int64') shape=['batch']
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- shape_type_compute._cast_inputs.1(gt_Scalar)
init: name='weight' type=float32 shape=(1,) -- array([42.], dtype=float32)-- DynamoInterpret.placeholder.1/P(weight)
init: name='submodule.weight' type=float32 shape=(1,) -- array([100.], dtype=float32)-- DynamoInterpret.placeholder.1/P(submodule.weight)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init7_s_0) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch']
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0
false_graph_0[local_functions](x, submodule.weight, weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0
true_graph_0[local_functions](x, submodule.weight, weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions.0
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'p_submodule_weight'
input: 'x'
Cast(x, to=1) -> _onx_cast_x0
Mul(_onx_cast_x0, p_submodule_weight) -> output_0
output: name='output_0' type=? shape=?
----- function name=false_graph_0 domain=local_functions.0
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
input: 'p_submodule_weight'
input: 'x'
Cast(x, to=1) -> _onx_cast_x0
Div(_onx_cast_x0, p_submodule_weight) -> output_0
output: name='output_0' type=? shape=?
----- function name=true_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
input: 'x'
input: 'p_submodule_weight'
input: 'p_weight'
Abs(x) -> abs_1
ReduceSum(abs_1, keepdims=0) -> sum_1
Constant(value=100) -> init7_s_100
Greater(sum_1, init7_s_100) -> gt
If(gt, else_branch=G3, then_branch=G4) -> output_0
output: name='output_0' type=? shape=?
----- subgraph ---- If - cond - att.else_branch=G3 -- level=1 -- -> cond#0
false_graph_0[local_functions.0](p_submodule_weight, x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G4 -- level=1 -- -> cond#0
true_graph_0[local_functions.0](p_submodule_weight, x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=false_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: 'x'
input: 'p_submodule_weight'
input: 'p_weight'
Cast(x, to=1) -> _onx_cast_x0
Sub(_onx_cast_x0, p_weight) -> output_0
output: name='output_0' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.torch.__subgraph__' version=1
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('int64') shape=['s0']
init: name='weight' type=float32 shape=(1,) -- array([42.], dtype=float32)
init: name='submodule.weight' type=float32 shape=(1,) -- array([100.], dtype=float32)
Constant(value=0) -> val_0
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, val_0) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=['s0']
----- subgraph ---- If - node_If_3 - att.then_branch=G1 -- level=1 -- -> getitem_true_graph_0
Abs(x) -> abs_1
ReduceSum(abs_1, noop_with_empty_axes=0, keepdims=0) -> sum_1_2
Constant(value=100) -> val_0_2
Greater(sum_1_2, val_0_2) -> gt_2
If(gt_2, then_branch=G3, else_branch=G4) -> getitem_true_graph_0
output: name='getitem_true_graph_0' type='NOTENSOR' shape=None
----- subgraph ---- If - node_If_4 - att.then_branch=G3 -- level=2 -- -> mul_1_true_graph_0__true_graph_0
Cast(x, to=1) -> convert_element_type_default
Mul(convert_element_type_default, submodule.weight) -> mul_1_true_graph_0__true_graph_0
output: name='mul_1_true_graph_0__true_graph_0' type=dtype('float32') shape=['s0']
----- subgraph ---- If - node_If_4 - att.else_branch=G4 -- level=2 -- -> div_true_graph_0__false_graph_0
Cast(x, to=1) -> convert_element_type_default_2
Div(convert_element_type_default_2, submodule.weight) -> div_true_graph_0__false_graph_0
output: name='div_true_graph_0__false_graph_0' type=dtype('float32') shape=['s0']
----- subgraph ---- If - node_If_4 - att.then_branch=G3 -- level=1 -- -> mul_1_true_graph_0__true_graph_0
Cast(x, to=1) -> convert_element_type_default
Mul(convert_element_type_default, submodule.weight) -> mul_1_true_graph_0__true_graph_0
output: name='mul_1_true_graph_0__true_graph_0' type=dtype('float32') shape=['s0']
----- subgraph ---- If - node_If_4 - att.else_branch=G4 -- level=1 -- -> div_true_graph_0__false_graph_0
Cast(x, to=1) -> convert_element_type_default_2
Div(convert_element_type_default_2, submodule.weight) -> div_true_graph_0__false_graph_0
output: name='div_true_graph_0__false_graph_0' type=dtype('float32') shape=['s0']
----- subgraph ---- If - node_If_3 - att.else_branch=G2 -- level=1 -- -> sub_1_false_graph_0
Cast(x, to=1) -> convert_element_type_default_3
Sub(convert_element_type_default_3, weight) -> sub_1_false_graph_0
output: name='sub_1_false_graph_0' type=dtype('float32') shape=['s0']
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
ControlFlowCondNonZero¶
forward¶
def forward(self, input_ids, image_features, vocab_size):
def then_branch(input_ids, image_features, vocab_size):
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
condition = (input_ids < 0) & (input_ids > -int(1e9))
positions = torch.nonzero(condition, as_tuple=True)
input_ids = input_ids.clamp_min(0).clamp_max(vocab_size)
return (input_ids, positions[0], positions[1])
def else_branch(input_ids, image_features, vocab_size):
r = torch.where(torch.zeros((1, 1), dtype=torch.bool))
return (input_ids, r[0], r[1])
a, b, c = torch.cond(
image_features.numel() > 0,
then_branch,
else_branch,
[input_ids, image_features, vocab_size],
)
return a, b, c
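All exporters fail on this case for the reasons detailed in the error messages below: torch.cond only accepts tensor operands (vocab_size is a plain Python int here), and both branches must return tensors with identical metadata, whereas torch.nonzero yields data-dependent sizes (u0 vs u1). A minimal sketch, under those assumptions, of a cond that satisfies both constraints (it drops the nonzero outputs, so it is not a drop-in replacement):

import torch

# Hedged sketch, not the model above: tensor-only operands and branches whose
# outputs share dtype and shape, which torch.export can reconcile.
def then_branch(input_ids, vocab_size):
    return input_ids.clamp_min(0).clamp_max(vocab_size)

def else_branch(input_ids, vocab_size):
    return input_ids

def run(input_ids, image_features, vocab_size):
    # vocab_size is assumed to be passed as a 0-d int64 tensor, not a Python int.
    return torch.cond(image_features.numel() > 0, then_branch, else_branch,
                      [input_ids, vocab_size])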
custom-fallback¶
FAILED
None of the following options [ExportOptions(), ExportOptions(strict=False), ExportOptions(decomposition_table='default'), ExportOptions(strict=False, decomposition_table='default'), ExportOptions(dynamo=True), ExportOptions(decomposition_table='default', dynamo=True), ExportOptions(jit=True)] worked, args=(T7r2,T1r2,int), kwargs=None, exception=
-----
[(ExportOptions(),
UncapturedHigherOrderOpError('Expected true_fn_output and false_fn_output to have same metadata but found:\npair[1] differ in \'shape: torch.Size([u0]) vs torch.Size([u1])\', where lhs is FakeTensor(..., size=(u0,), dtype=torch.int64) and rhs is FakeTensor(..., size=(u1,), dtype=torch.int64)\npair[2] differ in \'shape: torch.Size([u0]) vs torch.Size([u1])\', where lhs is FakeTensor(..., size=(u0,), dtype=torch.int64) and rhs is FakeTensor(..., size=(u1,), dtype=torch.int64)\n\nfrom user code:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 339, in forward\n a, b, c = torch.cond(\n File "/home/xadupre/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/cond.py", line 138, in cond\n return cond_op(pred, true_fn, false_fn, operands)\n\nSet TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information\n')),
(ExportOptions(strict=False),
RuntimeError('Expect operands to be a tuple of possibly nested dict/list/tuple that only consists of tensor leaves, but got [FakeTensor(..., size=(s1, 12), dtype=torch.int64), FakeTensor(..., size=(s2, s3)), 1025].')),
(ExportOptions(decomposition_table='default'),
UncapturedHigherOrderOpError('Expected true_fn_output and false_fn_output to have same metadata but found:\npair[1] differ in \'shape: torch.Size([u0]) vs torch.Size([u1])\', where lhs is FakeTensor(..., size=(u0,), dtype=torch.int64) and rhs is FakeTensor(..., size=(u1,), dtype=torch.int64)\npair[2] differ in \'shape: torch.Size([u0]) vs torch.Size([u1])\', where lhs is FakeTensor(..., size=(u0,), dtype=torch.int64) and rhs is FakeTensor(..., size=(u1,), dtype=torch.int64)\n\nfrom user code:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 339, in forward\n a, b, c = torch.cond(\n File "/home/xadupre/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/cond.py", line 138, in cond\n return cond_op(pred, true_fn, false_fn, operands)\n\nSet TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information\n')),
(ExportOptions(strict=False, decomposition_table='default'),
RuntimeError('Expect operands to be a tuple of possibly nested dict/list/tuple that only consists of tensor leaves, but got [FakeTensor(..., size=(s1, 12), dtype=torch.int64), FakeTensor(..., size=(s2, s3)), 1025].')),
(ExportOptions(dynamo=True),
UncapturedHigherOrderOpError('Expected true_fn_output and false_fn_output to have same metadata but found:\npair[1] differ in \'shape: torch.Size([u0]) vs torch.Size([u1])\', where lhs is FakeTensor(..., size=(u0,), dtype=torch.int64) and rhs is FakeTensor(..., size=(u1,), dtype=torch.int64)\npair[2] differ in \'shape: torch.Size([u0]) vs torch.Size([u1])\', where lhs is FakeTensor(..., size=(u0,), dtype=torch.int64) and rhs is FakeTensor(..., size=(u1,), dtype=torch.int64)\n\nfrom user code:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 339, in forward\n a, b, c = torch.cond(\n File "/home/xadupre/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/cond.py", line 138, in cond\n return cond_op(pred, true_fn, false_fn, operands)\n\nSet TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information\n')),
(ExportOptions(decomposition_table='default', dynamo=True),
UncapturedHigherOrderOpError('Expected true_fn_output and false_fn_output to have same metadata but found:\npair[1] differ in \'shape: torch.Size([u0]) vs torch.Size([u1])\', where lhs is FakeTensor(..., size=(u0,), dtype=torch.int64) and rhs is FakeTensor(..., size=(u1,), dtype=torch.int64)\npair[2] differ in \'shape: torch.Size([u0]) vs torch.Size([u1])\', where lhs is FakeTensor(..., size=(u0,), dtype=torch.int64) and rhs is FakeTensor(..., size=(u1,), dtype=torch.int64)\n\nfrom user code:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 339, in forward\n a, b, c = torch.cond(\n File "/home/xadupre/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/cond.py", line 138, in cond\n return cond_op(pred, true_fn, false_fn, operands)\n\nSet TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information\n')),
(ExportOptions(jit=True),
RuntimeError("Type 'Tuple[Tensor, Tensor, int]' cannot be traced. Only Tensors and (possibly nested) Lists, Dicts, and Tuples of Tensors can be traced"))]
custom-dec¶
FAILED
Expected true_fn_output and false_fn_output to have same metadata but found:
pair[1] differ in 'shape: torch.Size([u0]) vs torch.Size([u1])', where lhs is FakeTensor(..., size=(u0,), dtype=torch.int64) and rhs is FakeTensor(..., size=(u1,), dtype=torch.int64)
pair[2] differ in 'shape: torch.Size([u0]) vs torch.Size([u1])', where lhs is FakeTensor(..., size=(u0,), dtype=torch.int64) and rhs is FakeTensor(..., size=(u1,), dtype=torch.int64)
from user code:
File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 339, in forward
a, b, c = torch.cond(
File "/home/xadupre/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/cond.py", line 138, in cond
return cond_op(pred, true_fn, false_fn, operands)
Set TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information
custom-tracing¶
FAILED
val is None for node=output, output=(getitem, getitem_1, getitem_2), a='getitem', o='output_0', has_type=False, has_rank=False, has_shape=False,
meta={}
node.__dict__={'graph': <torch.fx.graph.Graph object at 0x7fd2da06f950>, 'name': 'output', 'op': 'output', 'target': 'output', '_input_nodes': {getitem: None, getitem_1: None, getitem_2: None}, '_args': ((getitem, getitem_1, getitem_2),), '_kwargs': {}, 'users': {}, 'type': None, '_sort_key': (11,), '_repr_fn': None, 'meta': {}}
--DEBUG--
[GraphBuilder-UKE] Message starts, there are 2 initializers, 11 nodes, 3 inputs, 3 outputs.
--LOCAL FUNCTIONS--
local_functions,_cb_cond_then_branch_0(['x', 'y', 'z']) -> ['output_0', 'output_1', 'output_2']
local_functions,_cb_cond_else_branch_0(['x', 'y', 'z']) -> ['output_0', 'output_1', 'output_2']
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = WrapSym(batch)
seq_length = WrapSym(seq_length)
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
'seq_length' = <class 'list'>
tuple
'seq_length'
ERR**: <class 'torch.SymInt'>:'seq_length'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'input_ids'},
{'axis': 0, 'input_name': 'image_features'}],
'seq_length': [{'axis': 1, 'input_name': 'image_features'}]}
dynamic_dimensions_source_flat=[0, 1]
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes=({0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>},
{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>,
1: <class 'experimental_experiment.torch_interpreter.eval.model_cases.seq_length'>},
None)
_known_types={'_reshape_init7_s_00': 7,
'_reshape_numel0': 7,
'gt': 9,
'image_features': 1,
'init7_s1_1': 7,
'init7_s_0': 7,
'input_ids': 7,
'numel': 7,
'vocab_size': 7}
_known_shapes={'_reshape_init7_s_00': (1,),
'_reshape_numel0': (1,),
'gt': (),
'image_features': ('batch', 'seq_length'),
'init7_s1_1': (1,),
'init7_s_0': (),
'input_ids': ('batch', 12),
'numel': (),
'vocab_size': ()}
_known_value_shape={}
_known_constants=['_reshape_init7_s_00', 'init7_s1_1', 'init7_s_0']
_known_ranks={}
--TORCH-USERS--
_cb_cond_else_branch_0 -> {condcc}
_cb_cond_then_branch_0 -> {condcc}
condcc -> {getitem_1, getitem_2, getitem}
getitem -> {output}
getitem_1 -> {output}
getitem_2 -> {output}
gt -> {condcc}
image_features -> {numel, condcc}
input_ids -> {condcc}
numel -> {gt}
output -> set()
vocab_size -> {condcc}
--TORCH-SHAPES--
input_ids: ('run_node', (('example_value', torch.int64, torch.Size([2, 12])), '')) --- 7:2:('batch', 12):
image_features: ('run_node', (('example_value', torch.float32, torch.Size([2, 16])), '')) --- 1:2:('batch', 'seq_length'):
vocab_size: ('run_node', (('example_value', torch.int64, torch.Size([])), '')) --- 7:0:():
numel: ('run_node', ('', '')) --- 7:0:():
gt: ('run_node', ('', '')) --- 9:0:():
_cb_cond_then_branch_0: ('run_node', ('', '')) --- :::
_cb_cond_else_branch_0: ('run_node', ('', '')) --- :::
condcc: ('run_node', ('', '')) --- :::
getitem: ('run_node', ('', '')) --- :::
getitem_1: ('run_node', ('', '')) --- :::
getitem_2: ('run_node', ('', '')) --- :::
output: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(tracing=True)
-- process.graph_module --
graph():
%input_ids : [num_users=1] = placeholder[target=input_ids]
%image_features : [num_users=2] = placeholder[target=image_features]
%vocab_size : [num_users=1] = placeholder[target=vocab_size]
%numel : [num_users=1] = call_method[target=numel](args = (%image_features,), kwargs = {})
%gt : [num_users=1] = call_function[target=operator.gt](args = (%numel, 0), kwargs = {})
%_cb_cond_then_branch_0 : [num_users=1] = get_attr[target=_cb_cond_then_branch_0]
%_cb_cond_else_branch_0 : [num_users=1] = get_attr[target=_cb_cond_else_branch_0]
%condcc : [num_users=3] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %_cb_cond_then_branch_0, %_cb_cond_else_branch_0, [%input_ids, %image_features, %vocab_size]), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%condcc, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%condcc, 1), kwargs = {})
%getitem_2 : [num_users=1] = call_function[target=operator.getitem](args = (%condcc, 2), kwargs = {})
return (getitem, getitem_1, getitem_2)
-- process.progress --
node 11/12 target=output
--
[GraphBuilder-UKE.make_tensor_input] input_ids[7:batchx12]
[GraphBuilder-UKE.make_tensor_input] image_features[1:batchxseq_length]
[GraphBuilder-UKE.make_tensor_input] vocab_size[7:]
[GraphBuilder-UKE.make_initializer] init7_s_0[int64:int64:[0]] - SOURCE: shape_type_compute._cast_inputs.1(gt)
[GraphBuilder-UKE.make_initializer] init7_s1_1[int64:int64:[1]] - SOURCE: Opset.make_node.1/Shape##Opset.make_node.1/Shape
[GraphBuilder-UKE.make_node] meth_numel [#:# ] Size:['image_features']->['numel']
[GraphBuilder-UKE.make_node] gt [##:# ] Reshape:['numel', 'init7_s1_1']->['_reshape_numel0']
[GraphBuilder-UKE.make_node] gt2 [##:# ] Reshape:['init7_s_0', 'init7_s1_1']->['_reshape_init7_s_00']
[GraphBuilder-UKE.make_node] gt3 [##:# ] Greater:['numel', 'init7_s_0']->['gt']
[GraphBuilder-UKE.make_node] cond [#:--- ] If:['gt']->['condcc#0', 'condcc#1', 'condcc#2']
[GraphBuilder-UKE.make_node] getitemB_tuple [-:- ] Identity:['condcc#0']->['getitem']
[GraphBuilder-UKE.make_node] getitemB_tuple2 [-:- ] Identity:['condcc#1']->['getitem_1']
[GraphBuilder-UKE.make_node] getitemB_tuple3 [-:- ] Identity:['condcc#2']->['getitem_2']
[GraphBuilder-UKE.make_node] .output [-:- ] Identity:['getitem']->['output_0']
[GraphBuilder-UKE.make_node] .output2 [-:- ] Identity:['getitem_1']->['output_1']
[GraphBuilder-UKE.make_node] .output3 [-:- ] Identity:['getitem_2']->['output_2']
[GraphBuilder-UKE] Message completed, there are 2 initializers, 11 nodes, 3 inputs, 3 outputs.
dynamo-ir¶
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and summit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'RuntimeError'>: Expect operands to be a tuple of possibly nested dict/list/tuple that only consists of tensor leaves, but got [FakeTensor(..., size=(s1, 12), dtype=torch.int64), FakeTensor(..., size=(s2, s3)), 1025].
(Refer to the full stack trace above for more information.)
script¶
FAILED
unable to convert dynamic shapes ({0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>, 1: <class 'experimental_experiment.torch_interpreter.eval.model_cases.seq_length'>}, None)
ControlFlowNestCond¶
forward¶
def forward(self, x):
def true_fn2(x):
def true_fn1(x):
return torch.sin(x)
def false_fn1(x):
return torch.cos(x)
return torch.cond(x.sum() < 0, true_fn1, false_fn1, [x])
def false_fn2(x):
return -x
return torch.cond(x.sum() > 0, true_fn2, false_fn2, [x])
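This case exports with both custom exporters and dynamo-ir; the inner torch.cond becomes an If node nested inside the then-branch of the outer If, as the graphs below show. A small sketch of verifying that nesting on the dynamo-ir output, where branch bodies are inlined subgraphs (the file name is an assumption):

import onnx

# Hypothetical check: count If nodes in the main graph and in the subgraphs of
# the first (outer) If node.
model = onnx.load("control_flow_nest_cond.onnx")
outer_ifs = [n for n in model.graph.node if n.op_type == "If"]
nested_ifs = [
    n
    for att in outer_ifs[0].attribute
    for n in att.g.node
    if n.op_type == "If"
]
print(len(outer_ifs), len(nested_ifs))  # expected: 1 and 1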
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0
false_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0
true_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions.0
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'x'
Sin(x) -> output_0
output: name='output_0' type=? shape=?
----- function name=false_graph_0 domain=local_functions.0
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
input: 'x'
Cos(x) -> output_0
output: name='output_0' type=? shape=?
----- function name=true_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
input: 'x'
Constant(value=0.0) -> init1_s_
ReduceSum(x, keepdims=0) -> sum_1
Less(sum_1, init1_s_) -> lt
If(lt, else_branch=G3, then_branch=G4) -> output_0
output: name='output_0' type=? shape=?
----- subgraph ---- If - cond - att.else_branch=G3 -- level=1 -- -> cond#0
false_graph_0[local_functions.0](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G4 -- level=1 -- -> cond#0
true_graph_0[local_functions.0](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=false_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: 'x'
Neg(x) -> output_0
output: name='output_0' type=? shape=?
custom-dec¶
opset: domain='' version=18
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0
false_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0
true_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions.0
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'x'
Sin(x) -> output_0
output: name='output_0' type=? shape=?
----- function name=false_graph_0 domain=local_functions.0
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
input: 'x'
Cos(x) -> output_0
output: name='output_0' type=? shape=?
----- function name=true_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
input: 'x'
Constant(value=0.0) -> init1_s_
ReduceSum(x, keepdims=0) -> sum_1
Less(sum_1, init1_s_) -> lt
If(lt, else_branch=G3, then_branch=G4) -> output_0
output: name='output_0' type=? shape=?
----- subgraph ---- If - cond - att.else_branch=G3 -- level=1 -- -> cond#0
false_graph_0[local_functions.0](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G4 -- level=1 -- -> cond#0
true_graph_0[local_functions.0](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=false_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: 'x'
Neg(x) -> output_0
output: name='output_0' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.torch.__subgraph__' version=1
input: name='x' type=dtype('float32') shape=['s0', 3]
Constant(value=0.0) -> scalar_tensor_default
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, scalar_tensor_default) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=['s0', 3]
----- subgraph ---- If - node_If_4 - att.then_branch=G1 -- level=1 -- -> getitem_true_graph_0
Constant(value=0.0) -> scalar_tensor_default_2
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1_2
Less(sum_1_2, scalar_tensor_default_2) -> lt
If(lt, then_branch=G3, else_branch=G4) -> getitem_true_graph_0
output: name='getitem_true_graph_0' type='NOTENSOR' shape=None
----- subgraph ---- If - node_If_4_2 - att.then_branch=G3 -- level=2 -- -> sin_true_graph_0__true_graph_0
Sin(x) -> sin_true_graph_0__true_graph_0
output: name='sin_true_graph_0__true_graph_0' type=dtype('float32') shape=['s0', 3]
----- subgraph ---- If - node_If_4_2 - att.else_branch=G4 -- level=2 -- -> cos_true_graph_0__false_graph_0
Cos(x) -> cos_true_graph_0__false_graph_0
output: name='cos_true_graph_0__false_graph_0' type=dtype('float32') shape=['s0', 3]
----- subgraph ---- If - node_If_4_2 - att.then_branch=G3 -- level=1 -- -> sin_true_graph_0__true_graph_0
Sin(x) -> sin_true_graph_0__true_graph_0
output: name='sin_true_graph_0__true_graph_0' type=dtype('float32') shape=['s0', 3]
----- subgraph ---- If - node_If_4_2 - att.else_branch=G4 -- level=1 -- -> cos_true_graph_0__false_graph_0
Cos(x) -> cos_true_graph_0__false_graph_0
output: name='cos_true_graph_0__false_graph_0' type=dtype('float32') shape=['s0', 3]
----- subgraph ---- If - node_If_4 - att.else_branch=G2 -- level=1 -- -> neg_false_graph_0
Neg(x) -> neg_false_graph_0
output: name='neg_false_graph_0' type=dtype('float32') shape=['s0', 3]
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
ControlFlowScan¶
forward¶
def forward(self, x):
init = torch.zeros_like(x[0])
carry, out = torch.ops.higher_order.scan(
ControlFlowScan.add, [init], [x], reverse=False, additional_inputs=[]
)
return carry
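The combine function ControlFlowScan.add is not reproduced in this report; judging from scan_combine_graph_0 in the exported graphs below (a single Add followed by an Identity), it is roughly equivalent to the following sketch:

# Hedged reconstruction with the flat signature of the exported ONNX function:
# the new carry and the scan output are the same running sum.
def add(carry, x_slice):
    y = carry + x_slice
    return y, y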
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
ConstantOfShape(init7_s1_3, value=[0.0]) -> zeros_like
Scan(zeros_like, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> output_0, scan#1
output: name='output_0' type=dtype('float32') shape=[3]
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_zeros_like,scan_0_x -> output_0,output_1
input: name='init_0_zeros_like' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_zeros_like, scan_0_x) -> output_0, output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
Add(arg0_1, arg1_1) -> output_0
Identity(output_0) -> output_1
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-dec¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
ConstantOfShape(init7_s1_3, value=[0.0]) -> zeros_like
Scan(zeros_like, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> output_0, scan#1
output: name='output_0' type=dtype('float32') shape=[3]
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_zeros_like,scan_0_x -> output_0,output_1
input: name='init_0_zeros_like' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_zeros_like, scan_0_x) -> output_0, output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
Add(arg0_1, arg1_1) -> output_0
Identity(output_0) -> output_1
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
Unable to symbolically trace HigherOrderOperators
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and summit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7fd3f920e4e0>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like], [%x], False, []), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)
ControlFlowScan2Carried¶
forward¶
def forward(self, x):
init1 = torch.zeros_like(x[0])
init2 = torch.ones_like(x[0])
carry1, carry2, out1, out2 = torch.ops.higher_order.scan(
ControlFlowScan2Carried.add,
[init1, init2],
[x, x * 2],
# dim=0, # 01/31/2025, not supported anymore
reverse=False,
additional_inputs=[],
)
return carry1, carry2, out1, out2
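As above, the combine ControlFlowScan2Carried.add is only visible through the exported scan_combine_graph_0 below (Add on the first carry, Mul on the second, both echoed as scan outputs), which suggests a flat form along these lines:

# Hedged reconstruction from the ONNX function body, not the repository source.
def add(carry1, carry2, x1, x2):
    c1 = carry1 + x1
    c2 = carry2 * x2
    return c1, c2, c1, c2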
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_' type=float32 shape=() -- array([2.], dtype=float32)-- shape_type_compute._cast_inputs.1(mul_Tensor)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
ConstantOfShape(init7_s1_4, value=[1.0]) -> ones_like
ConstantOfShape(init7_s1_4, value=[0.0]) -> zeros_like
Reshape(init1_s_, init7_s1_1) -> _reshape_init1_s_0
Mul(x, _reshape_init1_s_0) -> _onx_mul_x0
Scan(zeros_like, ones_like, x, _onx_mul_x0, body=G1, num_scan_inputs=2, scan_input_directions=[0,0], scan_output_axes=[0,0], scan_output_directions=[0,0]) -> output_0, output_1, output_2, output_3
output: name='output_0' type=dtype('float32') shape=[4]
output: name='output_1' type=dtype('float32') shape=[4]
output: name='output_2' type=dtype('float32') shape=['batch', 4]
output: name='output_3' type=dtype('float32') shape=['batch', 4]
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_zeros_like,init_1_ones_like,scan_0_x,scan_1_mul -> output_0,output_1,output_2,output_3
input: name='init_0_zeros_like' type='NOTENSOR' shape=None
input: name='init_1_ones_like' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
input: name='scan_1_mul' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_zeros_like, init_1_ones_like, scan_0_x, scan_1_mul) -> output_0, output_1, output_2, output_3
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
output: name='output_2' type='NOTENSOR' shape=None
output: name='output_3' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
input: 'arg2_1'
input: 'arg3_1'
Add(arg0_1, arg2_1) -> output_0
Identity(output_0) -> output_2
Mul(arg1_1, arg3_1) -> output_1
Identity(output_1) -> output_3
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
output: name='output_2' type=? shape=?
output: name='output_3' type=? shape=?
custom-dec¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_' type=float32 shape=() -- array([2.], dtype=float32)-- shape_type_compute._cast_inputs.1(mul_Tensor)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
ConstantOfShape(init7_s1_4, value=[1.0]) -> ones_like
ConstantOfShape(init7_s1_4, value=[0.0]) -> zeros_like
Reshape(init1_s_, init7_s1_1) -> _reshape_init1_s_0
Mul(x, _reshape_init1_s_0) -> _onx_mul_x0
Scan(zeros_like, ones_like, x, _onx_mul_x0, body=G1, num_scan_inputs=2, scan_input_directions=[0,0], scan_output_axes=[0,0], scan_output_directions=[0,0]) -> output_0, output_1, output_2, output_3
output: name='output_0' type=dtype('float32') shape=[4]
output: name='output_1' type=dtype('float32') shape=[4]
output: name='output_2' type=dtype('float32') shape=['batch', 4]
output: name='output_3' type=dtype('float32') shape=['batch', 4]
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_zeros_like,init_1_ones_like,scan_0_x,scan_1_mul -> output_0,output_1,output_2,output_3
input: name='init_0_zeros_like' type='NOTENSOR' shape=None
input: name='init_1_ones_like' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
input: name='scan_1_mul' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_zeros_like, init_1_ones_like, scan_0_x, scan_1_mul) -> output_0, output_1, output_2, output_3
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
output: name='output_2' type='NOTENSOR' shape=None
output: name='output_3' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
input: 'arg2_1'
input: 'arg3_1'
Add(arg0_1, arg2_1) -> output_0
Identity(output_0) -> output_2
Mul(arg1_1, arg3_1) -> output_1
Identity(output_1) -> output_3
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
output: name='output_2' type=? shape=?
output: name='output_3' type=? shape=?
custom-tracing¶
FAILED
Unable to symbolically trace HigherOrderOperators
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and summit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7fd3f920e4e0>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=4] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like, %ones_like], [%x, %mul], False, []), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)
ControlFlowScanCDist¶
forward¶
def forward(self, x):
carry, out = torch.ops.higher_order.scan(
ControlFlowScanCDist.dist,
[x],
[x],
# dim=0, # 01/31/2025, not supported anymore
reverse=False,
additional_inputs=[],
)
return out
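ControlFlowScanCDist.dist is likewise only visible through scan_combine_graph_0 below (Reshape to a row, Sub, square, row-wise ReduceSum, Sqrt, with the carry passed through unchanged). A hedged reconstruction:

import torch

# Sketch inferred from the exported body, not the repository source: the carry is
# the full matrix, each step emits the Euclidean distance of one row to every row
# of the carry.
def dist(carry, row):
    d = torch.sqrt(((carry - row.reshape(1, -1)) ** 2).sum(dim=1))
    return carry, d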
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 4]
Scan(x, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'batch']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_x,scan_0_x -> output_0,output_1
input: name='init_0_x' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_x, scan_0_x) -> output_0, output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
Constant(value=[1, -1]) -> init7_s2_1_-1
Reshape(arg1_1, init7_s2_1_-1) -> reshape
Sub(arg0_1, reshape) -> sub
Mul(sub, sub) -> mul
Constant(value=[1]) -> init7_s1_1
ReduceSum(mul, init7_s1_1, keepdims=0) -> sum_1
Sqrt(sum_1) -> output_1
Identity(arg0_1) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-dec¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 4]
Scan(x, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'batch']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_x,scan_0_x -> output_0,output_1
input: name='init_0_x' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_x, scan_0_x) -> output_0, output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
Constant(value=[1, -1]) -> init7_s2_1_-1
Reshape(arg1_1, init7_s2_1_-1) -> view
Sub(arg0_1, view) -> sub_1
Mul(sub_1, sub_1) -> mul_4
Constant(value=[1]) -> init7_s1_1
ReduceSum(mul_4, init7_s1_1, keepdims=0) -> sum_1
Sqrt(sum_1) -> output_1
Identity(arg0_1) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
Unable to symbolically trace HigherOrderOperators
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and summit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7fd3f920e4e0>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%x], [%x], False, []), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)
ControlFlowScanCDist2¶
forward¶
def forward(self, x):
z = torch.tensor([0], dtype=torch.float32)
y = x.clone()
out = torch.ops.higher_order.scan(
ControlFlowScanCDist2.dist,
[z],
[x],
# dim=0, # 01/31/2025, not supported anymore
reverse=False,
additional_inputs=[y],
)
return out[1]
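The new element compared to the previous case is the closure tensor y threaded through additional_inputs; in the exported graphs below it surfaces as hidden_input_scan_0_clone (or hidden_input_scan_0_clone_1) and becomes a third argument of the combine function, which otherwise computes the same row-to-matrix distance:

import torch

# Hedged reconstruction: arg0_1 = carry z, arg1_1 = current row of x, arg2_1 = y.
def dist(z, row, y):
    return z, torch.sqrt(((y - row.reshape(1, -1)) ** 2).sum(dim=1))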
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_lifted_tensor_0' type=float32 shape=(1,) -- array([0.], dtype=float32)-- DynamoInterpret.placeholder.0
Identity(x) -> hidden_input_scan_0_clone
Scan(c_lifted_tensor_0, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'batch']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_detach_,scan_0_x -> output_0,output_1
input: name='init_0_detach_' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_detach_, scan_0_x, hidden_input_scan_0_clone) -> output_0, output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
input: 'arg2_1'
Constant(value=[1, -1]) -> init7_s2_1_-1
Reshape(arg1_1, init7_s2_1_-1) -> reshape
Sub(arg2_1, reshape) -> sub
Mul(sub, sub) -> mul
Constant(value=[1]) -> init7_s1_1
ReduceSum(mul, init7_s1_1, keepdims=0) -> sum_1
Sqrt(sum_1) -> output_1
Identity(arg0_1) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-dec¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_lifted_tensor_0' type=float32 shape=(1,) -- array([0.], dtype=float32)-- DynamoInterpret.placeholder.0
Identity(x) -> hidden_input_scan_0_clone_1
Scan(c_lifted_tensor_0, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'batch']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_clone,scan_0_x -> output_0,output_1
input: name='init_0_clone' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_clone, scan_0_x, hidden_input_scan_0_clone_1) -> output_0, output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
input: 'arg2_1'
Constant(value=[1, -1]) -> init7_s2_1_-1
Reshape(arg1_1, init7_s2_1_-1) -> view
Sub(arg2_1, view) -> sub_1
Mul(sub_1, sub_1) -> mul_4
Constant(value=[1]) -> init7_s1_1
ReduceSum(mul_4, init7_s1_1, keepdims=0) -> sum_1
Sqrt(sum_1) -> output_1
Identity(arg0_1) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
[CustomProxy(clone)] can only be of (<class 'torch.Tensor'>, <class 'int'>, <class 'torch.SymInt'>) but got (<class 'experimental_experiment.torch_interpreter.tracing.CustomProxy'>,)
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and summit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7fd3f920e4e0>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%clone], [%x], False, [%clone_1]), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)
ControlFlowScanCDistXY¶
forward¶
def forward(self, x, y):
carry, out = torch.ops.higher_order.scan(
ControlFlowScanCDistXY.dist,
[y],
[x],
# dim=0, # 01/31/2025, not supported anymore
reverse=False,
additional_inputs=[],
)
return out
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['x_rows', 'dim']
input: name='y' type=dtype('float32') shape=['y_rows', 'dim']
Scan(y, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=['x_rows', 'y_rows']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_y,scan_0_x -> output_0,output_1
input: name='init_0_y' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_y, scan_0_x) -> output_0, output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
Constant(value=[1, -1]) -> init7_s2_1_-1
Reshape(arg1_1, init7_s2_1_-1) -> reshape
Sub(arg0_1, reshape) -> sub
Mul(sub, sub) -> mul
Constant(value=[1]) -> init7_s1_1
ReduceSum(mul, init7_s1_1, keepdims=0) -> sum_1
Sqrt(sum_1) -> output_1
Identity(arg0_1) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-dec¶
opset: domain='' version=18
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['x_rows', 'dim']
input: name='y' type=dtype('float32') shape=['y_rows', 'dim']
Scan(y, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=['x_rows', 'y_rows']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_y,scan_0_x -> output_0,output_1
input: name='init_0_y' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_y, scan_0_x) -> output_0, output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
Constant(value=[1, -1]) -> init7_s2_1_-1
Reshape(arg1_1, init7_s2_1_-1) -> view
Sub(arg0_1, view) -> sub_4
Mul(sub_4, sub_4) -> mul_7
Constant(value=[1]) -> init7_s1_1
ReduceSum(mul_7, init7_s1_1, keepdims=0) -> sum_1
Sqrt(sum_1) -> output_1
Identity(arg0_1) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
Unable to symbolically trace HigherOrderOperators
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and summit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7fd3f920e4e0>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%y], [%x], False, []), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)
CreateFromShape¶
forward¶
def forward(self, x):
y = torch.ones((x.shape[0], x.shape[1] + 1))
return y
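Both custom exporters and dynamo-ir handle this case by turning the shape arithmetic into Shape, Add, Concat and ConstantOfShape (or Expand) nodes, as shown below. A minimal sketch of obtaining the same symbolic dimensions with torch.export directly (the dimension names dx and dy are assumptions):

import torch

class CreateFromShape(torch.nn.Module):
    def forward(self, x):
        return torch.ones((x.shape[0], x.shape[1] + 1))

# Mark both axes dynamic so the exported graph keeps the symbolic shape
# arithmetic instead of specializing on the example sizes 3 and 4.
dx, dy = torch.export.Dim("dx"), torch.export.Dim("dy")
ep = torch.export.export(
    CreateFromShape(), (torch.randn(3, 4),), dynamic_shapes=({0: dx, 1: dy},)
)
print(ep.graph)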
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- shape_type_compute._cast_inputs.1(add)
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
Shape(x, end=1, start=0) -> _shape_x0
Shape(x, end=2, start=1) -> _shape_x02
Squeeze(_shape_x02) -> sym_size_int_3
Add(sym_size_int_3, init7_s_1) -> _onx_add_sym_size_int_30
Unsqueeze(_onx_add_sym_size_int_30, init7_s1_0) -> _onx_unsqueeze_add0
Concat(_shape_x0, _onx_unsqueeze_add0, axis=0) -> _onx_concat_unsqueeze_sym_size_int_200
ConstantOfShape(_onx_concat_unsqueeze_sym_size_int_200, value=[1.0]) -> output_0
output: name='output_0' type=dtype('float32') shape=['dx', 'dy+1']
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- shape_type_compute._cast_inputs.1(add)
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
Shape(x, end=1, start=0) -> _shape_x0
Shape(x, end=2, start=1) -> _shape_x02
Squeeze(_shape_x02) -> sym_size_int_3
Add(sym_size_int_3, init7_s_1) -> _onx_add_sym_size_int_30
Unsqueeze(_onx_add_sym_size_int_30, init7_s1_0) -> _onx_unsqueeze_add0
Concat(_shape_x0, _onx_unsqueeze_add0, axis=0) -> _onx_concat_unsqueeze_sym_size_int_200
ConstantOfShape(_onx_concat_unsqueeze_sym_size_int_200, value=[1.0]) -> output_0
output: name='output_0' type=dtype('float32') shape=['dx', 'dy+1']
custom-tracing¶
FAILED
ones(): argument 'size' (position 1) must be tuple of ints, but found element of type CustomProxy at pos 0
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 's1']
Constant(value=1.0) -> val_10
Shape(x, end=1, start=0) -> val_0
Squeeze(val_0) -> sym_size_int_2
Shape(x, end=2, start=1) -> val_1
Squeeze(val_1) -> sym_size_int_3
Constant(value=1) -> val_2
Add(sym_size_int_3, val_2) -> add
Constant(value=[-1]) -> val_3
Reshape(sym_size_int_2, val_3, allowzero=0) -> val_4
Constant(value=[-1]) -> val_5
Reshape(add, val_5, allowzero=0) -> val_6
Concat(val_4, val_6, axis=0) -> val_7
Expand(val_10, val_7) -> ones
output: name='ones' type=dtype('float32') shape=['s0', 's1 + 1']
script¶
FAILED
number of input names provided (4) exceeded number of inputs (1)
CreateFromShapeThroughFunction¶
forward¶
def forward(self, x):
dy1 = CreateFromShapeThroughFunction.add_one(x.shape[1])
y = torch.ones((x.shape[0], dy1))
return y
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- shape_type_compute._cast_inputs.1(add)
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
Shape(x, end=1, start=0) -> _shape_x0
Shape(x, end=2, start=1) -> _shape_x02
Squeeze(_shape_x02) -> sym_size_int_3
Add(sym_size_int_3, init7_s_1) -> _onx_add_sym_size_int_30
Unsqueeze(_onx_add_sym_size_int_30, init7_s1_0) -> _onx_unsqueeze_add0
Concat(_shape_x0, _onx_unsqueeze_add0, axis=0) -> _onx_concat_unsqueeze_sym_size_int_200
ConstantOfShape(_onx_concat_unsqueeze_sym_size_int_200, value=[1.0]) -> output_0
output: name='output_0' type=dtype('float32') shape=['dx', 'dy+1']
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- shape_type_compute._cast_inputs.1(add)
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
Shape(x, end=1, start=0) -> _shape_x0
Shape(x, end=2, start=1) -> _shape_x02
Squeeze(_shape_x02) -> sym_size_int_3
Add(sym_size_int_3, init7_s_1) -> _onx_add_sym_size_int_30
Unsqueeze(_onx_add_sym_size_int_30, init7_s1_0) -> _onx_unsqueeze_add0
Concat(_shape_x0, _onx_unsqueeze_add0, axis=0) -> _onx_concat_unsqueeze_sym_size_int_200
ConstantOfShape(_onx_concat_unsqueeze_sym_size_int_200, value=[1.0]) -> output_0
output: name='output_0' type=dtype('float32') shape=['dx', 'dy+1']
custom-tracing¶
FAILED
ones(): argument 'size' (position 1) must be tuple of ints, but found element of type CustomProxy at pos 0
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 's1']
Constant(value=1.0) -> val_10
Shape(x, end=1, start=0) -> val_0
Squeeze(val_0) -> sym_size_int_2
Shape(x, end=2, start=1) -> val_1
Squeeze(val_1) -> sym_size_int_3
Constant(value=1) -> val_2
Add(sym_size_int_3, val_2) -> add
Constant(value=[-1]) -> val_3
Reshape(sym_size_int_2, val_3, allowzero=0) -> val_4
Constant(value=[-1]) -> val_5
Reshape(add, val_5, allowzero=0) -> val_6
Concat(val_4, val_6, axis=0) -> val_7
Expand(val_10, val_7) -> ones
output: name='ones' type=dtype('float32') shape=['s0', 's1 + 1']
script¶
FAILED
number of input names provided (4) exceeded number of inputs (1)
CropLastDimensionWithTensorContent¶
forward¶
def forward(self, x, shape):
return x[..., : shape[0]]
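Every exporter fails here because the slice length comes from a tensor value: shape[0] turns into an unbacked symbol (u0), which dynamo rejects for slicing and non-strict export cannot specialize. A hedged sketch of one possible direction, not the exporters' fix, is to make the length explicit and bound it with torch._check; whether it exports cleanly still depends on the PyTorch version:

import torch

# Hypothetical rewrite of the forward above: read the crop length once and tell
# the exporter its bounds so the unbacked SymInt u0 can be reasoned about.
def forward(x, shape):
    n = shape[0].item()            # unbacked SymInt during export
    torch._check(n >= 0)
    torch._check(n <= x.shape[-1])
    return x.narrow(-1, 0, n)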
custom-fallback¶
- class GraphModule(torch.nn.Module):
- def forward(self, L_x_: "f32[s0, s1, s2][s1*s2, s2, 1]cpu", L_shape_: "i64[1][1]cpu"):
      l_x_ = L_x_
      l_shape_ = L_shape_
      # File: /home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py:873 in forward, code: return x[..., : shape[0]]
      getitem: "i64[][]cpu" = l_shape_[0];  l_shape_ = getitem = None
- def forward(self, arg0_1: "f32[s0, s1, s2]", arg1_1: "i64[1]"):
      # File: /home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py:873 in forward, code: return x[..., : shape[0]]
      select: "i64[]" = torch.ops.aten.select.int(arg1_1, 0, 0);  arg1_1 = None
      item: "Sym(u0)" = torch.ops.aten.item.default(select);  select = item = None
- class GraphModule(torch.nn.Module):
- def forward(self, L_x_: "f32[s0, s1, s2][s1*s2, s2, 1]cpu", L_shape_: "i64[1][1]cpu"):
      l_x_ = L_x_
      l_shape_ = L_shape_
      # File: /home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py:873 in forward, code: return x[..., : shape[0]]
      getitem: "i64[][]cpu" = l_shape_[0];  l_shape_ = getitem = None
- def forward(self, arg0_1: "f32[s0, s1, s2]", arg1_1: "i64[1]"):
      # File: /home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py:873 in forward, code: return x[..., : shape[0]]
      select: "i64[]" = torch.ops.aten.select.int(arg1_1, 0, 0);  arg1_1 = None
      item: "Sym(u0)" = torch.ops.aten.item.default(select);  select = item = None
- def forward(self, arg0_1: "f32[s0, s1, s2]", arg1_1: "i64[1]"):
      # No stacktrace found for following nodes
      select: "i64[]" = torch.ops.aten.select.int(arg1_1, 0, 0);  arg1_1 = None
      _to_copy: "i32[]" = torch.ops.aten._to_copy.default(select, dtype = torch.int32);  select = None
      _local_scalar_dense: "Sym(u0)" = torch.ops.aten._local_scalar_dense.default(_to_copy);  _to_copy = None
      slice_1 = torch.ops.aten.slice.Tensor(arg0_1, 2, 0, _local_scalar_dense);  arg0_1 = _local_scalar_dense = slice_1 = None
FAILED
None of the following options [ExportOptions(), ExportOptions(strict=False), ExportOptions(decomposition_table='default'), ExportOptions(strict=False, decomposition_table='default'), ExportOptions(dynamo=True), ExportOptions(decomposition_table='default', dynamo=True), ExportOptions(jit=True)] worked, args=(T1r3,T7r1), kwargs=None, exception=
-----
[(ExportOptions(),
Unsupported('Dynamic slicing on data-dependent value is not supported\n\nfrom user code:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 873, in forward\n return x[..., : shape[0]]\n\nSet TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information\n')),
(ExportOptions(strict=False),
GuardOnDataDependentSymNode('Could not extract specialized integer from data-dependent expression u0 (unhinted: u0). (Size-like symbols: none)\n\nCaused by: (_export/non_strict_utils.py:670 in __torch_function__)\nFor more information, run with TORCH_LOGS="dynamic"\nFor extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"\nIf you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\nFor more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing\n\nFor C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\n\nThe following call raised this error:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 873, in forward\n return x[..., : shape[0]]\n')),
(ExportOptions(decomposition_table='default'),
Unsupported('Dynamic slicing on data-dependent value is not supported\n\nfrom user code:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 873, in forward\n return x[..., : shape[0]]\n\nSet TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information\n')),
(ExportOptions(strict=False, decomposition_table='default'),
GuardOnDataDependentSymNode('Could not extract specialized integer from data-dependent expression u0 (unhinted: u0). (Size-like symbols: none)\n\nCaused by: (_export/non_strict_utils.py:670 in __torch_function__)\nFor more information, run with TORCH_LOGS="dynamic"\nFor extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"\nIf you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\nFor more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing\n\nFor C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\n\nThe following call raised this error:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 873, in forward\n return x[..., : shape[0]]\n')),
(ExportOptions(dynamo=True),
UserError('Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs` has 2 elements, but `dynamic_shapes` has 1 elements\nFor more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation')),
(ExportOptions(decomposition_table='default', dynamo=True),
UserError('Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs` has 2 elements, but `dynamic_shapes` has 1 elements\nFor more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation')),
(ExportOptions(jit=True),
GuardOnDataDependentSymNode('Could not guard on data-dependent expression u0 < 0 (unhinted: u0 < 0). (Size-like symbols: none)\n\nCaused by: (_decomp/decompositions.py:734 in slice_forward)\nFor more information, run with TORCH_LOGS="dynamic"\nFor extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"\nIf you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\nFor more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing\n\nFor C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\n\nThe following call raised this error:\n File "<string>", line 1, in <lambda>\n\n\nWhile executing %slice_tensor : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 2, 0, %_local_scalar_dense_default, 1), kwargs = {})\nGraphModule: class GraphModule(torch.nn.Module):\n def forward(self, x, shape):\n # No stacktrace found for following nodes\n select_int = torch.ops.aten.select.int(shape, 0, 0); shape = None\n _to_copy_default = torch.ops.aten._to_copy.default(select_int, dtype = torch.int32); select_int = None\n _local_scalar_dense_default = torch.ops.aten._local_scalar_dense.default(_to_copy_default); _to_copy_default = None\n slice_tensor = torch.ops.aten.slice.Tensor(x, 2, 0, _local_scalar_dense_default, 1); x = _local_scalar_dense_default = None\n return slice_tensor\n \n\nOriginal traceback:\nNone'))]
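All of the strict and non-strict attempts above stumble on the same thing: the slice bound is a data-dependent value (the `u0` in the errors, read out of the `shape` tensor), so the tracer cannot guard on it. A commonly suggested mitigation is to give the tracer explicit size hints with `torch._check`; a minimal sketch of such a rewrite, with no guarantee that it unblocks every exporter listed here:
<<<
import torch


class CropWithHints(torch.nn.Module):
    # hypothetical rewrite of the forward above
    def forward(self, x, shape):
        n = shape[0].item()              # data-dependent scalar (the u0 in the errors)
        torch._check_is_size(n)          # declare n as a non-negative size
        torch._check(n <= x.shape[-1])   # bound it by the last dimension of x
        return x[..., :n]
>>>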
custom-dec¶
FAILED
When `dynamic_shapes` is specified as a dict, its top-level keys must be the arg names ['x', 'shape'] of `inputs`, but here they are ['x']. Alternatively, you could also ignore arg names entirely and specify `dynamic_shapes` as a list/tuple matching `inputs`. For more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation
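Here the model itself is not the problem: validation fails because `dynamic_shapes` only names `x` while the forward takes two positional inputs. A minimal sketch of a call that satisfies this check, using an illustrative two-input module rather than the case above (which would still hit the data-dependent slicing issue):
<<<
import torch
from torch.export import Dim, export


class TwoInputs(torch.nn.Module):
    def forward(self, x, shape):
        return x + shape.to(x.dtype).sum()


x, shape = torch.randn(2, 4, 6), torch.tensor([3])
# One entry per positional input; an input with no dynamic dimension maps to None.
ep = export(TwoInputs(), (x, shape),
            dynamic_shapes={"x": {0: Dim("batch")}, "shape": None})
>>>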
custom-tracing¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='shape' type=dtype('int64') shape=[1]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='getitem_1_axis' type=int64 shape=(1,) -- array([-1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_1_step' type=int64 shape=(1,) -- array([1]) -- DynamoInterpreter._getitem_slice.3
Gather(shape, init7_s1_0) -> _onx_gather_shape0
Slice(x, init7_s1_0, _onx_gather_shape0, getitem_1_axis, getitem_1_step) -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1', 'd_output_2']
dynamo-ir¶
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'torch._dynamo.exc.UserError'>: When `dynamic_shapes` is specified as a dict, its top-level keys must be the arg names ['x', 'shape'] of `inputs`, but here they are ['x']. Alternatively, you could also ignore arg names entirely and specify `dynamic_shapes` as a list/tuple matching `inputs`. For more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation
(Refer to the full stack trace above for more information.)
script¶
FAILED
number of input names provided (3) exceeded number of inputs (2)
CropLastDimensionWithTensorShape¶
forward¶
def forward(self, x, y):
return x[..., : y.shape[0]]
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='y' type=dtype('float32') shape=['crop']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape
Shape(y, end=1, start=0) -> _shape_y0
Slice(x, init7_s1_0, _shape_y0, init7_s1_2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4, 'crop']
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='y' type=dtype('float32') shape=['crop']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape
Shape(y, end=1, start=0) -> _shape_y0
Slice(x, init7_s1_0, _shape_y0, init7_s1_2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4, 'crop']
custom-tracing¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='y' type=dtype('float32') shape=['crop']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='getitem_1_axis' type=int64 shape=(1,) -- array([-1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_1_step' type=int64 shape=(1,) -- array([1]) -- DynamoInterpreter._getitem_slice.3
Shape(y) -> getattr_1
Gather(getattr_1, init7_s1_0) -> _onx_gather_getattr_10
Slice(x, init7_s1_0, _onx_gather_getattr_10, getitem_1_axis, getitem_1_step) -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1', 'd_output_2']
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 4, 4]
input: name='y' type=dtype('float32') shape=['s1']
Constant(value_ints=[1]) -> val_12
Shape(y, end=1, start=0) -> val_0
Squeeze(val_0) -> sym_size_int_4
Constant(value=[0]) -> val_4
Constant(value_ints=[-1]) -> val_6
Reshape(sym_size_int_4, val_6, allowzero=0) -> val_7
Constant(value=[2]) -> val_11
Slice(x, val_4, val_7, val_11, val_12) -> slice_1
output: name='slice_1' type=dtype('float32') shape=['s0', 4, 's1']
script¶
FAILED
number of input names provided (3) exceeded number of inputs (2)
InplaceAdd¶
forward¶
def forward(self, x):
x += self.bias
return x
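Every exporter that succeeds below functionalizes the in-place update into a plain `Add`; a rough out-of-place equivalent of this forward, assuming `self.bias` has shape (1, 4) as in the graphs that follow:
<<<
import torch


def forward_functional(x: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
    # what the exported Add node computes; the ONNX graph never mutates x in place
    return x + bias
>>>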
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
Identity(output_0) -> output_1
output: name='output_1' type=dtype('float32') shape=['batch', 4]
custom-tracing¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.get_attr.0
Add(x, bias) -> output
output: name='output' type=dtype('float32') shape=['batch', 4]
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add_3
output: name='add_3' type=dtype('float32') shape=['s0', 4]
script¶
FAILED
number of input names provided (3) exceeded number of inputs (1)
InplaceAdd2¶
forward¶
def forward(self, x):
x.add_(self.bias)
return x
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
Identity(output_0) -> output_1
output: name='output_1' type=dtype('float32') shape=['batch', 4]
custom-tracing¶
FAILED
Unable to interpret method 'aten_meth_add_', args=(x, bias), kwargs={}, dispatcher=None
--DEBUG--
[GraphBuilder-MOM] Message starts, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_types={'bias': 1, 'x': 1}
_known_shapes={'bias': (1, 4), 'x': ('batch', 4)}
_known_value_shape={}
_known_constants=['bias']
_known_ranks={}
--TORCH-USERS--
add_ -> {output}
bias -> {add_}
x -> {add_}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([3, 4])), '')) --- 1:2:('batch', 4):
bias: ('run_node', ('', '')) --- 1:2:(1, 4):
add_: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(tracing=True)
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%bias : [num_users=1] = get_attr[target=bias]
%add_ : [num_users=1] = call_method[target=add_](args = (%x, %bias), kwargs = {})
return add_
-- process.progress --
node 2/4 target=add_
--
[GraphBuilder-MOM.make_tensor_input] x[1:batchx4]
[GraphBuilder-MOM.make_initializer] bias[torch.float32:torch.float32:[1.0, 1.0, 1.0, 1.0]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-MOM] Message completed, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add_3
output: name='add_3' type=dtype('float32') shape=['s0', 4]
script¶
FAILED
number of input names provided (3) exceeded number of inputs (1)
InplaceAdd_Mul¶
forward¶
def forward(self, x):
x.add_(self.bias)
return x * 2
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init1_s_' type=float32 shape=() -- array([2.], dtype=float32)-- shape_type_compute._cast_inputs.1(mul_Tensor)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
Add(x, c_bias) -> add_
Reshape(init1_s_, init7_s1_1) -> _reshape_init1_s_0
Mul(add_, _reshape_init1_s_0) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init1_s_' type=float32 shape=() -- array([2.], dtype=float32)-- shape_type_compute._cast_inputs.1(mul_Tensor)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
Add(x, c_bias) -> output_0
Reshape(init1_s_, init7_s1_1) -> _reshape_init1_s_0
Mul(output_0, _reshape_init1_s_0) -> output_1
output: name='output_1' type=dtype('float32') shape=['batch', 4]
custom-tracing¶
FAILED
Unable to interpret method 'aten_meth_add_', args=(x, bias), kwargs={}, dispatcher=None
--DEBUG--
[GraphBuilder-QVG] Message starts, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_types={'bias': 1, 'x': 1}
_known_shapes={'bias': (1, 4), 'x': ('batch', 4)}
_known_value_shape={}
_known_constants=['bias']
_known_ranks={}
--TORCH-USERS--
add_ -> {mul}
bias -> {add_}
x -> {add_}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([3, 4])), '')) --- 1:2:('batch', 4):
bias: ('run_node', ('', '')) --- 1:2:(1, 4):
add_: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(tracing=True)
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%bias : [num_users=1] = get_attr[target=bias]
%add_ : [num_users=1] = call_method[target=add_](args = (%x, %bias), kwargs = {})
%mul : [num_users=1] = call_function[target=operator.mul](args = (%add_, 2), kwargs = {})
return mul
-- process.progress --
node 2/5 target=add_
--
[GraphBuilder-QVG.make_tensor_input] x[1:batchx4]
[GraphBuilder-QVG.make_initializer] bias[torch.float32:torch.float32:[1.0, 1.0, 1.0, 1.0]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-QVG] Message completed, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add_3
Constant(value=2.0) -> scalar_tensor_default
Mul(add_3, scalar_tensor_default) -> mul_4
output: name='mul_4' type=dtype('float32') shape=['s0', 4]
script¶
FAILED
number of input names provided (3) exceeded number of inputs (1)
InplaceCloneAdd¶
forward¶
def forward(self, x):
x = x.clone()
x.add_(self.bias)
return x
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
custom-tracing¶
FAILED
Unable to interpret method 'aten_meth_add_', args=(clone, bias), kwargs={}, dispatcher=None
--DEBUG--
[GraphBuilder-SUY] Message starts, there are 1 initializers, 1 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_types={'bias': 1, 'clone': 1, 'x': 1}
_known_shapes={'bias': (1, 4), 'clone': ('batch', 4), 'x': ('batch', 4)}
_known_value_shape={}
_known_constants=['bias']
_known_ranks={}
--TORCH-USERS--
add_ -> {output}
bias -> {add_}
clone -> {add_}
x -> {clone}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([3, 4])), '')) --- 1:2:('batch', 4):
clone: ('run_node', ('', '')) --- 1:2:('batch', 4):
bias: ('run_node', ('', '')) --- 1:2:(1, 4):
add_: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(tracing=True)
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%clone : [num_users=1] = call_method[target=clone](args = (%x,), kwargs = {})
%bias : [num_users=1] = get_attr[target=bias]
%add_ : [num_users=1] = call_method[target=add_](args = (%clone, %bias), kwargs = {})
return add_
-- process.progress --
node 3/5 target=add_
--
[GraphBuilder-SUY.make_tensor_input] x[1:batchx4]
[GraphBuilder-SUY.make_initializer] bias[torch.float32:torch.float32:[1.0, 1.0, 1.0, 1.0]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-SUY.make_node] .clone [#:# ] Identity:['x']->['clone']
[GraphBuilder-SUY] Message completed, there are 1 initializers, 1 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add_6
output: name='add_6' type=dtype('float32') shape=['s0', 4]
script¶
FAILED
number of input names provided (3) exceeded number of inputs (1)
InplaceSetItemEllipsis_1¶
forward¶
def forward(self, index, update):
copy = self.params.clone()
copy[..., index] = update
return copy
custom-fallback¶
FAILED
Issue with one input name and its associated dynamic shape dyn_name='x', input_name=index
--DEBUG--
[GraphBuilder-KNG] Message starts, there are 12 initializers, 18 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = WrapSym(batch)
s0 = 's0'
s0*8192 = 's0*8192'
s1 = 's1'
s2 = 's2'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_types={'_onx_add_add_mul__reshape_init7_s1_00000': 7,
'_onx_add_mul__reshape_init7_s1_0000': 7,
'_onx_expand_update0': 1,
'_onx_gatherelements_init7_s32768_0': 7,
'_onx_mul__reshape_init7_s1_000': 7,
'_onx_mul__reshape_init7_s8192_00': 7,
'_onx_scatterelements__reshape_clone00': 1,
'_reshape_add_add_mul__reshape_init7_s1_000000': 7,
'_reshape_clone0': 1,
'_reshape_expand_update00': 1,
'_reshape_index0': 7,
'_reshape_init7_s1_00': 7,
'_reshape_init7_s8192_0': 7,
'_shape_index0': 7,
'_shape_init7_s1_00': 7,
'_shape_init7_s8192_20': 7,
'add-_onx_mul__reshape_init7_s1_000': 7,
'b_lifted_tensor_5': 1,
'clone': 1,
'index': 7,
'index_put_': 1,
'init7_s1_-1': 7,
'init7_s1_0': 7,
'init7_s1_32768': 7,
'init7_s1_4': 7,
'init7_s32768_': 7,
'init7_s3_-1_1_1': 7,
'init7_s3_1_-1_1': 7,
'init7_s3_1_1_-1': 7,
'init7_s3_1_8192_4': 7,
'init7_s8192_': 7,
'init7_s8192_2': 7,
'output_0': 1,
'update': 1}
_known_shapes={'_onx_add_add_mul__reshape_init7_s1_00000': (1, 8192, 's0'),
'_onx_add_mul__reshape_init7_s1_0000': (1, 8192, 1),
'_onx_concat__shape_init7_s1_000': (3,),
'_onx_expand_update0': (1, 8192, 's0'),
'_onx_gatherelements_init7_s32768_0': ('s0*8192',),
'_onx_mul__reshape_init7_s1_000': (1, 1, 1),
'_onx_mul__reshape_init7_s8192_00': (1, 8192, 1),
'_onx_scatterelements__reshape_clone00': (32768,),
'_reshape_add_add_mul__reshape_init7_s1_000000': ('s0*8192',),
'_reshape_clone0': (32768,),
'_reshape_expand_update00': ('s0*8192',),
'_reshape_index0': (1, 1, 's0'),
'_reshape_init7_s1_00': (1, 1, 1),
'_reshape_init7_s8192_0': (1, 8192, 1),
'_shape_index0': (1,),
'_shape_init7_s1_00': (1,),
'_shape_init7_s8192_20': (1,),
'add-_onx_mul__reshape_init7_s1_000': (1, 1, 's0'),
'b_lifted_tensor_5': (1, 8192, 4),
'clone': (1, 8192, 4),
'index': ('s0',),
'index_put_': (1, 8192, 4),
'init7_s1_-1': (1,),
'init7_s1_0': (1,),
'init7_s1_32768': (1,),
'init7_s1_4': (1,),
'init7_s32768_': (32768,),
'init7_s3_-1_1_1': (3,),
'init7_s3_1_-1_1': (3,),
'init7_s3_1_1_-1': (3,),
'init7_s3_1_8192_4': (3,),
'init7_s8192_': (8192,),
'init7_s8192_2': (8192,),
'output_0': (1, 8192, 4),
'update': ('s1', 's2')}
_known_value_shape={'_onx_concat__shape_init7_s1_000': (1, 8192, 's0'),
'_shape_index0': ('s0',),
'_shape_init7_s1_00': (1,),
'_shape_init7_s8192_20': (8192,),
'init7_s1_0': (0,),
'init7_s1_32768': (32768,),
'init7_s1_4': (4,),
'init7_s8192_2': '(int,...)#8192'}
_known_constants=['_onx_mul__reshape_init7_s1_000',
'_onx_mul__reshape_init7_s8192_00',
'_reshape_clone0',
'_reshape_init7_s1_00',
'_reshape_init7_s8192_0',
'_shape_init7_s1_00',
'_shape_init7_s8192_20',
'b_lifted_tensor_5',
'init7_s1_-1',
'init7_s1_0',
'init7_s1_32768',
'init7_s1_4',
'init7_s32768_',
'init7_s3_-1_1_1',
'init7_s3_1_-1_1',
'init7_s3_1_1_-1',
'init7_s3_1_8192_4',
'init7_s8192_',
'init7_s8192_2']
_known_ranks={}
--TORCH-USERS--
b_lifted_tensor_5 -> {clone}
clone -> {index_put_}
index -> {index_put_}
index_put_ -> {output}
output -> set()
update -> {index_put_}
--TORCH-SHAPES--
b_lifted_tensor_5: ('run_node', ('', ('val', torch.float32, torch.Size([1, 8192, 4])))) --- 1:3:(1, 8192, 4):
index: ('run_node', ('', ('val', torch.int64, torch.Size([s0])))) --- 7:1:('s0',):
update: ('run_node', ('', ('val', torch.float32, torch.Size([s1, s2])))) --- 1:2:('s1', 's2'):
clone: ('run_node', ('', ('val', torch.float32, torch.Size([1, 8192, 4])))) --- 1:3:(1, 8192, 4):
index_put_: ('run_node', ('', ('val', torch.float32, torch.Size([1, 8192, 4])))) --- 1:3:(1, 8192, 4):
output: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(fallback=True)
-- process.graph_module --
graph():
%b_lifted_tensor_5 : [num_users=1] = placeholder[target=b_lifted_tensor_5]
%index : [num_users=1] = placeholder[target=index]
%update : [num_users=1] = placeholder[target=update]
%clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%b_lifted_tensor_5,), kwargs = {})
%index_put_ : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%clone, [None, None, %index], %update), kwargs = {})
return (index_put_,)
-- process.progress --
node 5/6 target=output
--
[GraphBuilder-KNG.make_tensor_input] index[7:s0]
[GraphBuilder-KNG.make_tensor_input] update[1:s1xs2]
[GraphBuilder-KNG.make_initializer] b_lifted_tensor_5[torch.float32:torch.float32] - SOURCE: DynamoInterpret.placeholder.0
[GraphBuilder-KNG.make_initializer] init7_s1_0[int64:int64:[0]] - SOURCE: Opset.make_node.1/Shape##Opset.make_node.1/Shape
[GraphBuilder-KNG.make_initializer] init7_s3_-1_1_1[int64:int64:[-1, 1, 1]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-KNG.make_initializer] init7_s8192_[int64:int64] - SOURCE: Opset.make_node.0
[GraphBuilder-KNG.make_initializer] init7_s3_1_-1_1[int64:int64:[1, -1, 1]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-KNG.make_initializer] init7_s3_1_1_-1[int64:int64:[1, 1, -1]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-KNG.make_initializer] init7_s8192_2[int64:int64] - SOURCE: Opset.make_node.0
[GraphBuilder-KNG.make_initializer] init7_s1_32768[int64:int64:[32768]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-KNG.make_initializer] init7_s1_4[int64:int64:[4]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-KNG.make_initializer] init7_s1_-1[int64:int64:[-1]] - SOURCE: Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
[GraphBuilder-KNG.make_initializer] init7_s32768_[int64:int64] - SOURCE: Opset.make_node.0
[GraphBuilder-KNG.make_initializer] init7_s3_1_8192_4[int64:int64:[1, 8192, 4]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-KNG.make_node] index_put_3ioioi1x3v2_ [##:# ] Reshape:['init7_s1_0', 'init7_s3_-1_1_1']->['_reshape_init7_s1_00']
[GraphBuilder-KNG.make_node] index_put_3ioioi1x3v2_2 [##:# ] Reshape:['init7_s8192_', 'init7_s3_1_-1_1']->['_reshape_init7_s8192_0']
[GraphBuilder-KNG.make_node] index_put_3ioioi1x3v2_3 [##:# ] Reshape:['index', 'init7_s3_1_1_-1']->['_reshape_index0']
[GraphBuilder-KNG.make_node] index_put_3ioioi1x3v2_4 [#:# ] Shape:['init7_s1_0']->['_shape_init7_s1_00']
[GraphBuilder-KNG.make_node] index_put_3ioioi1x3v2_5 [#:# ] Shape:['init7_s8192_2']->['_shape_init7_s8192_20']
[GraphBuilder-KNG.make_node] index_put_3ioioi1x3v2_6 [#:# ] Shape:['index']->['_shape_index0']
[GraphBuilder-KNG.make_node] index_put_3ioioi1x3v2_7 [###:W ] Concat:['_shape_init7_s1_00', '_shape_init7_s8192_20', '_shape_index0']->['_onx_concat__shape_init7_s1_000']
[GraphBuilder-KNG.make_node] index_put_3ioioi1x3v2_8 [#W:# ] Expand:['update', '_onx_concat__shape_init7_s1_000']->['_onx_expand_update0']
[GraphBuilder-KNG.make_node] index_put_3ioioi1x3v2_9 [##:# ] Mul:['_reshape_init7_s1_00', 'init7_s1_32768']->['_onx_mul__reshape_init7_s1_000']
[GraphBuilder-KNG.make_node] index_put_3ioioi1x3v2_10 [##:# ] Mul:['_reshape_init7_s8192_0', 'init7_s1_4']->['_onx_mul__reshape_init7_s8192_00']
[GraphBuilder-KNG.make_node] SwitchOrderBinaryPattern--index_put_3ioioi1x3v2_12 [##:# ] Add:['_onx_mul__reshape_init7_s1_000', '_reshape_index0']->['add-_onx_mul__reshape_init7_s1_000']
[GraphBuilder-KNG.make_node] SwitchOrderBinaryPattern--index_put_3ioioi1x3v2_122 [##:# ] Add:['add-_onx_mul__reshape_init7_s1_000', '_onx_mul__reshape_init7_s8192_00']->['_onx_add_add_mul__reshape_init7_s1_00000']
[GraphBuilder-KNG.make_node] index_put_3ioioi1x3v2_13 [##:# ] Reshape:['_onx_add_add_mul__reshape_init7_s1_00000', 'init7_s1_-1']->['_reshape_add_add_mul__reshape_init7_s1_000000']
[GraphBuilder-KNG.make_node] index_put_3ioioi1x3v2_14 [##:# ] GatherElements:['init7_s32768_', '_reshape_add_add_mul__reshape_init7_s1_000000']->['_onx_gatherelements_init7_s32768_0']
[GraphBuilder-KNG.make_node] index_put_3ioioi1x3v2_15 [##:# ] Reshape:['_onx_expand_update0', 'init7_s1_-1']->['_reshape_expand_update00']
[GraphBuilder-KNG.make_node] index_put_3ioioi1x3v2_16 [##:# ] Reshape:['b_lifted_tensor_5', 'init7_s1_-1']->['_reshape_clone0']
[GraphBuilder-KNG.make_node] index_put_3ioioi1x3v2_17 [###:# ] ScatterElements:['_reshape_clone0', '_onx_gatherelements_init7_s32768_0', '_reshape_expand_update00']->['_onx_scatterelements__reshape_clone00']
[GraphBuilder-KNG.make_node] index_put_3ioioi1x3v2_18 [##:# ] Reshape:['_onx_scatterelements__reshape_clone00', 'init7_s3_1_8192_4']->['output_0']
[GraphBuilder-KNG.make_tensor_output] output_0[1:1x8192x4]
[GraphBuilder-KNG] Message completed, there are 12 initializers, 18 nodes, 2 inputs, 2 outputs.
custom-dec¶
FAILED
When `dynamic_shapes` is specified as a dict, its top-level keys must be the arg names ['index', 'update'] of `inputs`, but here they are ['x']. Alternatively, you could also ignore arg names entirely and specify `dynamic_shapes` as a list/tuple matching `inputs`. For more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation
custom-tracing¶
FAILED
setitem is not implemented when indices=(Ellipsis, 'index') and rank is unknown or not equal to the number of indices
--DEBUG--
[GraphBuilder-RWQ] Message starts, there are 1 initializers, 0 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = WrapSym(batch)
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_types={'_tensor_constant0': 1, 'index': 7, 'update': 1}
_known_shapes={'_tensor_constant0': (1, 8192, 4), 'index': (4,), 'update': (8192, 4)}
_known_value_shape={}
_known_constants=['_tensor_constant0']
_known_ranks={}
--TORCH-USERS--
_tensor_constant0 -> {setitem}
index -> {setitem}
setitem -> {output}
update -> {setitem}
--TORCH-SHAPES--
index: ('run_node', (('example_value', torch.int64, torch.Size([4])), '')) --- 7:1:(4,):
update: ('run_node', (('example_value', torch.float32, torch.Size([8192, 4])), '')) --- 1:2:(8192, 4):
_tensor_constant0: ('run_node', ('', '')) --- 1:3:(1, 8192, 4):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(tracing=True)
-- process.graph_module --
graph():
%index : [num_users=1] = placeholder[target=index]
%update : [num_users=1] = placeholder[target=update]
%_tensor_constant0 : [num_users=1] = get_attr[target=_tensor_constant0]
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%_tensor_constant0, (Ellipsis, %index), %update), kwargs = {})
return setitem
-- process.progress --
node 3/5 target=<built-in function setitem>
--
[GraphBuilder-RWQ.make_tensor_input] index[7:4]
[GraphBuilder-RWQ.make_tensor_input] update[1:8192x4]
[GraphBuilder-RWQ.make_initializer] _tensor_constant0[torch.float32:torch.float32] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-RWQ] Message completed, there are 1 initializers, 0 nodes, 2 inputs, 2 outputs.
dynamo-ir¶
FAILED
[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. In Node, ("node_Unsqueeze_2", Unsqueeze, "", -1) : ("","val_0": tensor(int64),) -> ("val_1",) , Error Node (node_Unsqueeze_2)'s input 0 is marked single but has an empty string in the graph
script¶
FAILED
number of input names provided (4) exceeded number of inputs (2)
InplaceSetItemEllipsis_2¶
forward¶
def forward(self, index, update):
copy = self.params.clone()
copy[..., index] = update
return copy
custom-fallback¶
FAILED
Issue with one input name and its associated dynamic shape dyn_name='x', input_name=index
--DEBUG--
[GraphBuilder-WBO] Message starts, there are 12 initializers, 18 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = WrapSym(batch)
s0 = 's0'
s0*8192 = 's0*8192'
s1 = 's1'
s2 = 's2'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_types={'_onx_add_add_mul__reshape_init7_s1_00000': 7,
'_onx_add_mul__reshape_init7_s1_0000': 7,
'_onx_expand_update0': 1,
'_onx_gatherelements_init7_s49152_0': 7,
'_onx_mul__reshape_init7_s1_000': 7,
'_onx_mul__reshape_init7_s8192_00': 7,
'_onx_scatterelements__reshape_clone00': 1,
'_reshape_add_add_mul__reshape_init7_s1_000000': 7,
'_reshape_clone0': 1,
'_reshape_expand_update00': 1,
'_reshape_index0': 7,
'_reshape_init7_s1_00': 7,
'_reshape_init7_s8192_0': 7,
'_shape_index0': 7,
'_shape_init7_s1_00': 7,
'_shape_init7_s8192_20': 7,
'add-_onx_mul__reshape_init7_s1_000': 7,
'b_lifted_tensor_5': 1,
'clone': 1,
'index': 7,
'index_put_': 1,
'init7_s1_-1': 7,
'init7_s1_0': 7,
'init7_s1_49152': 7,
'init7_s1_6': 7,
'init7_s3_-1_1_1': 7,
'init7_s3_1_-1_1': 7,
'init7_s3_1_1_-1': 7,
'init7_s3_1_8192_6': 7,
'init7_s49152_': 7,
'init7_s8192_': 7,
'init7_s8192_2': 7,
'output_0': 1,
'update': 1}
_known_shapes={'_onx_add_add_mul__reshape_init7_s1_00000': (1, 8192, 's0'),
'_onx_add_mul__reshape_init7_s1_0000': (1, 8192, 1),
'_onx_concat__shape_init7_s1_000': (3,),
'_onx_expand_update0': (1, 8192, 's0'),
'_onx_gatherelements_init7_s49152_0': ('s0*8192',),
'_onx_mul__reshape_init7_s1_000': (1, 1, 1),
'_onx_mul__reshape_init7_s8192_00': (1, 8192, 1),
'_onx_scatterelements__reshape_clone00': (49152,),
'_reshape_add_add_mul__reshape_init7_s1_000000': ('s0*8192',),
'_reshape_clone0': (49152,),
'_reshape_expand_update00': ('s0*8192',),
'_reshape_index0': (1, 1, 's0'),
'_reshape_init7_s1_00': (1, 1, 1),
'_reshape_init7_s8192_0': (1, 8192, 1),
'_shape_index0': (1,),
'_shape_init7_s1_00': (1,),
'_shape_init7_s8192_20': (1,),
'add-_onx_mul__reshape_init7_s1_000': (1, 1, 's0'),
'b_lifted_tensor_5': (1, 8192, 6),
'clone': (1, 8192, 6),
'index': ('s0',),
'index_put_': (1, 8192, 6),
'init7_s1_-1': (1,),
'init7_s1_0': (1,),
'init7_s1_49152': (1,),
'init7_s1_6': (1,),
'init7_s3_-1_1_1': (3,),
'init7_s3_1_-1_1': (3,),
'init7_s3_1_1_-1': (3,),
'init7_s3_1_8192_6': (3,),
'init7_s49152_': (49152,),
'init7_s8192_': (8192,),
'init7_s8192_2': (8192,),
'output_0': (1, 8192, 6),
'update': ('s1', 's2')}
_known_value_shape={'_onx_concat__shape_init7_s1_000': (1, 8192, 's0'),
'_shape_index0': ('s0',),
'_shape_init7_s1_00': (1,),
'_shape_init7_s8192_20': (8192,),
'init7_s1_0': (0,),
'init7_s1_49152': (49152,),
'init7_s1_6': (6,),
'init7_s8192_2': '(int,...)#8192'}
_known_constants=['_onx_mul__reshape_init7_s1_000',
'_onx_mul__reshape_init7_s8192_00',
'_reshape_clone0',
'_reshape_init7_s1_00',
'_reshape_init7_s8192_0',
'_shape_init7_s1_00',
'_shape_init7_s8192_20',
'b_lifted_tensor_5',
'init7_s1_-1',
'init7_s1_0',
'init7_s1_49152',
'init7_s1_6',
'init7_s3_-1_1_1',
'init7_s3_1_-1_1',
'init7_s3_1_1_-1',
'init7_s3_1_8192_6',
'init7_s49152_',
'init7_s8192_',
'init7_s8192_2']
_known_ranks={}
--TORCH-USERS--
b_lifted_tensor_5 -> {clone}
clone -> {index_put_}
index -> {index_put_}
index_put_ -> {output}
output -> set()
update -> {index_put_}
--TORCH-SHAPES--
b_lifted_tensor_5: ('run_node', ('', ('val', torch.float32, torch.Size([1, 8192, 6])))) --- 1:3:(1, 8192, 6):
index: ('run_node', ('', ('val', torch.int64, torch.Size([s0])))) --- 7:1:('s0',):
update: ('run_node', ('', ('val', torch.float32, torch.Size([s1, s2])))) --- 1:2:('s1', 's2'):
clone: ('run_node', ('', ('val', torch.float32, torch.Size([1, 8192, 6])))) --- 1:3:(1, 8192, 6):
index_put_: ('run_node', ('', ('val', torch.float32, torch.Size([1, 8192, 6])))) --- 1:3:(1, 8192, 6):
output: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(fallback=True)
-- process.graph_module --
graph():
%b_lifted_tensor_5 : [num_users=1] = placeholder[target=b_lifted_tensor_5]
%index : [num_users=1] = placeholder[target=index]
%update : [num_users=1] = placeholder[target=update]
%clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%b_lifted_tensor_5,), kwargs = {})
%index_put_ : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%clone, [None, None, %index], %update), kwargs = {})
return (index_put_,)
-- process.progress --
node 5/6 target=output
--
[GraphBuilder-WBO.make_tensor_input] index[7:s0]
[GraphBuilder-WBO.make_tensor_input] update[1:s1xs2]
[GraphBuilder-WBO.make_initializer] b_lifted_tensor_5[torch.float32:torch.float32] - SOURCE: DynamoInterpret.placeholder.0
[GraphBuilder-WBO.make_initializer] init7_s1_0[int64:int64:[0]] - SOURCE: Opset.make_node.1/Shape##Opset.make_node.1/Shape
[GraphBuilder-WBO.make_initializer] init7_s3_-1_1_1[int64:int64:[-1, 1, 1]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-WBO.make_initializer] init7_s8192_[int64:int64] - SOURCE: Opset.make_node.0
[GraphBuilder-WBO.make_initializer] init7_s3_1_-1_1[int64:int64:[1, -1, 1]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-WBO.make_initializer] init7_s3_1_1_-1[int64:int64:[1, 1, -1]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-WBO.make_initializer] init7_s8192_2[int64:int64] - SOURCE: Opset.make_node.0
[GraphBuilder-WBO.make_initializer] init7_s1_49152[int64:int64:[49152]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-WBO.make_initializer] init7_s1_6[int64:int64:[6]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-WBO.make_initializer] init7_s1_-1[int64:int64:[-1]] - SOURCE: Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
[GraphBuilder-WBO.make_initializer] init7_s49152_[int64:int64] - SOURCE: Opset.make_node.0
[GraphBuilder-WBO.make_initializer] init7_s3_1_8192_6[int64:int64:[1, 8192, 6]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-WBO.make_node] index_put_3ioioi1x3v2_ [##:# ] Reshape:['init7_s1_0', 'init7_s3_-1_1_1']->['_reshape_init7_s1_00']
[GraphBuilder-WBO.make_node] index_put_3ioioi1x3v2_2 [##:# ] Reshape:['init7_s8192_', 'init7_s3_1_-1_1']->['_reshape_init7_s8192_0']
[GraphBuilder-WBO.make_node] index_put_3ioioi1x3v2_3 [##:# ] Reshape:['index', 'init7_s3_1_1_-1']->['_reshape_index0']
[GraphBuilder-WBO.make_node] index_put_3ioioi1x3v2_4 [#:# ] Shape:['init7_s1_0']->['_shape_init7_s1_00']
[GraphBuilder-WBO.make_node] index_put_3ioioi1x3v2_5 [#:# ] Shape:['init7_s8192_2']->['_shape_init7_s8192_20']
[GraphBuilder-WBO.make_node] index_put_3ioioi1x3v2_6 [#:# ] Shape:['index']->['_shape_index0']
[GraphBuilder-WBO.make_node] index_put_3ioioi1x3v2_7 [###:W ] Concat:['_shape_init7_s1_00', '_shape_init7_s8192_20', '_shape_index0']->['_onx_concat__shape_init7_s1_000']
[GraphBuilder-WBO.make_node] index_put_3ioioi1x3v2_8 [#W:# ] Expand:['update', '_onx_concat__shape_init7_s1_000']->['_onx_expand_update0']
[GraphBuilder-WBO.make_node] index_put_3ioioi1x3v2_9 [##:# ] Mul:['_reshape_init7_s1_00', 'init7_s1_49152']->['_onx_mul__reshape_init7_s1_000']
[GraphBuilder-WBO.make_node] index_put_3ioioi1x3v2_10 [##:# ] Mul:['_reshape_init7_s8192_0', 'init7_s1_6']->['_onx_mul__reshape_init7_s8192_00']
[GraphBuilder-WBO.make_node] SwitchOrderBinaryPattern--index_put_3ioioi1x3v2_12 [##:# ] Add:['_onx_mul__reshape_init7_s1_000', '_reshape_index0']->['add-_onx_mul__reshape_init7_s1_000']
[GraphBuilder-WBO.make_node] SwitchOrderBinaryPattern--index_put_3ioioi1x3v2_122 [##:# ] Add:['add-_onx_mul__reshape_init7_s1_000', '_onx_mul__reshape_init7_s8192_00']->['_onx_add_add_mul__reshape_init7_s1_00000']
[GraphBuilder-WBO.make_node] index_put_3ioioi1x3v2_13 [##:# ] Reshape:['_onx_add_add_mul__reshape_init7_s1_00000', 'init7_s1_-1']->['_reshape_add_add_mul__reshape_init7_s1_000000']
[GraphBuilder-WBO.make_node] index_put_3ioioi1x3v2_14 [##:# ] GatherElements:['init7_s49152_', '_reshape_add_add_mul__reshape_init7_s1_000000']->['_onx_gatherelements_init7_s49152_0']
[GraphBuilder-WBO.make_node] index_put_3ioioi1x3v2_15 [##:# ] Reshape:['_onx_expand_update0', 'init7_s1_-1']->['_reshape_expand_update00']
[GraphBuilder-WBO.make_node] index_put_3ioioi1x3v2_16 [##:# ] Reshape:['b_lifted_tensor_5', 'init7_s1_-1']->['_reshape_clone0']
[GraphBuilder-WBO.make_node] index_put_3ioioi1x3v2_17 [###:# ] ScatterElements:['_reshape_clone0', '_onx_gatherelements_init7_s49152_0', '_reshape_expand_update00']->['_onx_scatterelements__reshape_clone00']
[GraphBuilder-WBO.make_node] index_put_3ioioi1x3v2_18 [##:# ] Reshape:['_onx_scatterelements__reshape_clone00', 'init7_s3_1_8192_6']->['output_0']
[GraphBuilder-WBO.make_tensor_output] output_0[1:1x8192x6]
[GraphBuilder-WBO] Message completed, there are 12 initializers, 18 nodes, 2 inputs, 2 outputs.
custom-dec¶
FAILED
When `dynamic_shapes` is specified as a dict, its top-level keys must be the arg names ['index', 'update'] of `inputs`, but here they are ['x']. Alternatively, you could also ignore arg names entirely and specify `dynamic_shapes` as a list/tuple matching `inputs`. For more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation
custom-tracing¶
FAILED
setitem is not implemented when indices=(Ellipsis, 'index') and rank is unknown or not equal to the number of indices
--DEBUG--
[GraphBuilder-TPG] Message starts, there are 1 initializers, 0 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = WrapSym(batch)
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_types={'_tensor_constant0': 1, 'index': 7, 'update': 1}
_known_shapes={'_tensor_constant0': (1, 8192, 6), 'index': (4,), 'update': (8192, 4)}
_known_value_shape={}
_known_constants=['_tensor_constant0']
_known_ranks={}
--TORCH-USERS--
_tensor_constant0 -> {setitem}
index -> {setitem}
setitem -> {output}
update -> {setitem}
--TORCH-SHAPES--
index: ('run_node', (('example_value', torch.int64, torch.Size([4])), '')) --- 7:1:(4,):
update: ('run_node', (('example_value', torch.float32, torch.Size([8192, 4])), '')) --- 1:2:(8192, 4):
_tensor_constant0: ('run_node', ('', '')) --- 1:3:(1, 8192, 6):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(tracing=True)
-- process.graph_module --
graph():
%index : [num_users=1] = placeholder[target=index]
%update : [num_users=1] = placeholder[target=update]
%_tensor_constant0 : [num_users=1] = get_attr[target=_tensor_constant0]
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%_tensor_constant0, (Ellipsis, %index), %update), kwargs = {})
return setitem
-- process.progress --
node 3/5 target=<built-in function setitem>
--
[GraphBuilder-TPG.make_tensor_input] index[7:4]
[GraphBuilder-TPG.make_tensor_input] update[1:8192x4]
[GraphBuilder-TPG.make_initializer] _tensor_constant0[torch.float32:torch.float32] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-TPG] Message completed, there are 1 initializers, 0 nodes, 2 inputs, 2 outputs.
dynamo-ir¶
FAILED
[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. In Node, ("node_Unsqueeze_2", Unsqueeze, "", -1) : ("","val_0": tensor(int64),) -> ("val_1",) , Error Node (node_Unsqueeze_2)'s input 0 is marked single but has an empty string in the graph
script¶
FAILED
number of input names provided (4) exceeded number of inputs (2)
InplaceSetItemMask¶
forward¶
def forward(self, x):
mask = x.to(bool)
x[mask] = 2
return x
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3, 3]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([2.], dtype=float32)-- DynamoInterpret.placeholder.0
Cast(x, to=9) -> to
Where(to, c_lifted_tensor_0, x) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3, 3]
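The masked assignment is lowered to a single `Where` node; a rough PyTorch equivalent of the exported `Cast` + `Where` pair above:
<<<
import torch


def masked_fill_like_export(x: torch.Tensor) -> torch.Tensor:
    mask = x.to(torch.bool)                          # Cast(x, to=9)
    return torch.where(mask, torch.tensor(2.0), x)   # Where(mask, 2.0, x)
>>>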
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3, 3]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([2.], dtype=float32)-- DynamoInterpret.placeholder.0
Cast(x, to=9) -> _to_copy
Where(_to_copy, c_lifted_tensor_0, x) -> output_0
Identity(output_0) -> output_1
output: name='output_1' type=dtype('float32') shape=['batch', 3, 3]
custom-tracing¶
FAILED
Unexpected type <class 'int'> for name.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 3, 3]
Cast(x, to=9) -> _to_copy
Constant(value=2.0) -> clone
Where(_to_copy, clone, x) -> index_put
output: name='index_put' type=dtype('float32') shape=['s0', 3, 3]
script¶
FAILED
number of input names provided (2) exceeded number of inputs (1)
InplaceSetItemSquare¶
forward¶
def forward(self, x):
x[:2, :3] = 1
return x
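The exporters decompose this sliced assignment into two nested `slice_scatter` calls before lowering them to the Transpose/ScatterND sequences below; a rough out-of-place sketch of that decomposition, assuming an input of shape (batch, 5) with batch >= 2:
<<<
import torch


def set_square(x: torch.Tensor) -> torch.Tensor:
    fill = torch.ones(2, 3, dtype=x.dtype)
    inner = torch.slice_scatter(x[:2], fill, dim=1, start=0, end=3)  # write columns 0..2
    return torch.slice_scatter(x, inner, dim=0, start=0, end=2)      # write rows 0..1
>>>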
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s3_0_1_2' type=int64 shape=(3,) -- array([0, 1, 2]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
Concat(init7_s1_0, init7_s1_1, axis=0) -> SliceSlicePattern_init7_s1_1_axis
Concat(init7_s1_0, init7_s1_0, axis=0) -> SliceSlicePattern_init7_s1_0_start
Concat(init7_s1_2, init7_s1_3, axis=0) -> SliceSlicePattern_init7_s1_3_end
Slice(x, SliceSlicePattern_init7_s1_0_start, SliceSlicePattern_init7_s1_3_end, SliceSlicePattern_init7_s1_1_axis) -> slice_2
Shape(slice_2) -> _shape_slice_20
Expand(c_lifted_tensor_0, _shape_slice_20) -> fill
Transpose(fill, perm=[1,0]) -> _onx_transpose_fill0
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> _onx_transpose_slice_30
Reshape(init7_s3_0_1_2, init7_s2_-1_1) -> _reshape_init7_s3_0_1_20
ScatterND(_onx_transpose_slice_30, _reshape_init7_s3_0_1_20, _onx_transpose_fill0) -> _onx_scatternd_transpose_slice_300
Transpose(_onx_scatternd_transpose_slice_300, perm=[1,0]) -> slice_scatter
Shape(x) -> _shape_x0
Gather(_shape_x0, init7_s1_0) -> _onx_gather__shape_x00
Range(init7_s1_0, _onx_gather__shape_x00, init7_s1_1) -> _onx_range_init7_s1_00
Slice(_onx_range_init7_s1_00, init7_s1_0, init7_s1_2, init7_s1_0, init7_s1_1) -> _onx_slice_range_init7_s1_000
Reshape(_onx_slice_range_init7_s1_000, init7_s2_-1_1) -> _reshape_slice_range_init7_s1_0000
ScatterND(x, _reshape_slice_range_init7_s1_0000, slice_scatter) -> output_0
Identity(output_0) -> output_1
output: name='output_1' type=dtype('float32') shape=['batch', 5]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s3_0_1_2' type=int64 shape=(3,) -- array([0, 1, 2]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
Concat(init7_s1_0, init7_s1_1, axis=0) -> SliceSlicePattern_init7_s1_1_axis
Concat(init7_s1_0, init7_s1_0, axis=0) -> SliceSlicePattern_init7_s1_0_start
Concat(init7_s1_2, init7_s1_3, axis=0) -> SliceSlicePattern_init7_s1_3_end
Slice(x, SliceSlicePattern_init7_s1_0_start, SliceSlicePattern_init7_s1_3_end, SliceSlicePattern_init7_s1_1_axis) -> slice_2
Shape(slice_2) -> _shape_slice_20
Expand(c_lifted_tensor_0, _shape_slice_20) -> fill
Transpose(fill, perm=[1,0]) -> _onx_transpose_fill0
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> _onx_transpose_slice_30
Reshape(init7_s3_0_1_2, init7_s2_-1_1) -> _reshape_init7_s3_0_1_20
ScatterND(_onx_transpose_slice_30, _reshape_init7_s3_0_1_20, _onx_transpose_fill0) -> _onx_scatternd_transpose_slice_300
Transpose(_onx_scatternd_transpose_slice_300, perm=[1,0]) -> slice_scatter
Shape(x) -> _shape_x0
Gather(_shape_x0, init7_s1_0) -> _onx_gather__shape_x00
Range(init7_s1_0, _onx_gather__shape_x00, init7_s1_1) -> _onx_range_init7_s1_00
Slice(_onx_range_init7_s1_00, init7_s1_0, init7_s1_2, init7_s1_0, init7_s1_1) -> _onx_slice_range_init7_s1_000
Reshape(_onx_slice_range_init7_s1_000, init7_s2_-1_1) -> _reshape_slice_range_init7_s1_0000
ScatterND(x, _reshape_slice_range_init7_s1_0000, slice_scatter) -> output_0
Identity(output_0) -> output_1
output: name='output_1' type=dtype('float32') shape=['batch', 5]
custom-tracing¶
FAILED
Unexpected type <class 'int'> for name.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=['s0', 5]
Constant(value=0) -> val_0
Constant(value=1) -> val_19
Constant(value=[0]) -> val_26
Constant(value=[2]) -> val_29
Constant(value=[0]) -> val_32
Constant(value_ints=[1]) -> val_33
Slice(x, val_26, val_29, val_32, val_33) -> slice_3
Transpose(slice_3, perm=[1,0]) -> val_45
Constant(value=-1) -> val_42
Constant(value=[[0], [1],...) -> val_43
Constant(value=[[1.0, 1.0...) -> val_44
ScatterND(val_45, val_43, val_44, reduction=b'none') -> val_46
Transpose(val_46, perm=[1,0]) -> slice_scatter
Constant(value_ints=[0]) -> val_47
Shape(x, start=0) -> val_48
Gather(val_48, val_0, axis=0) -> val_49
Range(val_0, val_49, val_19) -> val_50
Constant(value=[0]) -> val_51
Constant(value=[2]) -> val_52
Constant(value=[1]) -> val_53
Slice(val_50, val_51, val_52, val_47, val_53) -> val_54
Unsqueeze(val_54, val_42) -> val_55
ScatterND(x, val_55, slice_scatter, reduction=b'none') -> slice_scatter_1
output: name='slice_scatter_1' type=dtype('float32') shape=['s0', 5]
script¶
FAILED
number of input names provided (5) exceeded number of inputs (1)
InplaceSetItemSquareAdd¶
forward¶
def forward(self, x):
x[:2, :3] = 1
return x + 2
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s3_0_1_2' type=int64 shape=(3,) -- array([0, 1, 2]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_' type=float32 shape=() -- array([2.], dtype=float32)-- shape_type_compute._cast_inputs.0
Concat(init7_s1_0, init7_s1_1, axis=0) -> SliceSlicePattern_init7_s1_1_axis
Concat(init7_s1_0, init7_s1_0, axis=0) -> SliceSlicePattern_init7_s1_0_start
Concat(init7_s1_2, init7_s1_3, axis=0) -> SliceSlicePattern_init7_s1_3_end
Slice(x, SliceSlicePattern_init7_s1_0_start, SliceSlicePattern_init7_s1_3_end, SliceSlicePattern_init7_s1_1_axis) -> slice_2
Shape(slice_2) -> _shape_slice_20
Expand(c_lifted_tensor_0, _shape_slice_20) -> fill
Transpose(fill, perm=[1,0]) -> _onx_transpose_fill0
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> _onx_transpose_slice_30
Reshape(init7_s3_0_1_2, init7_s2_-1_1) -> _reshape_init7_s3_0_1_20
ScatterND(_onx_transpose_slice_30, _reshape_init7_s3_0_1_20, _onx_transpose_fill0) -> _onx_scatternd_transpose_slice_300
Transpose(_onx_scatternd_transpose_slice_300, perm=[1,0]) -> slice_scatter
Shape(x) -> _shape_x0
Gather(_shape_x0, init7_s1_0) -> _onx_gather__shape_x00
Range(init7_s1_0, _onx_gather__shape_x00, init7_s1_1) -> _onx_range_init7_s1_00
Slice(_onx_range_init7_s1_00, init7_s1_0, init7_s1_2, init7_s1_0, init7_s1_1) -> _onx_slice_range_init7_s1_000
Reshape(_onx_slice_range_init7_s1_000, init7_s2_-1_1) -> _reshape_slice_range_init7_s1_0000
ScatterND(x, _reshape_slice_range_init7_s1_0000, slice_scatter) -> output_0
Reshape(init1_s_, init7_s1_1) -> _reshape_init1_s_0
Add(output_0, _reshape_init1_s_0) -> output_1
output: name='output_1' type=dtype('float32') shape=['batch', 5]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s3_0_1_2' type=int64 shape=(3,) -- array([0, 1, 2]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_' type=float32 shape=() -- array([2.], dtype=float32)-- shape_type_compute._cast_inputs.0
Concat(init7_s1_0, init7_s1_1, axis=0) -> SliceSlicePattern_init7_s1_1_axis
Concat(init7_s1_0, init7_s1_0, axis=0) -> SliceSlicePattern_init7_s1_0_start
Concat(init7_s1_2, init7_s1_3, axis=0) -> SliceSlicePattern_init7_s1_3_end
Slice(x, SliceSlicePattern_init7_s1_0_start, SliceSlicePattern_init7_s1_3_end, SliceSlicePattern_init7_s1_1_axis) -> slice_2
Shape(slice_2) -> _shape_slice_20
Expand(c_lifted_tensor_0, _shape_slice_20) -> fill
Transpose(fill, perm=[1,0]) -> _onx_transpose_fill0
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> _onx_transpose_slice_30
Reshape(init7_s3_0_1_2, init7_s2_-1_1) -> _reshape_init7_s3_0_1_20
ScatterND(_onx_transpose_slice_30, _reshape_init7_s3_0_1_20, _onx_transpose_fill0) -> _onx_scatternd_transpose_slice_300
Transpose(_onx_scatternd_transpose_slice_300, perm=[1,0]) -> slice_scatter
Shape(x) -> _shape_x0
Gather(_shape_x0, init7_s1_0) -> _onx_gather__shape_x00
Range(init7_s1_0, _onx_gather__shape_x00, init7_s1_1) -> _onx_range_init7_s1_00
Slice(_onx_range_init7_s1_00, init7_s1_0, init7_s1_2, init7_s1_0, init7_s1_1) -> _onx_slice_range_init7_s1_000
Reshape(_onx_slice_range_init7_s1_000, init7_s2_-1_1) -> _reshape_slice_range_init7_s1_0000
ScatterND(x, _reshape_slice_range_init7_s1_0000, slice_scatter) -> output_0
Reshape(init1_s_, init7_s1_1) -> _reshape_init1_s_0
Add(output_0, _reshape_init1_s_0) -> output_1
output: name='output_1' type=dtype('float32') shape=['batch', 5]
custom-tracing¶
FAILED
Unexpected type <class 'int'> for name.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=['s0', 5]
Constant(value=0) -> val_0
Constant(value=1) -> val_19
Constant(value=[0]) -> val_26
Constant(value=[2]) -> val_29
Constant(value=[0]) -> val_32
Constant(value_ints=[1]) -> val_33
Slice(x, val_26, val_29, val_32, val_33) -> slice_3
Transpose(slice_3, perm=[1,0]) -> val_45
Constant(value=-1) -> val_42
Constant(value=[[0], [1],...) -> val_43
Constant(value=[[1.0, 1.0...) -> val_44
ScatterND(val_45, val_43, val_44, reduction=b'none') -> val_46
Transpose(val_46, perm=[1,0]) -> slice_scatter
Constant(value_ints=[0]) -> val_47
Shape(x, start=0) -> val_48
Gather(val_48, val_0, axis=0) -> val_49
Range(val_0, val_49, val_19) -> val_50
Constant(value=[0]) -> val_51
Constant(value=[2]) -> val_52
Constant(value=[1]) -> val_53
Slice(val_50, val_51, val_52, val_47, val_53) -> val_54
Unsqueeze(val_54, val_42) -> val_55
ScatterND(x, val_55, slice_scatter, reduction=b'none') -> slice_scatter_1
Constant(value=2.0) -> scalar_tensor_default
Add(slice_scatter_1, scalar_tensor_default) -> add
output: name='add' type=dtype('float32') shape=['s0', 5]
script¶
FAILED
number of input names provided (5) exceeded number of inputs (1)
InplaceSetItemSquareAdd2¶
forward¶
def forward(self, x):
x[:2, :3] = 1
return x + 2, x + 3
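The in-place assignment above is rewritten by torch.export into a functional slice_scatter before any ONNX lowering, which is why the graphs below contain ScatterND nodes rather than a mutation. A minimal sketch of that functional form, assuming nothing beyond public torch APIs (forward_functional is a hypothetical name, not part of the tested case):

import torch

def forward_functional(x):
    # Hedged sketch: roughly the functional rewrite produced for "x[:2, :3] = 1";
    # slice_scatter writes a block into a copy instead of mutating x.
    filled = torch.ones(2, 3, dtype=x.dtype)
    rows = torch.slice_scatter(x[:2], filled, dim=1, start=0, end=3)
    x2 = torch.slice_scatter(x, rows, dim=0, start=0, end=2)
    return x2 + 2, x2 + 3

x = torch.randn(4, 5)
y = x.clone()
y[:2, :3] = 1
assert torch.allclose(forward_functional(x)[0], y + 2)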
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s3_0_1_2' type=int64 shape=(3,) -- array([0, 1, 2]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_' type=float32 shape=() -- array([2.], dtype=float32)-- shape_type_compute._cast_inputs.0
init: name='init1_s_2' type=float32 shape=() -- array([3.], dtype=float32)-- shape_type_compute._cast_inputs.0
Concat(init7_s1_0, init7_s1_1, axis=0) -> SliceSlicePattern_init7_s1_1_axis
Concat(init7_s1_0, init7_s1_0, axis=0) -> SliceSlicePattern_init7_s1_0_start
Concat(init7_s1_2, init7_s1_3, axis=0) -> SliceSlicePattern_init7_s1_3_end
Slice(x, SliceSlicePattern_init7_s1_0_start, SliceSlicePattern_init7_s1_3_end, SliceSlicePattern_init7_s1_1_axis) -> slice_2
Shape(slice_2) -> _shape_slice_20
Expand(c_lifted_tensor_0, _shape_slice_20) -> fill
Transpose(fill, perm=[1,0]) -> _onx_transpose_fill0
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> _onx_transpose_slice_30
Reshape(init7_s3_0_1_2, init7_s2_-1_1) -> _reshape_init7_s3_0_1_20
ScatterND(_onx_transpose_slice_30, _reshape_init7_s3_0_1_20, _onx_transpose_fill0) -> _onx_scatternd_transpose_slice_300
Transpose(_onx_scatternd_transpose_slice_300, perm=[1,0]) -> slice_scatter
Shape(x) -> _shape_x0
Gather(_shape_x0, init7_s1_0) -> _onx_gather__shape_x00
Range(init7_s1_0, _onx_gather__shape_x00, init7_s1_1) -> _onx_range_init7_s1_00
Slice(_onx_range_init7_s1_00, init7_s1_0, init7_s1_2, init7_s1_0, init7_s1_1) -> _onx_slice_range_init7_s1_000
Reshape(_onx_slice_range_init7_s1_000, init7_s2_-1_1) -> _reshape_slice_range_init7_s1_0000
ScatterND(x, _reshape_slice_range_init7_s1_0000, slice_scatter) -> output_0
Reshape(init1_s_, init7_s1_1) -> _reshape_init1_s_0
Add(output_0, _reshape_init1_s_0) -> output_1
Reshape(init1_s_2, init7_s1_1) -> _reshape_init1_s_20
Add(output_0, _reshape_init1_s_20) -> output_2
output: name='output_1' type=dtype('float32') shape=['batch', 5]
output: name='output_2' type=dtype('float32') shape=['batch', 5]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s3_0_1_2' type=int64 shape=(3,) -- array([0, 1, 2]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_' type=float32 shape=() -- array([2.], dtype=float32)-- shape_type_compute._cast_inputs.0
init: name='init1_s_2' type=float32 shape=() -- array([3.], dtype=float32)-- shape_type_compute._cast_inputs.0
Concat(init7_s1_0, init7_s1_1, axis=0) -> SliceSlicePattern_init7_s1_1_axis
Concat(init7_s1_0, init7_s1_0, axis=0) -> SliceSlicePattern_init7_s1_0_start
Concat(init7_s1_2, init7_s1_3, axis=0) -> SliceSlicePattern_init7_s1_3_end
Slice(x, SliceSlicePattern_init7_s1_0_start, SliceSlicePattern_init7_s1_3_end, SliceSlicePattern_init7_s1_1_axis) -> slice_2
Shape(slice_2) -> _shape_slice_20
Expand(c_lifted_tensor_0, _shape_slice_20) -> fill
Transpose(fill, perm=[1,0]) -> _onx_transpose_fill0
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> _onx_transpose_slice_30
Reshape(init7_s3_0_1_2, init7_s2_-1_1) -> _reshape_init7_s3_0_1_20
ScatterND(_onx_transpose_slice_30, _reshape_init7_s3_0_1_20, _onx_transpose_fill0) -> _onx_scatternd_transpose_slice_300
Transpose(_onx_scatternd_transpose_slice_300, perm=[1,0]) -> slice_scatter
Shape(x) -> _shape_x0
Gather(_shape_x0, init7_s1_0) -> _onx_gather__shape_x00
Range(init7_s1_0, _onx_gather__shape_x00, init7_s1_1) -> _onx_range_init7_s1_00
Slice(_onx_range_init7_s1_00, init7_s1_0, init7_s1_2, init7_s1_0, init7_s1_1) -> _onx_slice_range_init7_s1_000
Reshape(_onx_slice_range_init7_s1_000, init7_s2_-1_1) -> _reshape_slice_range_init7_s1_0000
ScatterND(x, _reshape_slice_range_init7_s1_0000, slice_scatter) -> output_0
Reshape(init1_s_, init7_s1_1) -> _reshape_init1_s_0
Add(output_0, _reshape_init1_s_0) -> output_1
Reshape(init1_s_2, init7_s1_1) -> _reshape_init1_s_20
Add(output_0, _reshape_init1_s_20) -> output_2
output: name='output_1' type=dtype('float32') shape=['batch', 5]
output: name='output_2' type=dtype('float32') shape=['batch', 5]
custom-tracing¶
FAILED
Unexpected type <class 'int'> for name.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=['s0', 5]
Constant(value=0) -> val_0
Constant(value=1) -> val_19
Constant(value=[0]) -> val_26
Constant(value=[2]) -> val_29
Constant(value=[0]) -> val_32
Constant(value_ints=[1]) -> val_33
Slice(x, val_26, val_29, val_32, val_33) -> slice_3
Transpose(slice_3, perm=[1,0]) -> val_45
Constant(value=-1) -> val_42
Constant(value=[[0], [1],...) -> val_43
Constant(value=[[1.0, 1.0...) -> val_44
ScatterND(val_45, val_43, val_44, reduction=b'none') -> val_46
Transpose(val_46, perm=[1,0]) -> slice_scatter
Constant(value_ints=[0]) -> val_47
Shape(x, start=0) -> val_48
Gather(val_48, val_0, axis=0) -> val_49
Range(val_0, val_49, val_19) -> val_50
Constant(value=[0]) -> val_51
Constant(value=[2]) -> val_52
Constant(value=[1]) -> val_53
Slice(val_50, val_51, val_52, val_47, val_53) -> val_54
Unsqueeze(val_54, val_42) -> val_55
ScatterND(x, val_55, slice_scatter, reduction=b'none') -> slice_scatter_1
Constant(value=2.0) -> scalar_tensor_default
Add(slice_scatter_1, scalar_tensor_default) -> add
Constant(value=3.0) -> scalar_tensor_default_1
Add(slice_scatter_1, scalar_tensor_default_1) -> add_4
output: name='add' type=dtype('float32') shape=['s0', 5]
output: name='add_4' type=dtype('float32') shape=['s0', 5]
script¶
FAILED
number of input names provided (5) exceeded number of inputs (1)
SignatureFloat1¶
forward¶
def forward(self, x, alpha: float = 2.0):
return torch.sigmoid(self.linear(x)) - self.buff * alpha
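When the export succeeds, the python float alpha becomes a 1-element float32 input of the ONNX model (see the custom-fallback graph below). A minimal sketch of feeding such a model with onnxruntime, assuming it was saved to the hypothetical file signature_float1.onnx:

import numpy as np
import onnxruntime as ort

# Hedged sketch: "signature_float1.onnx" is a hypothetical file name for the
# custom-fallback model below; the scalar default is fed as a [1] float input.
sess = ort.InferenceSession("signature_float1.onnx", providers=["CPUExecutionProvider"])
feeds = {
    "x": np.random.rand(4, 3).astype(np.float32),
    "alpha": np.array([2.0], dtype=np.float32),
}
print(sess.run(None, feeds)[0].shape)  # expected (4, 1)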
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='alpha' type=dtype('float32') shape=[1]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init1_s_' type=float32 shape=() -- array([1.5], dtype=float32)-- shape_type_compute._cast_inputs.1(mul_Tensor)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='init7_s2_1_-1' type=int64 shape=(2,) -- array([ 1, -1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.096, -0.078, 0.498], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.19], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Reshape(linear.weight, init7_s2_-1_1) -> _onx_transpose_p_linear_weight0
Reshape(_onx_transpose_p_linear_weight0, init7_s2_1_-1) -> GemmTransposePattern--_onx_transpose_p_linear_weight0
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Reshape(init1_s_, init7_s1_1) -> _reshape_init1_s_0
Mul(b_buff, _reshape_init1_s_0) -> _onx_mul_b_buff0
Sub(sigmoid, _onx_mul_b_buff0) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='alpha' type=dtype('float32') shape=[1]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init1_s_' type=float32 shape=() -- array([1.5], dtype=float32)-- shape_type_compute._cast_inputs.1(mul_Tensor)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='init7_s2_1_-1' type=int64 shape=(2,) -- array([ 1, -1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([0.028, 0.108, 0.377], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.452], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Reshape(linear.weight, init7_s2_-1_1) -> _onx_transpose_p_linear_weight0
Reshape(_onx_transpose_p_linear_weight0, init7_s2_1_-1) -> GemmTransposePattern--_onx_transpose_p_linear_weight0
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Reshape(init1_s_, init7_s1_1) -> _reshape_init1_s_0
Mul(b_buff, _reshape_init1_s_0) -> _onx_mul_b_buff0
Sub(sigmoid, _onx_mul_b_buff0) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-tracing¶
FAILED
Unable to interpret method 'aten_meth_mul', args=(buff, alpha), kwargs={}, dispatcher=None
--DEBUG--
[GraphBuilder-TKY] Message starts, there are 3 initializers, 7 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = WrapSym(batch)
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=[0]
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes=({0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>},
None)
_known_types={'_sub_Linear__onx_matmul_input_10': 1,
'_sub_Linear__onx_transpose_weight0': 1,
'_sub_Linear_input_1': 1,
'_sub_Linear_linear': 1,
'_sub_Linear_output': 1,
'alpha': 1,
'buff': 1,
'linear': 1,
'linear.bias': 1,
'linear.weight': 1,
'sigmoid': 1,
'x': 1}
_known_shapes={'_sub_Linear__onx_matmul_input_10': ('batch', 1),
'_sub_Linear__onx_transpose_weight0': (3, 1),
'_sub_Linear_input_1': ('batch', 3),
'_sub_Linear_linear': ('batch', 1),
'_sub_Linear_output': ('batch', 1),
'alpha': (),
'buff': (1,),
'linear': ('batch', 1),
'linear.bias': (1,),
'linear.weight': (1, 3),
'sigmoid': ('batch', 1),
'x': ('batch', 3)}
_known_value_shape={}
_known_constants=['_sub_Linear__onx_transpose_weight0', 'buff', 'linear.bias', 'linear.weight']
_known_ranks={}
--TORCH-USERS--
alpha -> {mul}
buff -> {mul}
linear -> {sigmoid}
mul -> {sub}
sigmoid -> {sub}
x -> {linear}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), '')) --- 1:2:('batch', 3):
alpha: ('run_node', (('example_value', torch.float32, torch.Size([])), '')) --- 1:0:():
linear: ('run_node', ('', '')) --- 1:2:('batch', 1):
sigmoid: ('run_node', ('', '')) --- 1:2:('batch', 1):
buff: ('run_node', ('', '')) --- 1:1:(1,):
mul: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(tracing=True)
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%alpha : float [num_users=1] = placeholder[target=alpha](default=2.0)
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%mul : [num_users=1] = call_method[target=mul](args = (%buff, %alpha), kwargs = {})
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %mul), kwargs = {})
return sub
-- process.progress --
node 5/8 target=mul
--
[GraphBuilder-TKY.make_tensor_input] x[1:batchx3]
[GraphBuilder-TKY.make_tensor_input] alpha[1:]
[GraphBuilder-TKY.make_initializer] linear.weight[torch.float32:torch.float32:[-0.5548827052116394, -0.03955712169408798, -0.4261837601661682]] - SOURCE: GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
[GraphBuilder-TKY.make_initializer] linear.bias[torch.float32:torch.float32:[-0.23177239298820496]] - SOURCE: GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
[GraphBuilder-TKY.make_initializer] buff[torch.float32:torch.float32:[0.5]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-TKY.make_node] .make_nodes [#:# ] Identity:['x']->['_sub_Linear_input_1']
[GraphBuilder-TKY.make_node] linear [#:# ] Transpose:['linear.weight']->['_sub_Linear__onx_transpose_weight0']
[GraphBuilder-TKY.make_node] Opset [##:# ] MatMul:['_sub_Linear_input_1', '_sub_Linear__onx_transpose_weight0']->['_sub_Linear__onx_matmul_input_10']
[GraphBuilder-TKY.make_node] Opset2 [##:# ] Add:['_sub_Linear__onx_matmul_input_10', 'linear.bias']->['_sub_Linear_linear']
[GraphBuilder-TKY.make_node] .output [#:# ] Identity:['_sub_Linear_linear']->['_sub_Linear_output']
[GraphBuilder-TKY.make_node] .make_nodes2 [#:# ] Identity:['_sub_Linear_output']->['linear']
[GraphBuilder-TKY.make_node] sigmoid [#:# ] Sigmoid:['linear']->['sigmoid']
[GraphBuilder-TKY] Message completed, there are 3 initializers, 7 nodes, 2 inputs, 2 outputs.
dynamo-ir¶
FAILED
Input mismatch, inputs[0]=(T1r2,float) but names=['x'], model=SignatureFloat1, export='dynamo-ir'
script¶
FAILED
unable to convert dynamic shapes ({0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, None)
SignatureInt1¶
forward¶
def forward(self, x, i: int = 2):
return torch.sigmoid(self.linear(x)) - self.buff + x[:, i : i + 1]
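Both custom exporters below declare an 'i' input but bake the slice bounds into the graph: torch.export specializes python integers at export time. A minimal sketch of that behaviour on a re-created module (SignatureInt1Like is a hypothetical name, not the tested class):

import torch

class SignatureInt1Like(torch.nn.Module):
    # Hedged re-creation of the case above, for illustration only.
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(3, 1)
        self.register_buffer("buff", torch.tensor([0.5]))

    def forward(self, x, i: int = 2):
        return torch.sigmoid(self.linear(x)) - self.buff + x[:, i : i + 1]

ep = torch.export.export(SignatureInt1Like(), (torch.randn(4, 3), 1))
# The integer argument is specialized: the slice start/end appear as constants
# in the graph, matching the fixed Slice bounds in the ONNX graphs below.
print(ep.graph)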
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='i' type=dtype('int64') shape=[1]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='init7_s2_1_-1' type=int64 shape=(2,) -- array([ 1, -1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([0.568, 0.017, 0.519], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.188], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Reshape(linear.weight, init7_s2_-1_1) -> _onx_transpose_p_linear_weight0
Reshape(_onx_transpose_p_linear_weight0, init7_s2_1_-1) -> GemmTransposePattern--_onx_transpose_p_linear_weight0
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
Slice(x, init7_s1_1, init7_s1_2, init7_s1_1) -> slice_2
Add(sub, slice_2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='i' type=dtype('int64') shape=[1]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='init7_s2_1_-1' type=int64 shape=(2,) -- array([ 1, -1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.489, -0.134, 0.232], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.251], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Reshape(linear.weight, init7_s2_-1_1) -> _onx_transpose_p_linear_weight0
Reshape(_onx_transpose_p_linear_weight0, init7_s2_1_-1) -> GemmTransposePattern--_onx_transpose_p_linear_weight0
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub_2
Slice(x, init7_s1_1, init7_s1_2, init7_s1_1) -> slice_2
Add(sub_2, slice_2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-tracing¶
FAILED
[ONNXRuntimeError] : 6 : RUNTIME_EXCEPTION : Non-zero status code returned while running Concat node. Name:'_getitem_slicenSD' Status Message: /home/xadupre/github/onnxruntime/onnxruntime/core/providers/cpu/tensor/concat.cc:139 onnxruntime::common::Status onnxruntime::ConcatBase::PrepareForCompute(onnxruntime::OpKernelContext*, const InlinedTensorsVector&, onnxruntime::Prepare&) const input_rank == reference_rank was false. Ranks of input data are different, cannot concatenate them. expected rank: 1 got: 2
dynamo-ir¶
FAILED
Input mismatch, inputs[0]=(T1r2,int) but names=['x'], model=SignatureInt1, export='dynamo-ir'
script¶
FAILED
unable to convert dynamic shapes ({0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, None)
SignatureInt2¶
forward¶
def forward(self, x, i: int = 2):
return torch.sigmoid(self.linear(x)) - self.buff + x[:, i]
custom-fallback¶
FAILED
None of the following options [ExportOptions(), ExportOptions(strict=False), ExportOptions(decomposition_table='default'), ExportOptions(strict=False, decomposition_table='default'), ExportOptions(dynamo=True), ExportOptions(decomposition_table='default', dynamo=True), ExportOptions(jit=True)] worked, args=(T1r2,int), kwargs=None, exception=
-----
[(ExportOptions(),
RuntimeError('Unable to convert model <class \'experimental_experiment.torch_interpreter.eval.model_cases.SignatureInt2\'>, type(args)=<class \'tuple\'>, type(args[0])=<class \'torch.Tensor\'>, strict=True, input_names=None\n--\ndynamic_shapes={\'x\': {0: <class \'experimental_experiment.torch_interpreter.eval.model_cases.batch\'>}, \'i\': None}\n--\ne=Constraints violated (batch)! For more information, run with TORCH_LOGS="+dynamic".\n - Not all values of batch = L[\'x\'].size()[0] in the specified range satisfy the generated guard L[\'x\'].size()[0] != 9223372036854775807.\n\n--\neee=None\n---exported-program---\ngraph():\n %p_linear_weight : [num_users=1] = placeholder[target=p_linear_weight]\n %p_linear_bias : [num_users=1] = placeholder[target=p_linear_bias]\n %b_buff : [num_users=1] = placeholder[target=b_buff]\n %x : [num_users=2] = placeholder[target=x]\n %i : [num_users=0] = placeholder[target=i]\n %linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %p_linear_weight, %p_linear_bias), kwargs = {})\n %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})\n %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %b_buff), kwargs = {})\n %slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 9223372036854775807), kwargs = {})\n %select : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%slice_1, 1, 1), kwargs = {})\n %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %select), kwargs = {})\n return (add,)')),
(ExportOptions(strict=False),
RuntimeError('Unable to convert model <class \'experimental_experiment.torch_interpreter.eval.model_cases.SignatureInt2\'>, type(args)=<class \'tuple\'>, type(args[0])=<class \'torch.Tensor\'>, strict=False, input_names=None\n--\ndynamic_shapes={\'x\': {0: <class \'experimental_experiment.torch_interpreter.eval.model_cases.batch\'>}, \'i\': None}\n--\ne=Constraints violated (batch)! For more information, run with TORCH_LOGS="+dynamic".\n - Not all values of batch = L[\'args\'][0][0].size()[0] in the specified range satisfy the generated guard L[\'args\'][0][0].size()[0] != 9223372036854775807.\n--\neee=None\n---exported-program---\ngraph():\n %p_linear_weight : [num_users=1] = placeholder[target=p_linear_weight]\n %p_linear_bias : [num_users=1] = placeholder[target=p_linear_bias]\n %b_buff : [num_users=1] = placeholder[target=b_buff]\n %x : [num_users=2] = placeholder[target=x]\n %i : [num_users=0] = placeholder[target=i]\n %linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %p_linear_weight, %p_linear_bias), kwargs = {})\n %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})\n %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %b_buff), kwargs = {})\n %slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 9223372036854775807), kwargs = {})\n %select : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%slice_1, 1, 1), kwargs = {})\n %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %select), kwargs = {})\n return (add,)')),
(ExportOptions(decomposition_table='default'),
RuntimeError('Unable to convert model <class \'experimental_experiment.torch_interpreter.eval.model_cases.SignatureInt2\'>, type(args)=<class \'tuple\'>, type(args[0])=<class \'torch.Tensor\'>, strict=True, input_names=None\n--\ndynamic_shapes={\'x\': {0: <class \'experimental_experiment.torch_interpreter.eval.model_cases.batch\'>}, \'i\': None}\n--\ne=Constraints violated (batch)! For more information, run with TORCH_LOGS="+dynamic".\n - Not all values of batch = L[\'x\'].size()[0] in the specified range satisfy the generated guard L[\'x\'].size()[0] != 9223372036854775807.\n\n--\neee=None\n---exported-program---\ngraph():\n %p_linear_weight : [num_users=1] = placeholder[target=p_linear_weight]\n %p_linear_bias : [num_users=1] = placeholder[target=p_linear_bias]\n %b_buff : [num_users=1] = placeholder[target=b_buff]\n %x : [num_users=2] = placeholder[target=x]\n %i : [num_users=0] = placeholder[target=i]\n %linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %p_linear_weight, %p_linear_bias), kwargs = {})\n %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})\n %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %b_buff), kwargs = {})\n %slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 9223372036854775807), kwargs = {})\n %select : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%slice_1, 1, 1), kwargs = {})\n %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %select), kwargs = {})\n return (add,)')),
(ExportOptions(strict=False, decomposition_table='default'),
RuntimeError('Unable to convert model <class \'experimental_experiment.torch_interpreter.eval.model_cases.SignatureInt2\'>, type(args)=<class \'tuple\'>, type(args[0])=<class \'torch.Tensor\'>, strict=False, input_names=None\n--\ndynamic_shapes={\'x\': {0: <class \'experimental_experiment.torch_interpreter.eval.model_cases.batch\'>}, \'i\': None}\n--\ne=Constraints violated (batch)! For more information, run with TORCH_LOGS="+dynamic".\n - Not all values of batch = L[\'args\'][0][0].size()[0] in the specified range satisfy the generated guard L[\'args\'][0][0].size()[0] != 9223372036854775807.\n--\neee=None\n---exported-program---\ngraph():\n %p_linear_weight : [num_users=1] = placeholder[target=p_linear_weight]\n %p_linear_bias : [num_users=1] = placeholder[target=p_linear_bias]\n %b_buff : [num_users=1] = placeholder[target=b_buff]\n %x : [num_users=2] = placeholder[target=x]\n %i : [num_users=0] = placeholder[target=i]\n %linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %p_linear_weight, %p_linear_bias), kwargs = {})\n %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})\n %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %b_buff), kwargs = {})\n %slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 9223372036854775807), kwargs = {})\n %select : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%slice_1, 1, 1), kwargs = {})\n %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %select), kwargs = {})\n return (add,)')),
(ExportOptions(dynamo=True),
ConstraintViolationError('Constraints violated (batch)! For more information, run with TORCH_LOGS="+dynamic".\n - Not all values of batch = L[\'x\'].size()[0] in the specified range satisfy the generated guard L[\'x\'].size()[0] != 9223372036854775807.\n')),
(ExportOptions(decomposition_table='default', dynamo=True),
ConstraintViolationError('Constraints violated (batch)! For more information, run with TORCH_LOGS="+dynamic".\n - Not all values of batch = L[\'x\'].size()[0] in the specified range satisfy the generated guard L[\'x\'].size()[0] != 9223372036854775807.\n')),
(ExportOptions(jit=True),
RuntimeError("Type 'Tuple[Tensor, int]' cannot be traced. Only Tensors and (possibly nested) Lists, Dicts, and Tuples of Tensors can be traced"))]
custom-dec¶
FAILED
Constraints violated (batch)! For more information, run with TORCH_LOGS="+dynamic".
- Not all values of batch = L['x'].size()[0] in the specified range satisfy the generated guard L['x'].size()[0] != 9223372036854775807.
custom-tracing¶
FAILED
One index is given as an integer i but this requires to append a node 'Squeeze' after this one and this is not yet implemented. You can replace the integer by `i:i+1`
--DEBUG--
[GraphBuilder-EDK] Message starts, there are 5 initializers, 10 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes={'i': None,
'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_types={'_sub_Linear__onx_matmul_input_10': 1,
'_sub_Linear__onx_transpose_weight0': 1,
'_sub_Linear_input_1': 1,
'_sub_Linear_linear': 1,
'_sub_Linear_output': 1,
'buff': 1,
'getitem_axis': 7,
'getitem_axis_0': 7,
'getitem_end': 7,
'getitem_shape': 7,
'i': 7,
'linear': 1,
'linear.bias': 1,
'linear.weight': 1,
'sigmoid': 1,
'sub': 1,
'x': 1}
_known_shapes={'_sub_Linear__onx_matmul_input_10': ('batch', 1),
'_sub_Linear__onx_transpose_weight0': (3, 1),
'_sub_Linear_input_1': ('batch', 3),
'_sub_Linear_linear': ('batch', 1),
'_sub_Linear_output': ('batch', 1),
'buff': (1,),
'getitem_axis': (2,),
'getitem_axis_0': (1,),
'getitem_end': (1,),
'getitem_shape': (2,),
'i': (),
'linear': ('batch', 1),
'linear.bias': (1,),
'linear.weight': (1, 3),
'sigmoid': ('batch', 1),
'sub': ('batch', 1),
'x': ('batch', 3)}
_known_value_shape={'getitem_shape': ('batch', 3)}
_known_constants=['_sub_Linear__onx_transpose_weight0',
'buff',
'getitem_axis',
'getitem_axis_0',
'linear.bias',
'linear.weight']
_known_ranks={}
--TORCH-USERS--
buff -> {sub}
getitem -> {add}
i -> {getitem}
linear -> {sigmoid}
sigmoid -> {sub}
sub -> {add}
x -> {linear, getitem}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), '')) --- 1:2:('batch', 3):
i: ('run_node', (('example_value', torch.int64, torch.Size([])), '')) --- 7:0:():
linear: ('run_node', ('', '')) --- 1:2:('batch', 1):
sigmoid: ('run_node', ('', '')) --- 1:2:('batch', 1):
buff: ('run_node', ('', '')) --- 1:1:(1,):
sub: ('run_node', ('', '')) --- 1:2:('batch', 1):
getitem: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(tracing=True)
-- process.graph_module --
graph():
%x : [num_users=2] = placeholder[target=x]
%i : int [num_users=1] = placeholder[target=i](default=2)
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %buff), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%x, (slice(None, None, None), %i)), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sub, %getitem), kwargs = {})
return add
-- process.progress --
node 6/9 target=<built-in function getitem>
--
[GraphBuilder-EDK.make_tensor_input] x[1:batchx3]
[GraphBuilder-EDK.make_tensor_input] i[7:]
[GraphBuilder-EDK.make_initializer] linear.weight[torch.float32:torch.float32:[0.02789284661412239, -0.11262331902980804, -0.1434551477432251]] - SOURCE: GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
[GraphBuilder-EDK.make_initializer] linear.bias[torch.float32:torch.float32:[-0.37545549869537354]] - SOURCE: GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
[GraphBuilder-EDK.make_initializer] buff[torch.float32:torch.float32:[0.5]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-EDK.make_initializer] getitem_axis[int64:int64:[0, 1]] - SOURCE: DynamoInterpreter._getitem_slice.axis.1
[GraphBuilder-EDK.make_initializer] getitem_axis_0[int64:int64:[0]] - SOURCE: DynamoInterpreter._getitem_slice.axis.2
[GraphBuilder-EDK.make_node] .make_nodes [#:# ] Identity:['x']->['_sub_Linear_input_1']
[GraphBuilder-EDK.make_node] linear [#:# ] Transpose:['linear.weight']->['_sub_Linear__onx_transpose_weight0']
[GraphBuilder-EDK.make_node] Opset [##:# ] MatMul:['_sub_Linear_input_1', '_sub_Linear__onx_transpose_weight0']->['_sub_Linear__onx_matmul_input_10']
[GraphBuilder-EDK.make_node] Opset2 [##:# ] Add:['_sub_Linear__onx_matmul_input_10', 'linear.bias']->['_sub_Linear_linear']
[GraphBuilder-EDK.make_node] .output [#:# ] Identity:['_sub_Linear_linear']->['_sub_Linear_output']
[GraphBuilder-EDK.make_node] .make_nodes2 [#:# ] Identity:['_sub_Linear_output']->['linear']
[GraphBuilder-EDK.make_node] sigmoid [#:# ] Sigmoid:['linear']->['sigmoid']
[GraphBuilder-EDK.make_node] sub [##:# ] Sub:['sigmoid', 'buff']->['sub']
[GraphBuilder-EDK.make_node] _getitem_slicenA [#:# ] Shape:['x']->['getitem_shape']
[GraphBuilder-EDK.make_node] _getitem_slicenB [##:# ] GatherElements:['getitem_shape', 'getitem_axis_0']->['getitem_end']
[GraphBuilder-EDK] Message completed, there are 5 initializers, 10 nodes, 2 inputs, 2 outputs.
dynamo-ir¶
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and summit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'torch._dynamo.exc.UserError'>: Constraints violated (batch)! For more information, run with TORCH_LOGS="+dynamic".
- Not all values of batch = L['args'][0][0].size()[0] in the specified range satisfy the generated guard L['args'][0][0].size()[0] != 9223372036854775807.
(Refer to the full stack trace above for more information.)
script¶
FAILED
unable to convert dynamic shapes {'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, 'i': None}
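The custom-tracing error above suggests a workaround: index with a 1-element slice instead of the integer, then squeeze the extra axis. A minimal sketch of that rewrite (SignatureInt2Rewritten is a hypothetical name); the SignatureInt1 case above uses the i:i+1 form and exports with the custom exporters:

import torch

class SignatureInt2Rewritten(torch.nn.Module):
    # Hedged sketch of the rewrite suggested by the error message above:
    # x[:, i:i+1] followed by squeeze keeps the same values as x[:, i].
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(3, 1)
        self.register_buffer("buff", torch.tensor([0.5]))

    def forward(self, x, i: int = 2):
        col = x[:, i : i + 1].squeeze(1)  # same values as x[:, i]
        return torch.sigmoid(self.linear(x)) - self.buff + col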
SignatureListFixedLength¶
forward¶
def forward(self, x, lx: list):
return (
torch.sigmoid(self.linear(x)) - self.buff + lx[0] * lx[1].sum(axis=1, keepdim=True)
)
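The list argument is flattened at export time: each element becomes its own placeholder, which is why the ONNX graphs below expose lx_0 and lx_1 as separate inputs, and why the dynamic shapes are declared per element. A minimal sketch of the corresponding torch.export call (ListFixedLengthLike is a hypothetical name, not the tested class):

import torch

class ListFixedLengthLike(torch.nn.Module):
    # Hedged re-creation of the case above, for illustration only.
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(3, 1)
        self.register_buffer("buff", torch.tensor([0.5]))

    def forward(self, x, lx: list):
        return torch.sigmoid(self.linear(x)) - self.buff + lx[0] * lx[1].sum(axis=1, keepdim=True)

batch = torch.export.Dim("batch")
ep = torch.export.export(
    ListFixedLengthLike(),
    (torch.randn(4, 3), [torch.randn(4, 1), torch.randn(4, 2)]),
    dynamic_shapes={"x": {0: batch}, "lx": [{0: batch}, {0: batch}]},
)
# The list is flattened into separate placeholders (lx_0, lx_1).
print([n.name for n in ep.graph.nodes if n.op == "placeholder"])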
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='init7_s2_1_-1' type=int64 shape=(2,) -- array([ 1, -1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.531, 0.483, -0.046], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.537], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Reshape(linear.weight, init7_s2_-1_1) -> _onx_transpose_p_linear_weight0
Reshape(_onx_transpose_p_linear_weight0, init7_s2_1_-1) -> GemmTransposePattern--_onx_transpose_p_linear_weight0
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='init7_s2_1_-1' type=int64 shape=(2,) -- array([ 1, -1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.187, -0.343, -0.448], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.315], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul_4
Reshape(linear.weight, init7_s2_-1_1) -> _onx_transpose_p_linear_weight0
Reshape(_onx_transpose_p_linear_weight0, init7_s2_1_-1) -> GemmTransposePattern--_onx_transpose_p_linear_weight0
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub_2
Add(sub_2, mul_4) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-tracing¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx' type='NOTENSOR' shape=None
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.505, -0.071, -0.47 ], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.385], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.get_attr.0
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- DynamoInterpreter.getitem.1
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- DynamoInterpreter.getitem.1
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='init7_s2_1_-1' type=int64 shape=(2,) -- array([ 1, -1]) -- TransposeEqualReshapePattern.apply.new_shape
Reshape(linear.weight, init7_s2_-1_1) -> _sub_Linear__onx_transpose_weight0
Reshape(_sub_Linear__onx_transpose_weight0, init7_s2_1_-1) -> GemmTransposePattern--_sub_Linear__onx_transpose_weight0
Gemm(x, GemmTransposePattern--_sub_Linear__onx_transpose_weight0, linear.bias, transB=1) -> _sub_Linear_linear
Sigmoid(_sub_Linear_linear) -> sigmoid
Sub(sigmoid, buff) -> sub
SequenceAt(lx, init7_s_0) -> getitem
SequenceAt(lx, init7_s_1) -> getitem_1
ReduceSum(getitem_1, init7_s1_1, keepdims=1) -> sum_1
Mul(getitem, sum_1) -> mul
Add(sub, mul) -> output
output: name='output' type=dtype('float32') shape=['batch', 1]
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 3]
input: name='lx_0' type=dtype('float32') shape=['s0', 1]
input: name='lx_1' type=dtype('float32') shape=['s0', 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.346, -0.519, 0.411], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.332], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
Constant(value=[1]) -> val_3
ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul_4
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, buff) -> sub_2
Add(sub_2, mul_4) -> add_15
output: name='add_15' type=dtype('float32') shape=['s0', 1]
script¶
FAILED
unable to convert dynamic shapes {'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, 'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}
SignatureListFixedWithNone¶
forward¶
def forward(self, lx):
x = lx[0]
if lx[1] is not None:
x += lx[1]
if lx[2] is not None:
x += lx[2]
return x
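Every failure below reports the same mismatch: the list has three elements but dynamic_shapes['lx'] only describes two. A minimal sketch of a dynamic_shapes value whose structure matches the input, assuming a None entry is accepted for the element that is None at call time:

import torch

# Hedged sketch: dynamic_shapes must mirror the pytree structure of the
# inputs; the third entry is None because lx[2] is None in this case.
batch = torch.export.Dim("batch")
dynamic_shapes = {"lx": [{0: batch}, {0: batch}, None]}

Whether the export then succeeds is a separate question; this sketch only addresses the structure mismatch reported below.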
custom-fallback¶
FAILED
None of the following options [ExportOptions(), ExportOptions(strict=False), ExportOptions(decomposition_table='default'), ExportOptions(strict=False, decomposition_table='default'), ExportOptions(dynamo=True), ExportOptions(decomposition_table='default', dynamo=True), ExportOptions(jit=True)] worked, args=(#3[T1r2,T1r2,None],), kwargs=None, exception=
-----
[(ExportOptions(),
RuntimeError("Unable to convert model <class 'experimental_experiment.torch_interpreter.eval.model_cases.SignatureListFixedWithNone'>, type(args)=<class 'tuple'>, type(args[0])=<class 'list'>, strict=True, input_names=None\n--\ndynamic_shapes={'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}\n--\ne=Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements\nFor more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation\n--\neee=None\n---exported-program---\ngraph():\n %lx_0 : [num_users=1] = placeholder[target=lx_0]\n %lx_1 : [num_users=1] = placeholder[target=lx_1]\n %lx_2 : [num_users=0] = placeholder[target=lx_2]\n %add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%lx_0, %lx_1), kwargs = {})\n return (add_,)")),
(ExportOptions(strict=False),
RuntimeError("Unable to convert model <class 'experimental_experiment.torch_interpreter.eval.model_cases.SignatureListFixedWithNone'>, type(args)=<class 'tuple'>, type(args[0])=<class 'list'>, strict=False, input_names=None\n--\ndynamic_shapes={'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}\n--\ne=Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements\nFor more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation\n--\neee=None\n---exported-program---\ngraph():\n %lx_0 : [num_users=1] = placeholder[target=lx_0]\n %lx_1 : [num_users=1] = placeholder[target=lx_1]\n %lx_2 : [num_users=0] = placeholder[target=lx_2]\n %add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%lx_0, %lx_1), kwargs = {})\n return (add_,)")),
(ExportOptions(decomposition_table='default'),
RuntimeError("Unable to convert model <class 'experimental_experiment.torch_interpreter.eval.model_cases.SignatureListFixedWithNone'>, type(args)=<class 'tuple'>, type(args[0])=<class 'list'>, strict=True, input_names=None\n--\ndynamic_shapes={'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}\n--\ne=Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements\nFor more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation\n--\neee=None\n---exported-program---\ngraph():\n %lx_0 : [num_users=1] = placeholder[target=lx_0]\n %lx_1 : [num_users=1] = placeholder[target=lx_1]\n %lx_2 : [num_users=0] = placeholder[target=lx_2]\n %add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%lx_0, %lx_1), kwargs = {})\n return (add_,)")),
(ExportOptions(strict=False, decomposition_table='default'),
RuntimeError("Unable to convert model <class 'experimental_experiment.torch_interpreter.eval.model_cases.SignatureListFixedWithNone'>, type(args)=<class 'tuple'>, type(args[0])=<class 'list'>, strict=False, input_names=None\n--\ndynamic_shapes={'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}\n--\ne=Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements\nFor more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation\n--\neee=None\n---exported-program---\ngraph():\n %lx_0 : [num_users=1] = placeholder[target=lx_0]\n %lx_1 : [num_users=1] = placeholder[target=lx_1]\n %lx_2 : [num_users=0] = placeholder[target=lx_2]\n %add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%lx_0, %lx_1), kwargs = {})\n return (add_,)")),
(ExportOptions(dynamo=True),
UserError("Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements\nFor more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation")),
(ExportOptions(decomposition_table='default', dynamo=True),
UserError("Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements\nFor more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation")),
(ExportOptions(jit=True),
RuntimeError("Type 'Tuple[List[Optional[Tensor]]]' cannot be traced. Only Tensors and (possibly nested) Lists, Dicts, and Tuples of Tensors can be traced"))]
custom-dec¶
FAILED
Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements
For more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation
custom-tracing¶
FAILED
Unable to create an input 'lx' with type #3[T1r2,T1r2,None]
--DEBUG--
[GraphBuilder-FHW] Message starts, there are 0 initializers, 0 nodes, 0 inputs, 0 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = WrapSym(batch)
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': ('lx', 0)},
{'axis': 0, 'input_name': ('lx', 1)}]}
dynamic_dimensions_source_flat=[('lx', 0), ('lx', 1)]
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes={'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>},
{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}
_known_types={}
_known_shapes={}
_known_value_shape={}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
lx -> {getitem_2, getitem_3, getitem, getitem_1, getitem_4}
--TORCH-SHAPES--
lx: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(tracing=True)
-- process.graph_module --
graph():
%lx : [num_users=5] = placeholder[target=lx]
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%lx, 0), kwargs = {})
%getitem_1 : [num_users=0] = call_function[target=operator.getitem](args = (%lx, 1), kwargs = {})
%getitem_2 : [num_users=1] = call_function[target=operator.getitem](args = (%lx, 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%getitem, %getitem_2), kwargs = {})
%getitem_3 : [num_users=0] = call_function[target=operator.getitem](args = (%lx, 2), kwargs = {})
%getitem_4 : [num_users=1] = call_function[target=operator.getitem](args = (%lx, 2), kwargs = {})
%add_1 : [num_users=1] = call_function[target=operator.add](args = (%add, %getitem_4), kwargs = {})
return add_1
-- process.progress --
node 0/9 target=lx
--
[GraphBuilder-FHW] Message completed, there are 0 initializers, 0 nodes, 0 inputs, 0 outputs.
dynamo-ir¶
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and summit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'torch._dynamo.exc.UserError'>: Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements
For more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation
(Refer to the full stack trace above for more information.)
script¶
FAILED
unable to convert dynamic shapes {'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}
SignatureListVariableLength¶
forward¶
def forward(self, x, lx: list):
t = torch.cat(lx, dim=1).sum(axis=1, keepdim=True)
return torch.sigmoid(self.linear(x)) - self.buff + t
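Although the list length is meant to vary, the exported signature is frozen by the example inputs: two elements at export time yield the two placeholders lx_0 and lx_1 in the graphs below. A minimal sketch illustrating this (ListVariableLengthLike is a hypothetical name, not the tested class):

import torch

class ListVariableLengthLike(torch.nn.Module):
    # Hedged re-creation of the case above, for illustration only.
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(3, 1)
        self.register_buffer("buff", torch.tensor([0.5]))

    def forward(self, x, lx: list):
        t = torch.cat(lx, dim=1).sum(axis=1, keepdim=True)
        return torch.sigmoid(self.linear(x)) - self.buff + t

ep = torch.export.export(
    ListVariableLengthLike(),
    (torch.randn(4, 3), [torch.randn(4, 1), torch.randn(4, 2)]),
)
# Two list elements at export time -> two placeholders; a list of a different
# length would require exporting again.
print([n.name for n in ep.graph.nodes if n.op == "placeholder"])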
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='init7_s2_1_-1' type=int64 shape=(2,) -- array([ 1, -1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.144, -0.375, -0.205], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.012], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Concat(lx_0, lx_1, axis=1) -> cat
ReduceSum(cat, init7_s1_1, keepdims=1) -> sum_1
Reshape(linear.weight, init7_s2_-1_1) -> _onx_transpose_p_linear_weight0
Reshape(_onx_transpose_p_linear_weight0, init7_s2_1_-1) -> GemmTransposePattern--_onx_transpose_p_linear_weight0
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
Add(sub, sum_1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='init7_s2_1_-1' type=int64 shape=(2,) -- array([ 1, -1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([0.325, 0.316, 0.199], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.523], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Concat(lx_0, lx_1, axis=1) -> cat
ReduceSum(cat, init7_s1_1, keepdims=1) -> sum_1
Reshape(linear.weight, init7_s2_-1_1) -> _onx_transpose_p_linear_weight0
Reshape(_onx_transpose_p_linear_weight0, init7_s2_1_-1) -> GemmTransposePattern--_onx_transpose_p_linear_weight0
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub_4
Add(sub_4, sum_1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-tracing¶
FAILED
Type is unknown for result 'l', known_types={'x': 1}
--DEBUG--
[GraphBuilder-BNK] Message starts, there are 0 initializers, 0 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'},
{'axis': 0, 'input_name': ('lx', 0)},
{'axis': 0, 'input_name': ('lx', 1)}]}
dynamic_dimensions_source_flat=['x', ('lx', 0), ('lx', 1)]
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes={'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>},
{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}],
'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_types={'x': 1}
_known_shapes={'x': ('batch', 3)}
_known_value_shape={}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
cat -> {sum_1}
lx -> {cat}
x -> {linear}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), '')) --- 1:2:('batch', 3):
lx: ('run_node', ('', '')) --- :::
cat: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export-export_options=ExportOptions(tracing=True)
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%lx : list [num_users=1] = placeholder[target=lx]
%cat : [num_users=1] = call_function[target=torch.cat](args = (%lx, 1), kwargs = {})
%sum_1 : [num_users=1] = call_method[target=sum](args = (%cat,), kwargs = {axis: 1, keepdim: True})
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %buff), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sub, %sum_1), kwargs = {})
return add
-- process.progress --
node 2/10 target=<built-in method cat of type object at 0x7fd5086f6ec0>
--
[GraphBuilder-BNK.make_tensor_input] x[1:batchx3]
[GraphBuilder-BNK.make_tensor_input] lx[0:]
[GraphBuilder-BNK] Message completed, there are 0 initializers, 0 nodes, 2 inputs, 2 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 3]
input: name='lx_0' type=dtype('float32') shape=['s0', 1]
input: name='lx_1' type=dtype('float32') shape=['s0', 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.535, 0.118, -0.217], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.441], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
Concat(lx_0, lx_1, axis=1) -> cat
Constant(value=[1]) -> val_3
ReduceSum(cat, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, buff) -> sub_4
Add(sub_4, sum_1) -> add_15
output: name='add_15' type=dtype('float32') shape=['s0', 1]
script¶
FAILED
unable to convert dynamic shapes {'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, 'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}
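The TorchScript exporter does not accept torch.export-style dynamic_shapes, and the harness cannot translate a spec that nests a list of tensors. A hedged sketch of the manual equivalent with dynamic_axes, with the module reconstructed from the traced graph and initializers above (the class name and the output file name are assumptions):

<<<
import torch


class SignatureListFixedLength(torch.nn.Module):
    # Reconstructed from the traced graph above: Linear(3, 1), a (1,) buffer,
    # and a list input concatenated then summed.
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(3, 1)
        self.register_buffer("buff", torch.tensor([0.5]))

    def forward(self, x, lx):
        t = torch.cat(lx, 1).sum(axis=1, keepdim=True)
        return torch.sigmoid(self.linear(x)) - self.buff + t


model = SignatureListFixedLength()
x, lx = torch.randn(4, 3), [torch.randn(4, 1), torch.randn(4, 2)]
torch.onnx.export(
    model,
    (x, lx),
    "signature_list.onnx",  # placeholder path
    input_names=["x", "lx_0", "lx_1"],
    output_names=["output_0"],
    dynamic_axes={"x": {0: "batch"}, "lx_0": {0: "batch"}, "lx_1": {0: "batch"}},
)
>>>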
SignatureShapeAsIndex¶
forward¶
def forward(self, x, y):
t = torch.sigmoid(self.linear(x)) + x
return t[:, : y.shape[1]]
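The slice bound is read from a runtime shape, so the exporter has to keep y.shape[1] symbolic instead of baking in the example value. A minimal stand-alone sketch with torch.export (the Linear(3, 1) layer and the max=3 bound are assumptions chosen so the slice stays inside t):

<<<
import torch


class SignatureShapeAsIndex(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(3, 1)  # assumed layer size

    def forward(self, x, y):
        t = torch.sigmoid(self.linear(x)) + x
        return t[:, : y.shape[1]]


batch = torch.export.Dim("batch")
length = torch.export.Dim("length", max=3)  # assumed bound on y.shape[1]
ep = torch.export.export(
    SignatureShapeAsIndex(),
    (torch.randn(4, 3), torch.randn(4, 2)),
    dynamic_shapes={"x": {0: batch}, "y": {0: batch, 1: length}},
)
print(ep.graph)  # y.shape[1] becomes a sym_size node feeding the slice end
>>>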
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 'length']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='init7_s2_1_-1' type=int64 shape=(2,) -- array([ 1, -1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.352, 0.054, -0.048], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.006], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Reshape(linear.weight, init7_s2_-1_1) -> _onx_transpose_p_linear_weight0
Reshape(_onx_transpose_p_linear_weight0, init7_s2_1_-1) -> GemmTransposePattern--_onx_transpose_p_linear_weight0
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Add(sigmoid, x) -> add
Shape(y, end=2, start=1) -> _shape_y0
Slice(add, init7_s1_0, _shape_y0, init7_s1_1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'length']
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 'length']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='init7_s2_1_-1' type=int64 shape=(2,) -- array([ 1, -1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.407, 0.566, -0.414], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.451], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Reshape(linear.weight, init7_s2_-1_1) -> _onx_transpose_p_linear_weight0
Reshape(_onx_transpose_p_linear_weight0, init7_s2_1_-1) -> GemmTransposePattern--_onx_transpose_p_linear_weight0
Gemm(x, GemmTransposePattern--_onx_transpose_p_linear_weight0, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Add(sigmoid, x) -> add_6
Shape(y, end=2, start=1) -> _shape_y0
Slice(add_6, init7_s1_0, _shape_y0, init7_s1_1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'length']
custom-tracing¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 'length']
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.3 , -0.224, 0.249], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.205], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='getitem_1_axis' type=int64 shape=(2,) -- array([0, 1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_1_start' type=int64 shape=(2,) -- array([0, 0]) -- DynamoInterpreter._getitem_slice.2
init: name='getitem_1_step' type=int64 shape=(2,) -- array([1, 1]) -- DynamoInterpreter._getitem_slice.3
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- TransposeEqualReshapePattern.apply.new_shape
init: name='init7_s2_1_-1' type=int64 shape=(2,) -- array([ 1, -1]) -- TransposeEqualReshapePattern.apply.new_shape
Reshape(linear.weight, init7_s2_-1_1) -> _sub_Linear__onx_transpose_weight0
Reshape(_sub_Linear__onx_transpose_weight0, init7_s2_1_-1) -> GemmTransposePattern--_sub_Linear__onx_transpose_weight0
Gemm(x, GemmTransposePattern--_sub_Linear__onx_transpose_weight0, linear.bias, transB=1) -> _sub_Linear_linear
Sigmoid(_sub_Linear_linear) -> sigmoid
Add(sigmoid, x) -> add
Shape(add) -> getitem_1_shape
GatherElements(getitem_1_shape, init7_s1_0) -> getitem_1_end
Shape(y) -> getattr_1
Gather(getattr_1, init7_s1_1) -> _onx_gather_getattr_10
Concat(getitem_1_end, _onx_gather_getattr_10, axis=0) -> _onx_concat_getitem_1_end0
Slice(add, getitem_1_start, _onx_concat_getitem_1_end0, getitem_1_axis, getitem_1_step) -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1']
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 3]
input: name='y' type=dtype('float32') shape=['s0', 's2']
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.145, -0.372, -0.4 ], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.025], dtype=float32)
Constant(value_ints=[-1]) -> val_17
Shape(y, end=2, start=1) -> val_0
Squeeze(val_0) -> sym_size_int_5
Reshape(sym_size_int_5, val_17, allowzero=0) -> val_18
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Add(sigmoid, x) -> add_6
Constant(value=[0]) -> val_15
Constant(value=[1]) -> val_22
Constant(value_ints=[1]) -> val_23
Slice(add_6, val_15, val_18, val_22, val_23) -> slice_2
output: name='slice_2' type=dtype('float32') shape=['s0', 's2']
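In this graph the slice end is not a constant: Shape(y, start=1, end=2) extracts y.shape[1] and feeds it to Slice, so the output width follows the second input. A quick sanity check of that behaviour with onnxruntime (the file name is a placeholder for wherever the exported model was saved):

<<<
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("signature_shape_as_index.onnx")  # placeholder path
for length in (1, 2, 3):
    x = np.random.rand(4, 3).astype(np.float32)
    y = np.random.rand(4, length).astype(np.float32)
    (out,) = sess.run(None, {"x": x, "y": y})
    assert out.shape == (4, length)  # output width tracks y.shape[1]
>>>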
script¶
opset: domain='' version=17
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 'length']
init: name='args_3' type=float32 shape=(1, 3) -- array([-0.189, 0.167, -0.302], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.572], dtype=float32)
Constant(value=1) -> /Constant_output_0
Gemm(x, args_3, linear.bias, alpha=1.00, beta=1.00, transB=1) -> /linear/Gemm_output_0
Sigmoid(/linear/Gemm_output_0) -> /Sigmoid_output_0
Add(/Sigmoid_output_0, x) -> /Add_output_0
Shape(y) -> /Shape_output_0
Gather(/Shape_output_0, /Constant_output_0, axis=0) -> /Gather_output_0
Constant(value=[1]) -> /Constant_1_output_0
Constant(value=[0]) -> /Constant_2_output_0
Constant(value=[0]) -> /Constant_3_output_0
Unsqueeze(/Gather_output_0, /Constant_3_output_0) -> /Unsqueeze_output_0
Constant(value=[1]) -> /Constant_4_output_0
Slice(/Add_output_0, /Constant_2_output_0, /Unsqueeze_output_0, /Constant_1_output_0, /Constant_4_output_0) -> 21
output: name='21' type=dtype('float32') shape=['batch', 'Slice21_dim_1']
TypeBFloat16¶
forward¶
def forward(self, x):
xb = x.to(torch.bfloat16)
return (xb + xb).to(torch.float32)
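In the exported graphs below the round trip shows up as Cast(..., to=16) followed by Cast(..., to=1); those integers are the ONNX element type codes for bfloat16 and float32. A small check with the onnx package:

<<<
from onnx import TensorProto

# ONNX element type codes used by the Cast nodes below.
assert TensorProto.BFLOAT16 == 16
assert TensorProto.FLOAT == 1
>>>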
custom-fallback¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
Add(x, x) -> add-x
Cast(add-x, to=16) -> add
Cast(add, to=1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
custom-dec¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
Add(x, x) -> add-x
Cast(add-x, to=16) -> add_3
Cast(add_3, to=1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
custom-tracing¶
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['batch', 4]
Add(x, x) -> add-x
Cast(add-x, to=16) -> add
Cast(add, to=1) -> output
output: name='output' type=dtype('float32') shape=['batch', 4]
dynamo-ir¶
FAILED
[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(14) node with name 'node_Add_1'
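The dynamo-ir graph keeps the Add in bfloat16, and the runtime used here has no bfloat16 Add kernel registered, hence the NOT_IMPLEMENTED error. A minimal repro sketch that builds a one-node bfloat16 Add and tries to load it (whether this raises depends on the onnxruntime build, so treat it as an assumption about the environment that produced the failure above):

<<<
from onnx import TensorProto, helper
import onnxruntime as ort

info = helper.make_tensor_value_info
graph = helper.make_graph(
    [helper.make_node("Add", ["a", "b"], ["c"])],
    "bf16_add",
    [info("a", TensorProto.BFLOAT16, [2]), info("b", TensorProto.BFLOAT16, [2])],
    [info("c", TensorProto.BFLOAT16, [2])],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 18)])
try:
    ort.InferenceSession(model.SerializeToString())
except Exception as e:
    print(type(e).__name__, ":", e)  # expected: NOT_IMPLEMENTED for the Add node
>>>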
script¶
FAILED
number of input names provided (4) exceeded number of inputs (1)
Summary¶
Pass/FAIL matrix per case for the exporters custom-dec, custom-fallback, custom-tracing, dynamo-ir and script; each case section above lists its individual per-exporter result.