Exported ONNX with Dynamic Shapes¶
The following script shows the exported program for many short cases and the various ways to retrieve the torch.fx.Graph equivalent to the original model.
<<<

import inspect
import textwrap
import pandas
from experimental_experiment.torch_interpreter.eval import discover, run_exporter
from experimental_experiment.ext_test_case import unit_test_going
from experimental_experiment.helpers import pretty_onnx

cases = discover()
print()
print(":ref:`Summary <lod-summary-exported-program>`")
print()
sorted_cases = sorted(cases.items())
if unit_test_going():
    sorted_cases = sorted_cases[:3]
for name, cls_model in sorted_cases:
    print(f"* :ref:`{name} <lod-model-case-export-{name}>`")
print()

obs = []
for name, cls_model in sorted(cases.items()):
    print()
    print(f".. _lod-model-case-export-{name}:")
    print()
    print(name)
    print("=" * len(name))
    print()
    print("forward")
    print("+++++++")
    print()
    print("::")
    print()
    print(
        textwrap.indent(textwrap.dedent(inspect.getsource(cls_model.forward)), " ")
    )
    print()
    for exporter in (
        "custom-fallback",
        "custom-tracing",
        "dynamo-ir",
        "script",
    ):
        expname = exporter.replace("export-", "")
        print()
        print(expname)
        print("+" * len(expname))
        print()
        res = run_exporter(exporter, cls_model, True, quiet=True)
        case_ref = f":ref:`{name} <lod-model-case-export-{name}>`"
        if "exported" in res:
            print("::")
            print()
            print(textwrap.indent(pretty_onnx(res["onnx"]), " "))
            print()
            obs.append(dict(case=case_ref, error="", exporter=exporter))
        else:
            print("**FAILED**")
            print()
            print("::")
            print()
            print(textwrap.indent(str(res["error"]), " "))
            print()
            obs.append(dict(case=case_ref, error="FAIL", exporter=exporter))

print()
print(".. _lod-summary-exported-program:")
print()
print("Summary")
print("+++++++")
print()
df = pandas.DataFrame(obs)
piv = df.pivot(index="case", columns="exporter", values="error")
print(piv.to_markdown(tablefmt="rst"))
print()

>>>
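To reproduce a single entry of this page interactively, the same helpers can be called directly. The sketch below is a minimal example, assuming that discover() returns a dictionary mapping the case names listed below to their model classes and that run_exporter accepts the same arguments as in the script above.

from experimental_experiment.torch_interpreter.eval import discover, run_exporter
from experimental_experiment.helpers import pretty_onnx

cases = discover()
# "ControlFlowCond" is one of the cases documented below; any other name works too.
res = run_exporter("custom-fallback", cases["ControlFlowCond"], True, quiet=True)
if "exported" in res:
    # same textual dump as the ones shown in this page
    print(pretty_onnx(res["onnx"]))
else:
    # the failure message reported under FAILED
    print(res["error"])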
AtenAsStrided¶
forward¶
def forward(self, x):
    y = torch.as_strided(x, (2, 2, 8, 4), (128, 8, 16, 1))
    return y
custom-fallback¶
FAILED
The implementation is still incorrect, x='x', shape=('batch', 2, 8, 8), size=[2, 2, 8, 4], stride=[128, 8, 16, 1], storage_offset=None
--DEBUG--
[GraphBuilder-UPS] Message starts, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
--CONSTRAINTS--
batch = {'s0'}
s0 = {'batch'}
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
s0 = 's0'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_alias={'s0': 'batch'}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_value_shape={}
_known_types={'x': 1}
_known_shapes={'x': ('batch', 2, 8, 8)}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
as_strided -> {output}
x -> {as_strided}
--TORCH-SHAPES--
x: ('run_node', ('', ('val', torch.float32, torch.Size([s0, 2, 8, 8])))) --- 1:4:('batch', 2, 8, 8):
as_strided: ('run_node', ('', ('val', torch.float32, torch.Size([2, 2, 8, 4])))) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%as_strided : [num_users=1] = call_function[target=torch.ops.aten.as_strided.default](args = (%x, [2, 2, 8, 4], [128, 8, 16, 1]), kwargs = {})
return (as_strided,)
-- process.progress --
node 1/3 target=aten.as_strided.default
--
[GraphBuilder-UPS.make_tensor_input] x[1:batchx2x8x8]
[GraphBuilder-UPS] Message completed, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
custom-tracing¶
FAILED
The implementation is still incorrect, x='x', shape=('batch', 2, 8, 8), size=(2, 2, 8, 4), stride=(128, 8, 16, 1), storage_offset=None
--DEBUG--
[GraphBuilder-BPG] Message starts, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_value_shape={}
_known_types={'x': 1}
_known_shapes={'x': ('batch', 2, 8, 8)}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
as_strided -> {output}
x -> {as_strided}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([2, 2, 8, 8])), '')) --- 1:4:('batch', 2, 8, 8):
as_strided: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%as_strided : [num_users=1] = call_function[target=torch.as_strided](args = (%x, (2, 2, 8, 4), (128, 8, 16, 1)), kwargs = {})
return as_strided
-- process.progress --
node 1/3 target=<built-in method as_strided of type object at 0x7fa9d1ef7ba0>
--
[GraphBuilder-BPG.make_tensor_input] x[1:batchx2x8x8]
[GraphBuilder-BPG] Message completed, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=['s0', 2, 8, 8]
Constant(value_ints=[-1]) -> neg_1
Constant(value=[2, 2, 8, ...) -> val_0
Constant(value=[128, 8, 1...) -> val_1
Constant(value=[4]) -> rank_tensor
Constant(value_int=0) -> indices
SequenceEmpty() -> one_seq
Constant(value_int=4) -> rank_0
Loop(rank_0, , one_seq, indices, body=G1) -> one_seq_16, indices_17
Constant(value_ints=[-1]) -> tmp_18
Reshape(x, tmp_18) -> self_flatten
Constant(value_int=0) -> storage_offset
CastLike(storage_offset, indices_17) -> storage_offset_cast
Add(indices_17, storage_offset_cast) -> indices_19
Gather(self_flatten, indices_19) -> as_strided
output: name='as_strided' type=dtype('float32') shape=[2, 2, 8, 4]
----- subgraph ---- Loop - n6_2 - att.body=G1 -- level=1 -- i,cond_in,one_seq_1,indices_2 -> cond_out,one_seq_15,indices_13
input: name='i' type=dtype('int64') shape=None
input: name='cond_in' type=dtype('bool') shape=None
input: name='one_seq_1' type='NOTENSOR' shape=None
input: name='indices_2' type='NOTENSOR' shape=None
Constant(value_floats=[1.0]) -> tmp_14
SequenceInsert(one_seq_1, tmp_14) -> one_seq_15
Constant(value=4) -> rank_3_cast
Sub(rank_3_cast, i) -> tmp
Constant(value=1) -> int64_1_cast
Sub(tmp, int64_1_cast) -> j
Reshape(j, neg_1) -> j_tensor
Gather(val_0, j_tensor, axis=0) -> size_dim_j
Slice(val_0, j_tensor, rank_tensor) -> size_after_j
Expand(indices_2, size_after_j) -> indices_4
Gather(val_1, j_tensor, axis=0) -> stride_dim_j
Constant(value=0) -> int64_0_cast
Constant(value=1) -> int64_1_5_cast
Range(int64_0_cast, size_dim_j, int64_1_5_cast) -> tmp_6
Mul(tmp_6, stride_dim_j) -> add_value
Constant(value=0) -> int64_0_7_cast
Equal(i, int64_0_7_cast) -> cond
If(cond, then_branch=G2, else_branch=G3) -> shape_11
Reshape(add_value, shape_11) -> add_value_12
Add(indices_4, add_value_12) -> indices_13
Identity(cond_in) -> cond_out
output: name='cond_out' type=dtype('bool') shape=None
output: name='one_seq_15' type='NOTENSOR' shape=None
output: name='indices_13' type='NOTENSOR' shape=None
----- subgraph ---- If - n20 - att.then_branch=G2 -- level=2 -- -> shape
Identity(size_dim_j) -> shape
output: name='shape' type=dtype('int64') shape=[1]
----- subgraph ---- If - n20 - att.else_branch=G3 -- level=2 -- -> shape_10
Cast(size_dim_j, to=1) -> tmp_8
ConcatFromSequence(one_seq_1, axis=0) -> ones
Concat(tmp_8, ones, axis=0) -> shape_9
Cast(shape_9, to=7) -> shape_10
output: name='shape_10' type=dtype('int64') shape=None
----- subgraph ---- If - n20 - att.then_branch=G2 -- level=1 -- -> shape
Identity(size_dim_j) -> shape
output: name='shape' type=dtype('int64') shape=[1]
----- subgraph ---- If - n20 - att.else_branch=G3 -- level=1 -- -> shape_10
Cast(size_dim_j, to=1) -> tmp_8
ConcatFromSequence(one_seq_1, axis=0) -> ones
Concat(tmp_8, ones, axis=0) -> shape_9
Cast(shape_9, to=7) -> shape_10
output: name='shape_10' type=dtype('int64') shape=None
script¶
FAILED
number of input names provided (2) exceeded number of inputs (1)
AtenInterpolate¶
forward¶
def forward(self, x):
    y = torch.nn.functional.interpolate(
        x,
        scale_factor=2.0,
        mode="bilinear",
        recompute_scale_factor=False,
    )
    return y
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 2, 3, 4]
init: name='init7_s2_6_8' type=int64 shape=(2,) -- array([6, 8]) -- _aten_upsample_output_size.rsize
Shape(x, end=2, start=0) -> _onx_shape0
Concat(_onx_shape0, init7_s2_6_8, axis=0) -> _onx_concat0
Resize(x, , , _onx_concat0, coordinate_transformation_mode=b'pytorch_half_pixel', mode=b'linear', nearest_mode=b'floor') -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 2, 6, 8]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 2, 3, 4]
init: name='init7_s2_6_8' type=int64 shape=(2,) -- array([6, 8]) -- _aten_upsample_output_size.rsize
Shape(x, end=2, start=0) -> _onx_shape0
Concat(_onx_shape0, init7_s2_6_8, axis=0) -> _onx_concat0
Resize(x, , , _onx_concat0, coordinate_transformation_mode=b'pytorch_half_pixel', mode=b'linear', nearest_mode=b'floor') -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1', 'd_output_2', 'd_output_3']
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['2', 2, 3, 4]
Constant(value_floats=[1.0,1.0,2.0,2.0]) -> val_0
Resize(x, , val_0, keep_aspect_ratio_policy=b'stretch', antialias=0, extrapolation_value=0.00, exclude_outside=0, nearest_mode=b'floor', coordinate_transformation_mode=b'pytorch_half_pixel', cubic_coeff_a=-0.75, mode=b'linear') -> upsample_bilinear2d
output: name='upsample_bilinear2d' type=dtype('float32') shape=['', 2, 6, 8]
script¶
FAILED
number of input names provided (2) exceeded number of inputs (1)
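The successful exports above can be checked numerically with onnxruntime. The sketch below is an illustration only; it assumes res["onnx"] holds an onnx ModelProto (as suggested by the call to pretty_onnx in the script) and uses the input name 'x' and the shapes shown in the custom-fallback output of AtenInterpolate.

import numpy as np
import onnxruntime as ort
from experimental_experiment.torch_interpreter.eval import discover, run_exporter

cases = discover()
res = run_exporter("custom-fallback", cases["AtenInterpolate"], True, quiet=True)
sess = ort.InferenceSession(
    res["onnx"].SerializeToString(), providers=["CPUExecutionProvider"]
)
x = np.random.rand(5, 2, 3, 4).astype(np.float32)  # the 'batch' dimension is set to 5
got = sess.run(None, {"x": x})
print(got[0].shape)  # expected (5, 2, 6, 8) according to the output shape above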
AtenNonZero¶
forward¶
def forward(self, x):
    y = torch.nonzero(x)
    return y
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 4]
NonZero(x) -> _onx_nonzero0
Transpose(_onx_nonzero0, perm=[1,0]) -> output_0
output: name='output_0' type=dtype('int64') shape=['u1', 2]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 4]
NonZero(x) -> _onx_nonzero0
Transpose(_onx_nonzero0, perm=[1,0]) -> output
output: name='output' type=dtype('int64') shape=['d_output_0', 'd_output_1']
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='pkg.onnxscript.torch_lib' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 4]
NonZero(x) -> tmp
Transpose(tmp, perm=[1,0]) -> nonzero
output: name='nonzero' type=dtype('int64') shape=['', 2]
script¶
FAILED
number of input names provided (3) exceeded number of inputs (1)
AtenNonZeroTuple¶
forward¶
def forward(self, x):
    y = torch.nonzero(x, as_tuple=True)
    return y[0], y[1]
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- DynamoInterpreter.getitem.1##shape_type_compute._cast_inputs.1(ge)
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- DynamoInterpreter.getitem.1
NonZero(x) -> _onx_nonzero0
SplitToSequence(_onx_nonzero0, axis=0, keepdims=0) -> nonzero_numpy
SequenceAt(nonzero_numpy, init7_s_0) -> output_0
SequenceAt(nonzero_numpy, init7_s_1) -> output_1
output: name='output_0' type=dtype('int64') shape=['u1']
output: name='output_1' type=dtype('int64') shape=['u1']
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- DynamoInterpreter.getitem.1
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- DynamoInterpreter.getitem.1
NonZero(x) -> _onx_nonzero0
SplitToSequence(_onx_nonzero0, axis=0, keepdims=0) -> nonzero
SequenceAt(nonzero, init7_s_0) -> output_0
SequenceAt(nonzero, init7_s_1) -> output_1
output: name='output_0' type=dtype('int64') shape=['d_output_0_0']
output: name='output_1' type=dtype('int64') shape=['d_output_1_0']
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='pkg.onnxscript.torch_lib' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 4]
Constant(value_ints=[1]) -> unbind_axis
NonZero(x) -> tmp
Transpose(tmp, perm=[1,0]) -> nonzero
Split(nonzero, axis=1, num_outputs=2) -> unbind_split_0, unbind_split_1
Squeeze(unbind_split_0, unbind_axis) -> unbind_split_0_squeeze
Identity(unbind_split_0_squeeze) -> getitem
Squeeze(unbind_split_1, unbind_axis) -> unbind_split_1_squeeze
Identity(unbind_split_1_squeeze) -> getitem_1
output: name='getitem' type=dtype('int64') shape=['']
output: name='getitem_1' type=dtype('int64') shape=['']
script¶
FAILED
number of input names provided (3) exceeded number of inputs (1)
AtenRollPos¶
forward¶
def forward(self, x):
    return torch.roll(x, 1, -1)
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_-1, init7_s1_4, init7_s1_-1) -> _onx_slice0
Slice(x, init7_s1_0, init7_s1_-1, init7_s1_-1) -> _onx_slice02
Concat(_onx_slice0, _onx_slice02, axis=-1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3, 4]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_-1, init7_s1_4, init7_s1_-1) -> _onx_slice0
Slice(x, init7_s1_0, init7_s1_-1, init7_s1_-1) -> _onx_slice02
Concat(_onx_slice0, _onx_slice02, axis=-1) -> output
output: name='output' type=dtype('float32') shape=['batch', 3, 4]
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=['s0', 3, 4]
Constant(value_ints=[-1]) -> neg_1
Constant(value=[-1]) -> dim_tensor
Constant(value=[1]) -> shift_tensor
Shape(x) -> tmp_0
Gather(tmp_0, dim_tensor, axis=0) -> tmp_1
Sub(tmp_1, shift_tensor) -> slice_length_3
Constant(value_ints=[0]) -> tmp_4
Slice(x, tmp_4, slice_length_3, dim_tensor) -> suffix
Size(x) -> tmp_5
Reshape(tmp_5, neg_1) -> tmp_6
Slice(x, slice_length_3, tmp_6, dim_tensor) -> prefix
Concat(prefix, suffix, axis=-1) -> roll
output: name='roll' type=dtype('float32') shape=['', '', '']
script¶
FAILED
number of input names provided (2) exceeded number of inputs (1)
AtenRollRelu¶
forward¶
def forward(self, x):
    return torch.relu(torch.roll(x, -1, -1))
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_1, init7_s1_4, init7_s1_-1) -> _onx_slice0
Slice(x, init7_s1_0, init7_s1_1, init7_s1_-1) -> _onx_slice02
Concat(_onx_slice0, _onx_slice02, axis=-1) -> _onx_concat0
Relu(_onx_concat0) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3, 4]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_1, init7_s1_4, init7_s1_-1) -> _onx_slice0
Slice(x, init7_s1_0, init7_s1_1, init7_s1_-1) -> _onx_slice02
Concat(_onx_slice0, _onx_slice02, axis=-1) -> _onx_concat0
Relu(_onx_concat0) -> output
output: name='output' type=dtype('float32') shape=['batch', 3, 4]
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=['s0', 3, 4]
Constant(value_ints=[-1]) -> neg_1
Constant(value=[-1]) -> dim_tensor
Constant(value=[-1]) -> shift_tensor
Constant(value=[1]) -> slice_length_3
Constant(value_ints=[0]) -> tmp_4
Slice(x, tmp_4, slice_length_3, dim_tensor) -> suffix
Size(x) -> tmp_5
Reshape(tmp_5, neg_1) -> tmp_6
Slice(x, slice_length_3, tmp_6, dim_tensor) -> prefix
Concat(prefix, suffix, axis=-1) -> roll
Relu(roll) -> relu
output: name='relu' type=dtype('float32') shape=['s0', 3, '']
script¶
FAILED
number of input names provided (2) exceeded number of inputs (1)
BuildInIsInstance¶
forward¶
def forward(self, x, lx: list | torch.Tensor):
    if isinstance(lx, list):
        t = lx[0] * lx[1].sum(axis=1, keepdim=True)
        return torch.sigmoid(self.linear(x)) - self.buff + t
    return torch.sigmoid(self.linear(x)) - self.buff + lx
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['s1', 1]
input: name='lx_1' type=dtype('float32') shape=['s2', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.271, 0.311, -0.04 ], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.572], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, linear.weight, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-tracing¶
FAILED
Type is unknown for result 'lx', known_types={'x': 1, 'linear.weight': 1, 'linear.bias': 1, '_sub_Linear_input_1': 1, '_sub_Linear__onx_transpose0': 1, '_sub_Linear__onx_matmul0': 1, '_sub_Linear_linear': 1, '_sub_Linear_output': 1, 'linear': 1, 'sigmoid': 1, 'buff': 1, 'sub': 1}
--DEBUG--
[GraphBuilder-ISE] Message starts, there are 3 initializers, 8 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'},
{'axis': 0, 'input_name': 'lx_0'},
{'axis': 0, 'input_name': 'lx_1'}]}
dynamic_alias={}
dynamic_shapes={'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>},
{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}],
'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_value_shape={}
_known_types={'_sub_Linear__onx_matmul0': 1,
'_sub_Linear__onx_transpose0': 1,
'_sub_Linear_input_1': 1,
'_sub_Linear_linear': 1,
'_sub_Linear_output': 1,
'buff': 1,
'linear': 1,
'linear.bias': 1,
'linear.weight': 1,
'sigmoid': 1,
'sub': 1,
'x': 1}
_known_shapes={'_sub_Linear__onx_matmul0': ('batch', 1),
'_sub_Linear__onx_transpose0': (3, 1),
'_sub_Linear_input_1': ('batch', 3),
'_sub_Linear_linear': ('batch', 1),
'_sub_Linear_output': ('batch', 1),
'buff': (1,),
'linear': ('batch', 1),
'linear.bias': (1,),
'linear.weight': (1, 3),
'sigmoid': ('batch', 1),
'sub': ('batch', 1),
'x': ('batch', 3)}
_known_constants=['_sub_Linear__onx_transpose0', 'buff', 'linear.bias', 'linear.weight']
_known_ranks={}
--TORCH-USERS--
add -> {output}
buff -> {sub}
linear -> {sigmoid}
lx -> {add}
sigmoid -> {sub}
sub -> {add}
x -> {linear}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), '')) --- 1:2:('batch', 3):
lx: ('run_node', ('', '')) --- :::
linear: ('run_node', ('', '')) --- 1:2:('batch', 1):
sigmoid: ('run_node', ('', '')) --- 1:2:('batch', 1):
buff: ('run_node', ('', '')) --- 1:1:(1,):
sub: ('run_node', ('', '')) --- 1:2:('batch', 1):
add: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%lx : list | torch.Tensor [num_users=1] = placeholder[target=lx]
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %buff), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sub, %lx), kwargs = {})
return add
-- process.progress --
node 6/8 target=<built-in function add>
--
[GraphBuilder-ISE.make_tensor_input] x[1:batchx3]
[GraphBuilder-ISE.make_tensor_input] lx[0:]
[GraphBuilder-ISE.make_initializer] linear.weight[torch.float32:torch.float32:[0.03526715561747551, -0.11311865597963333, 0.3860451877117157]] - SOURCE: GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
[GraphBuilder-ISE.make_initializer] linear.bias[torch.float32:torch.float32:[0.378177672624588]] - SOURCE: GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
[GraphBuilder-ISE.make_initializer] buff[torch.float32:torch.float32:[0.5]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-ISE.make_node] .make_nodes [#:# ] Identity:['x']->['_sub_Linear_input_1']
[GraphBuilder-ISE.make_node] linear [#:# ] Transpose:['linear.weight']->['_sub_Linear__onx_transpose0']
[GraphBuilder-ISE.make_node] Opset [##:# ] MatMul:['_sub_Linear_input_1', '_sub_Linear__onx_transpose0']->['_sub_Linear__onx_matmul0']
[GraphBuilder-ISE.make_node] Opset2 [##:# ] Add:['_sub_Linear__onx_matmul0', 'linear.bias']->['_sub_Linear_linear']
[GraphBuilder-ISE.make_node] .output [#:# ] Identity:['_sub_Linear_linear']->['_sub_Linear_output']
[GraphBuilder-ISE.make_node] .make_nodes2 [#:# ] Identity:['_sub_Linear_output']->['linear']
[GraphBuilder-ISE.make_node] Opset3 [#:# ] Sigmoid:['linear']->['sigmoid']
[GraphBuilder-ISE.make_node] sub [##:# ] Sub:['sigmoid', 'buff']->['sub']
[GraphBuilder-ISE] Message completed, there are 3 initializers, 8 nodes, 2 inputs, 2 outputs..
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 3]
input: name='lx_0' type=dtype('float32') shape=['s0', 1]
input: name='lx_1' type=dtype('float32') shape=['s0', 2]
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.52], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
Constant(value=[1]) -> val_3
ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul_1
Constant(value=[[0.023468...) -> t
Gemm(x, t, linear.bias, beta=1.00, transB=0, alpha=1.00, transA=0) -> addmm
Sigmoid(addmm) -> sigmoid
Sub(sigmoid, buff) -> sub_4
Add(sub_4, mul_1) -> add_15
output: name='add_15' type=dtype('float32') shape=['s0', 1]
script¶
FAILED
unable to convert dynamic shapes {'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, 'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}
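The dynamic_shapes structure rejected above mirrors the model signature: a dictionary for the tensor x and a list of dictionaries for the list lx. As a hedged illustration, the same specification can be written for torch.export.export with torch.export.Dim; the module below is a hypothetical stand-in for BuildInIsInstance restricted to the list branch, not the original source.

import torch

class Model(torch.nn.Module):
    # hypothetical stand-in for BuildInIsInstance (list branch only)
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(3, 1)
        self.register_buffer("buff", torch.tensor([0.5]))

    def forward(self, x, lx: list):
        t = lx[0] * lx[1].sum(axis=1, keepdim=True)
        return torch.sigmoid(self.linear(x)) - self.buff + t

batch = torch.export.Dim("batch")
ep = torch.export.export(
    Model(),
    (torch.randn(4, 3), [torch.randn(4, 1), torch.randn(4, 2)]),
    dynamic_shapes={"x": {0: batch}, "lx": [{0: batch}, {0: batch}]},
)
print(ep)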
BuildInLen¶
forward¶
def forward(self, x, lx: list):
    t = lx[0] * lx[1].sum(axis=1, keepdim=True)
    if len(lx) > 2:
        t = t + lx[2].sum(axis=1, keepdim=True)
    return torch.sigmoid(self.linear(x)) - self.buff + t
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['s1', 1]
input: name='lx_1' type=dtype('float32') shape=['s2', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.54 , -0.305, 0.104], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.133], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, linear.weight, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-tracing¶
FAILED
len(.) expects an integer, len needs to be replaced. You should use _len.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 3]
input: name='lx_0' type=dtype('float32') shape=['s0', 1]
input: name='lx_1' type=dtype('float32') shape=['s0', 2]
init: name='linear.bias' type=float32 shape=(1,) -- array([0.394], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
Constant(value=[1]) -> val_3
ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul_1
Constant(value=[[0.423568...) -> t
Gemm(x, t, linear.bias, beta=1.00, transB=0, alpha=1.00, transA=0) -> addmm
Sigmoid(addmm) -> sigmoid
Sub(sigmoid, buff) -> sub_4
Add(sub_4, mul_1) -> add_15
output: name='add_15' type=dtype('float32') shape=['s0', 1]
script¶
FAILED
unable to convert dynamic shapes {'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, 'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}
ComplexPolar¶
forward¶
def forward(self, x, angle):
    return torch.polar(x, angle)
custom-fallback¶
FAILED
[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. Type Error: Type 'tensor(complex64)' of input parameter (_onx_cast02) of operator (Mul) in node (polar5) is invalid.
custom-tracing¶
FAILED
[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. Type Error: Type 'tensor(complex64)' of input parameter (_onx_cast02) of operator (Mul) in node (polar5) is invalid.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='pkg.onnxscript.torch_lib' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 4]
input: name='angle' type=dtype('float32') shape=['s0', 4]
Constant(value=[-1]) -> int64_m1_1d
Cos(angle) -> tmp
Mul(x, tmp) -> tmp_0
Unsqueeze(tmp_0, int64_m1_1d) -> real
Sin(angle) -> tmp_1
Mul(x, tmp_1) -> tmp_2
Constant(value=[-1]) -> int64_m1_1d_3
Unsqueeze(tmp_2, int64_m1_1d_3) -> imag
Concat(real, imag, axis=-1) -> polar
output: name='polar' type=dtype('float32') shape=['s0', 4, 2]
script¶
FAILED
Exporting the operator 'aten::polar' to ONNX opset version 17 is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: https://github.com/pytorch/pytorch/issues.
ControlFlowCond¶
forward¶
def forward(self, x):
    def true_fn(x):
        return torch.sin(x)
    def false_fn(x):
        return torch.cos(x)
    return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - aten_cond - att.else_branch=G1 -- level=1 -- -> cond#0
false_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - aten_cond - att.then_branch=G2 -- level=1 -- -> cond#0
true_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'x'
Sin(x) -> output_0
output: name='output_0' type=? shape=?
----- function name=false_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions' version=1
input: 'x'
Cos(x) -> output_0
output: name='output_0' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
FAILED
no dynamic shape
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
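The names true_graph_0 and false_graph_0 appearing in the custom-fallback output come from the graph produced by torch.export before any ONNX conversion. A minimal sketch, assuming a dynamic first dimension and using a hypothetical reduction of ControlFlowCond, shows where they originate:

import torch

class Model(torch.nn.Module):
    # hypothetical reduction of ControlFlowCond
    def forward(self, x):
        def true_fn(x):
            return torch.sin(x)

        def false_fn(x):
            return torch.cos(x)

        return torch.cond(x.sum() > 0, true_fn, false_fn, [x])

batch = torch.export.Dim("batch")
ep = torch.export.export(Model(), (torch.randn(5, 3),), dynamic_shapes={"x": {0: batch}})
# The exported graph calls torch.ops.higher_order.cond with two submodules,
# true_graph_0 and false_graph_0, which the exporters above translate into the
# then/else branches of an ONNX If node.
print(ep.graph)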
ControlFlowCond2Inputs¶
forward¶
def forward(self, x, y):
    def true_fn(x, y):
        return torch.sin(x), torch.cos(x) + y
    def false_fn(x, y):
        return torch.cos(x), torch.sin(x) + y
    return torch.cond(x.sum() > 0, true_fn, false_fn, [x, y])
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=['batch', 3]
output: name='output_1' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - aten_cond - att.else_branch=G1 -- level=1 -- -> cond#0,cond#1
false_graph_0[local_functions](x, y) -> cond#0, cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- subgraph ---- If - aten_cond - att.then_branch=G2 -- level=1 -- -> cond#0,cond#1
true_graph_0[local_functions](x, y) -> cond#0, cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'x'
input: 'y'
Cos(x) -> cos
Add(cos, y) -> output_1
Sin(x) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
----- function name=false_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions' version=1
input: 'x'
input: 'y'
Cos(x) -> output_0
Sin(x) -> sin
Add(sin, y) -> output_1
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and summit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'IndexError'>: tuple index out of range
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 1), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
ControlFlowCond2Outputs¶
forward¶
def forward(self, x):
    def true_fn(x):
        return torch.sin(x), torch.cos(x)
    def false_fn(x):
        return torch.cos(x), torch.sin(x)
    return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=['batch', 3]
output: name='output_1' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - aten_cond - att.else_branch=G1 -- level=1 -- -> cond#0,cond#1
false_graph_0[local_functions](x) -> cond#0, cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- subgraph ---- If - aten_cond - att.then_branch=G2 -- level=1 -- -> cond#0,cond#1
true_graph_0[local_functions](x) -> cond#0, cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'x'
Cos(x) -> output_1
Sin(x) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
----- function name=false_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions' version=1
input: 'x'
Cos(x) -> output_0
Sin(x) -> output_1
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and summit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'IndexError'>: tuple index out of range
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 1), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
ControlFlowCondConstant¶
forward¶
def forward(self, x):
    def true_fn(x):
        return torch.sin(x) - torch.ones(x.shape, dtype=x.dtype)
    def false_fn(x):
        return torch.cos(x) + torch.ones((1, 1024), dtype=x.dtype)
    return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 1024]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1024]
----- subgraph ---- If - aten_cond - att.else_branch=G1 -- level=1 -- -> cond#0
false_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - aten_cond - att.then_branch=G2 -- level=1 -- -> cond#0
true_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'x'
Constant(value=[1024]) -> init7_s1_1024
Sin(x) -> sin
Shape(x, end=1, start=0) -> _onx_shape0
Concat(_onx_shape0, init7_s1_1024, axis=0) -> _onx_concat0
ConstantOfShape(_onx_concat0, value=[1.0]) -> ones
Sub(sin, ones) -> output_0
output: name='output_0' type=? shape=?
----- function name=false_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions' version=1
input: 'x'
Constant(value=[1, 1024]) -> init7_s2_1_1024
ConstantOfShape(init7_s2_1_1024, value=[1.0]) -> ones
Cos(x) -> cos
Add(cos, ones) -> output_0
output: name='output_0' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.torch.__subgraph__' version=1
input: name='x' type=dtype('float32') shape=['s0', 1024]
Constant(value=0.0) -> scalar_tensor_default
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, scalar_tensor_default) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=['s0', 1024]
----- subgraph ---- If - node_If_4 - att.then_branch=G1 -- level=1 -- -> sub_3_true_graph_0
Constant(value=[1024]) -> val_0_2
Shape(x, end=1, start=0) -> sym_size_int
Concat(sym_size_int, val_0_2, axis=0) -> val_1
Sin(x) -> sin
Constant(value=1.0) -> val_4
Expand(val_4, val_1) -> ones
Sub(sin, ones) -> sub_3_true_graph_0
output: name='sub_3_true_graph_0' type=dtype('float32') shape=['', 1024]
----- subgraph ---- If - node_If_4 - att.else_branch=G2 -- level=1 -- -> add_6_false_graph_0
Constant(value=[[1.0, 1.0...) -> ones_2
Cos(x) -> cos
Add(cos, ones_2) -> add_6_false_graph_0
output: name='add_6_false_graph_0' type=dtype('float32') shape=['s0', 1024]
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
ControlFlowCondNestedModule¶
forward¶
def forward(self, x):
    def true_fn(x):
        return self.submodule(x)
    def false_fn(x):
        return x - self.weight
    y = torch.cond(x.sum() > 0, true_fn, false_fn, [x])
    return y
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('int64') shape=['batch']
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- shape_type_compute._cast_inputs.1(gt_Scalar)
init: name='submodule.weight' type=float32 shape=(1,) -- array([100.], dtype=float32)-- DynamoInterpret.placeholder.1/P(submodule.weight)
init: name='weight' type=float32 shape=(1,) -- array([42.], dtype=float32)-- DynamoInterpret.placeholder.1/P(weight)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init7_s_0) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch']
----- subgraph ---- If - aten_cond - att.else_branch=G1 -- level=1 -- -> cond#0
false_graph_0[local_functions](x, submodule.weight, weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - aten_cond - att.then_branch=G2 -- level=1 -- -> cond#0
true_graph_0[local_functions](x, submodule.weight, weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions.0
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'p_submodule_weight'
input: 'x'
Cast(x, to=1) -> _onx_cast0
Mul(_onx_cast0, p_submodule_weight) -> output_0
output: name='output_0' type=? shape=?
----- function name=false_graph_0 domain=local_functions.0
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
input: 'p_submodule_weight'
input: 'x'
Cast(x, to=1) -> _onx_cast0
Div(_onx_cast0, p_submodule_weight) -> output_0
output: name='output_0' type=? shape=?
----- function name=true_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
input: 'x'
input: 'p_submodule_weight'
input: 'p_weight'
Abs(x) -> abs_1
ReduceSum(abs_1, keepdims=0) -> sum_1
Constant(value=100) -> init7_s_100
Greater(sum_1, init7_s_100) -> gt
If(gt, else_branch=G3, then_branch=G4) -> output_0
output: name='output_0' type=? shape=?
----- subgraph ---- If - aten_cond - att.else_branch=G3 -- level=1 -- -> cond#0
false_graph_0[local_functions.0](p_submodule_weight, x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - aten_cond - att.then_branch=G4 -- level=1 -- -> cond#0
true_graph_0[local_functions.0](p_submodule_weight, x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=false_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: 'x'
input: 'p_submodule_weight'
input: 'p_weight'
Cast(x, to=1) -> _onx_cast0
Sub(_onx_cast0, p_weight) -> output_0
output: name='output_0' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.torch.__subgraph__' version=1
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('int64') shape=['s0']
init: name='weight' type=float32 shape=(1,) -- array([42.], dtype=float32)
init: name='submodule.weight' type=float32 shape=(1,) -- array([100.], dtype=float32)
Constant(value=0) -> val_0
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, val_0) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=['s0']
----- subgraph ---- If - node_If_3 - att.then_branch=G1 -- level=1 -- -> getitem_true_graph_0
Abs(x) -> abs_1
ReduceSum(abs_1, noop_with_empty_axes=0, keepdims=0) -> sum_1_2
Constant(value=100) -> val_0_2
Greater(sum_1_2, val_0_2) -> gt_2
If(gt_2, then_branch=G3, else_branch=G4) -> getitem_true_graph_0
output: name='getitem_true_graph_0' type='NOTENSOR' shape=None
----- subgraph ---- If - node_If_4 - att.then_branch=G3 -- level=2 -- -> mul_1_true_graph_0__true_graph_0
Cast(x, to=1) -> convert_element_type_default
Mul(convert_element_type_default, submodule.weight) -> mul_1_true_graph_0__true_graph_0
output: name='mul_1_true_graph_0__true_graph_0' type=dtype('float32') shape=['s0']
----- subgraph ---- If - node_If_4 - att.else_branch=G4 -- level=2 -- -> div_true_graph_0__false_graph_0
Cast(x, to=1) -> convert_element_type_default_2
Div(convert_element_type_default_2, submodule.weight) -> div_true_graph_0__false_graph_0
output: name='div_true_graph_0__false_graph_0' type=dtype('float32') shape=['s0']
----- subgraph ---- If - node_If_4 - att.then_branch=G3 -- level=1 -- -> mul_1_true_graph_0__true_graph_0
Cast(x, to=1) -> convert_element_type_default
Mul(convert_element_type_default, submodule.weight) -> mul_1_true_graph_0__true_graph_0
output: name='mul_1_true_graph_0__true_graph_0' type=dtype('float32') shape=['s0']
----- subgraph ---- If - node_If_4 - att.else_branch=G4 -- level=1 -- -> div_true_graph_0__false_graph_0
Cast(x, to=1) -> convert_element_type_default_2
Div(convert_element_type_default_2, submodule.weight) -> div_true_graph_0__false_graph_0
output: name='div_true_graph_0__false_graph_0' type=dtype('float32') shape=['s0']
----- subgraph ---- If - node_If_3 - att.else_branch=G2 -- level=1 -- -> sub_1_false_graph_0
Cast(x, to=1) -> convert_element_type_default_3
Sub(convert_element_type_default_3, weight) -> sub_1_false_graph_0
output: name='sub_1_false_graph_0' type=dtype('float32') shape=['s0']
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
ControlFlowNestCond¶
forward¶
def forward(self, x):
    def true_fn2(x):
        def true_fn1(x):
            return torch.sin(x)
        def false_fn1(x):
            return torch.cos(x)
        return torch.cond(x.sum() < 0, true_fn1, false_fn1, [x])
    def false_fn2(x):
        return -x
    return torch.cond(x.sum() > 0, true_fn2, false_fn2, [x])
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - aten_cond - att.else_branch=G1 -- level=1 -- -> cond#0
false_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - aten_cond - att.then_branch=G2 -- level=1 -- -> cond#0
true_graph_0[local_functions](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=true_graph_0 domain=local_functions.0
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'x'
Sin(x) -> output_0
output: name='output_0' type=? shape=?
----- function name=false_graph_0 domain=local_functions.0
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
input: 'x'
Cos(x) -> output_0
output: name='output_0' type=? shape=?
----- function name=true_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
input: 'x'
Constant(value=0.0) -> init1_s_
ReduceSum(x, keepdims=0) -> sum_1
Less(sum_1, init1_s_) -> lt
If(lt, else_branch=G3, then_branch=G4) -> output_0
output: name='output_0' type=? shape=?
----- subgraph ---- If - aten_cond - att.else_branch=G3 -- level=1 -- -> cond#0
false_graph_0[local_functions.0](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - aten_cond - att.then_branch=G4 -- level=1 -- -> cond#0
true_graph_0[local_functions.0](x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- function name=false_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: 'x'
Neg(x) -> output_0
output: name='output_0' type=? shape=?
custom-tracing¶
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.torch.__subgraph__' version=1
input: name='x' type=dtype('float32') shape=['s0', 3]
Constant(value=0.0) -> scalar_tensor_default
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, scalar_tensor_default) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=['s0', 3]
----- subgraph ---- If - node_If_4 - att.then_branch=G1 -- level=1 -- -> getitem_true_graph_0
Constant(value=0.0) -> scalar_tensor_default_2
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1_2
Less(sum_1_2, scalar_tensor_default_2) -> lt
If(lt, then_branch=G3, else_branch=G4) -> getitem_true_graph_0
output: name='getitem_true_graph_0' type='NOTENSOR' shape=None
----- subgraph ---- If - node_If_4_2 - att.then_branch=G3 -- level=2 -- -> sin_true_graph_0__true_graph_0
Sin(x) -> sin_true_graph_0__true_graph_0
output: name='sin_true_graph_0__true_graph_0' type=dtype('float32') shape=['s0', 3]
----- subgraph ---- If - node_If_4_2 - att.else_branch=G4 -- level=2 -- -> cos_true_graph_0__false_graph_0
Cos(x) -> cos_true_graph_0__false_graph_0
output: name='cos_true_graph_0__false_graph_0' type=dtype('float32') shape=['s0', 3]
----- subgraph ---- If - node_If_4_2 - att.then_branch=G3 -- level=1 -- -> sin_true_graph_0__true_graph_0
Sin(x) -> sin_true_graph_0__true_graph_0
output: name='sin_true_graph_0__true_graph_0' type=dtype('float32') shape=['s0', 3]
----- subgraph ---- If - node_If_4_2 - att.else_branch=G4 -- level=1 -- -> cos_true_graph_0__false_graph_0
Cos(x) -> cos_true_graph_0__false_graph_0
output: name='cos_true_graph_0__false_graph_0' type=dtype('float32') shape=['s0', 3]
----- subgraph ---- If - node_If_4 - att.else_branch=G2 -- level=1 -- -> neg_false_graph_0
Neg(x) -> neg_false_graph_0
output: name='neg_false_graph_0' type=dtype('float32') shape=['s0', 3]
script¶
FAILED
Detected that you are using FX to torch.jit.trace a dynamo-optimized function. This is not supported at the moment.
ControlFlowScan¶
forward¶
def forward(self, x):
    init = torch.zeros_like(x[0])
    carry, out = torch.ops.higher_order.scan(
        ControlFlowScan.add, [init], [x], dim=0, reverse=False, additional_inputs=[]
    )
    return carry
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
ConstantOfShape(init7_s1_3, value=[0.0]) -> zeros_like
Scan(zeros_like, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> output_0, scan#1
output: name='output_0' type=dtype('float32') shape=[3]
----- subgraph ---- Scan - aten_scan - att.body=G1 -- level=1 -- init_0_zeros_like,scan_0_x -> output_0,output_1
input: name='init_0_zeros_like' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_zeros_like, scan_0_x) -> output_0, output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
Add(arg0_1, arg1_1) -> output_0
Identity(output_0) -> output_1
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
Unable to symbolically trace HigherOrderOperators
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and summit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7fa8cc5714f0>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like], [%x], 0, False, []), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)
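The combine function ControlFlowScan.add is referenced in forward but its source is not printed here. Judging from the body of scan_combine_graph_0 above (an Add followed by an Identity on the same result), it presumably computes a single sum returned both as the new carry and as the scanned output. The sketch below is an inference from the exported graph, not the original source; depending on the torch version, the returned values may or may not need to be wrapped in lists matching [init] and [x].

# inferred sketch of the combine function used by torch.ops.higher_order.scan
def add(carry, y):
    next_carry = carry + y                # the Add node in scan_combine_graph_0
    return [next_carry], [next_carry]     # new carry and per-step output (the Identity node)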
ControlFlowScan2Carried¶
forward¶
def forward(self, x):
    init1 = torch.zeros_like(x[0])
    init2 = torch.ones_like(x[0])
    carry1, carry2, out1, out2 = torch.ops.higher_order.scan(
        ControlFlowScan2Carried.add,
        [init1, init2],
        [x, x * 2],
        dim=0,
        reverse=False,
        additional_inputs=[],
    )
    return carry1, carry2, out1, out2
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_' type=float32 shape=() -- array([2.], dtype=float32)-- shape_type_compute._cast_inputs.1(mul_Tensor)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
ConstantOfShape(init7_s1_4, value=[1.0]) -> ones_like
ConstantOfShape(init7_s1_4, value=[0.0]) -> zeros_like
Reshape(init1_s_, init7_s1_1) -> _onx_reshape0
Mul(x, _onx_reshape0) -> _onx_mul0
Scan(zeros_like, ones_like, x, _onx_mul0, body=G1, num_scan_inputs=2, scan_input_directions=[0,0], scan_output_axes=[0,0], scan_output_directions=[0,0]) -> output_0, output_1, output_2, output_3
output: name='output_0' type=dtype('float32') shape=[4]
output: name='output_1' type=dtype('float32') shape=[4]
output: name='output_2' type=dtype('float32') shape=['batch', 4]
output: name='output_3' type=dtype('float32') shape=['batch', 4]
----- subgraph ---- Scan - aten_scan - att.body=G1 -- level=1 -- init_0_zeros_like,init_1_ones_like,scan_0_x,scan_1_mul -> output_0,output_1,output_2,output_3
input: name='init_0_zeros_like' type='NOTENSOR' shape=None
input: name='init_1_ones_like' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
input: name='scan_1_mul' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_zeros_like, init_1_ones_like, scan_0_x, scan_1_mul) -> output_0, output_1, output_2, output_3
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
output: name='output_2' type='NOTENSOR' shape=None
output: name='output_3' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
input: 'arg2_1'
input: 'arg3_1'
Add(arg0_1, arg2_1) -> output_0
Identity(output_0) -> output_2
Mul(arg1_1, arg3_1) -> output_1
Identity(output_1) -> output_3
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
output: name='output_2' type=? shape=?
output: name='output_3' type=? shape=?
custom-tracing¶
FAILED
Unable to symbolically trace HigherOrderOperators
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and submit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7fa8cc5714f0>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=4] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like, %ones_like], [%x, %mul], 0, False, []), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)
ControlFlowScanCDist¶
forward¶
def forward(self, x):
    carry, out = torch.ops.higher_order.scan(
        ControlFlowScanCDist.dist,
        [x],
        [x],
        dim=0,
        reverse=False,
        additional_inputs=[],
    )
    return out
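ControlFlowScanCDist.dist itself is not listed here. Reconstructed from the Scan body below (Reshape, Sub, Mul, ReduceSum, Sqrt), the case computes pairwise distances between the rows of x. A minimal eager sketch, given as an assumption:

import torch

def cdist_reference(x):
    # each scan step compares one row of x against the carried copy of x
    out = []
    for t in range(x.shape[0]):
        d = ((x - x[t].reshape(1, -1)) ** 2).sum(dim=1).sqrt()
        out.append(d)
    return torch.stack(out)  # shape (batch, batch), matching output_0 below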
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 4]
Scan(x, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'batch']
----- subgraph ---- Scan - aten_scan - att.body=G1 -- level=1 -- init_0_x,scan_0_x -> output_0,output_1
input: name='init_0_x' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_x, scan_0_x) -> output_0, output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
Constant(value=[1, -1]) -> init7_s2_1_-1
Reshape(arg1_1, init7_s2_1_-1) -> reshape
Sub(arg0_1, reshape) -> sub
Mul(sub, sub) -> mul
Constant(value=[1]) -> init7_s1_1
ReduceSum(mul, init7_s1_1, keepdims=0) -> sum_1
Sqrt(sum_1) -> output_1
Identity(arg0_1) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
Unable to symbolically trace HigherOrderOperators
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and submit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7fa8cc5714f0>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%x], [%x], 0, False, []), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)
ControlFlowScanCDist2¶
forward¶
def forward(self, x):
    z = torch.tensor([0], dtype=torch.float32)
    y = x.clone()
    out = torch.ops.higher_order.scan(
        ControlFlowScanCDist2.dist,
        [z],
        [x],
        dim=0,
        reverse=False,
        additional_inputs=[y],
    )
    return out[1]
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_lifted_tensor_0' type=float32 shape=(1,) -- array([0.], dtype=float32)-- DynamoInterpret.placeholder.0
Identity(x) -> hidden_input_scan_0_clone
Scan(c_lifted_tensor_0, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'batch']
----- subgraph ---- Scan - aten_scan - att.body=G1 -- level=1 -- init_0_detach_,scan_0_x -> output_0,output_1
input: name='init_0_detach_' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_detach_, scan_0_x, hidden_input_scan_0_clone) -> output_0, output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
input: 'arg2_1'
Constant(value=[1, -1]) -> init7_s2_1_-1
Reshape(arg1_1, init7_s2_1_-1) -> reshape
Sub(arg2_1, reshape) -> sub
Mul(sub, sub) -> mul
Constant(value=[1]) -> init7_s1_1
ReduceSum(mul, init7_s1_1, keepdims=0) -> sum_1
Sqrt(sum_1) -> output_1
Identity(arg0_1) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
[CustomProxy(clone)] can only be of (<class 'torch.Tensor'>, <class 'int'>, <class 'torch.SymInt'>) but got (<class 'experimental_experiment.torch_interpreter.tracing.CustomProxy'>,)
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and submit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7fa8cc5714f0>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%lift_fresh_copy], [%x], 0, False, [%clone]), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)
ControlFlowScanCDistXY¶
forward¶
def forward(self, x, y):
    carry, out = torch.ops.higher_order.scan(
        ControlFlowScanCDistXY.dist,
        [y],
        [x],
        dim=0,
        reverse=False,
        additional_inputs=[],
    )
    return out
custom-fallback¶
opset: domain='' version=18
opset: domain='local_functions' version=1
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['x_rows', 'dim']
input: name='y' type=dtype('float32') shape=['y_rows', 'dim']
Scan(y, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=['x_rows', 'y_rows']
----- subgraph ---- Scan - aten_scan - att.body=G1 -- level=1 -- init_0_y,scan_0_x -> output_0,output_1
input: name='init_0_y' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
scan_combine_graph_0[local_functions](init_0_y, scan_0_x) -> output_0, output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
----- function name=scan_combine_graph_0 domain=local_functions
----- doc_string: function_options=FunctionOptions(export_as_function=Tru...
opset: domain='' version=18
input: 'arg0_1'
input: 'arg1_1'
Constant(value=[1, -1]) -> init7_s2_1_-1
Reshape(arg1_1, init7_s2_1_-1) -> reshape
Sub(arg0_1, reshape) -> sub
Mul(sub, sub) -> mul
Constant(value=[1]) -> init7_s1_1
ReduceSum(mul, init7_s1_1, keepdims=0) -> sum_1
Sqrt(sum_1) -> output_1
Identity(arg0_1) -> output_0
output: name='output_0' type=? shape=?
output: name='output_1' type=? shape=?
custom-tracing¶
FAILED
Unable to symbolically trace HigherOrderOperators
dynamo-ir¶
FAILED
Failed to convert the exported program to an ONNX model. This is step 3/3 of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
- If there is an internal error during ONNX conversion, debug the error and submit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'torch.onnx._internal.exporter._errors.DispatchError'>: No ONNX function found for <torch._higher_order_ops.scan.ScanOp object at 0x7fa8cc5714f0>. Failure message: No decompositions registered for the real-valued input
⬆️
<class 'torch.onnx._internal.exporter._errors.ConversionError'>: Error when translating node %scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%y], [%x], 0, False, []), kwargs = {}). See the stack trace for more information.
(Refer to the full stack trace above for more information.)
script¶
FAILED
could not find kernel for HigherOrderOperator scan at dispatch key DispatchKey.??? (resolved from DispatchKey.???)
CreateFromShape¶
forward¶
def forward(self, x):
    y = torch.ones((x.shape[0], x.shape[1] + 1))
    return y
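The dynamic dimensions 'dx'/'dy' (custom-fallback) and 's0'/'s1' (dynamo-ir) in the outputs below come from the dynamic-shape specification used when exporting the case. A minimal sketch of reproducing a similar export directly with torch.export, under the assumption that both input dimensions are marked dynamic:

import torch

class CreateFromShape(torch.nn.Module):
    def forward(self, x):
        return torch.ones((x.shape[0], x.shape[1] + 1))

# mark both dimensions of x as dynamic (assumed settings, for illustration only)
dx, dy = torch.export.Dim("dx"), torch.export.Dim("dy")
ep = torch.export.export(
    CreateFromShape(), (torch.randn(3, 4),), dynamic_shapes={"x": {0: dx, 1: dy}}
)
print(ep)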
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- shape_type_compute._cast_inputs.1(add)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
Reshape(init7_s_1, init7_s1_1) -> _onx_reshape0
Shape(x, end=1, start=0) -> _onx_shape0
Shape(x, end=2, start=1) -> _onx_shape02
Add(_onx_shape02, _onx_reshape0) -> _onx_add0
Concat(_onx_shape0, _onx_add0, axis=0) -> _onx_concat0
ConstantOfShape(_onx_concat0, value=[1.0]) -> output_0
output: name='output_0' type=dtype('float32') shape=['dx', 's1 + 1']
custom-tracing¶
FAILED
ones(): argument 'size' (position 1) must be tuple of ints, but found element of type CustomProxy at pos 0
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 's1']
Constant(value=1.0) -> val_4
Shape(x, end=1, start=0) -> sym_size_int_2
Shape(x, end=2, start=1) -> sym_size_int_3
Constant(value=1) -> val_0
Add(sym_size_int_3, val_0) -> add
Concat(sym_size_int_2, add, axis=0) -> val_1
Expand(val_4, val_1) -> ones
output: name='ones' type=dtype('float32') shape=['', '']
script¶
FAILED
number of input names provided (4) exceeded number of inputs (1)
CreateFromShapeThroughFunction¶
forward¶
def forward(self, x):
    dy1 = CreateFromShapeThroughFunction.add_one(x.shape[1])
    y = torch.ones((x.shape[0], dy1))
    return y
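The helper CreateFromShapeThroughFunction.add_one is not shown. Judging from the exported graphs below (an Add with constant 1 and an output dimension reported as 's1 + 1'), it presumably amounts to the following hypothetical reconstruction:

# hypothetical reconstruction of CreateFromShapeThroughFunction.add_one;
# only the "+ 1" behaviour is visible in the exported graphs
def add_one(dim: int) -> int:
    return dim + 1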
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- shape_type_compute._cast_inputs.1(add)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
Reshape(init7_s_1, init7_s1_1) -> _onx_reshape0
Shape(x, end=1, start=0) -> _onx_shape0
Shape(x, end=2, start=1) -> _onx_shape02
Add(_onx_shape02, _onx_reshape0) -> _onx_add0
Concat(_onx_shape0, _onx_add0, axis=0) -> _onx_concat0
ConstantOfShape(_onx_concat0, value=[1.0]) -> output_0
output: name='output_0' type=dtype('float32') shape=['dx', 's1 + 1']
custom-tracing¶
FAILED
ones(): argument 'size' (position 1) must be tuple of ints, but found element of type CustomProxy at pos 0
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 's1']
Constant(value=1.0) -> val_4
Shape(x, end=1, start=0) -> sym_size_int_2
Shape(x, end=2, start=1) -> sym_size_int_3
Constant(value=1) -> val_0
Add(sym_size_int_3, val_0) -> add
Concat(sym_size_int_2, add, axis=0) -> val_1
Expand(val_4, val_1) -> ones
output: name='ones' type=dtype('float32') shape=['', '']
script¶
FAILED
number of input names provided (4) exceeded number of inputs (1)
CropLastDimensionWithTensorContent¶
forward¶
def forward(self, x, shape):
    return x[..., : shape[0]]
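Two distinct problems show up below: the slice bound comes from tensor content (a data-dependent value, which strict export rejects), and the dynamic_shapes specification passed to dynamo-ir only names 'x' although the model takes two inputs. A minimal sketch of a specification that covers both arguments, given as an assumption (the data-dependent slice itself may still fail to export):

import torch

batch = torch.export.Dim("batch")
# 'shape' is a one-element int64 tensor with no dynamic dimension, hence None
dynamic_shapes = {"x": {0: batch}, "shape": None}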
custom-fallback¶
FAILED
None of the following options [ExportOptions(), ExportOptions(strict=False), ExportOptions(decomposition_table='default'), ExportOptions(strict=False, decomposition_table='default'), ExportOptions(dynamo=True), ExportOptions(decomposition_table='default', dynamo=True), ExportOptions(jit=True)] worked, args=(T1r3,T7r1), kwargs=None, exception=
-----
[(ExportOptions(),
Unsupported('Dynamic slicing on data-dependent value is not supported\n\nfrom user code:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 699, in forward\n return x[..., : shape[0]]\n\nSet TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information\n')),
(ExportOptions(strict=False),
GuardOnDataDependentSymNode('Could not extract specialized integer from data-dependent expression u0 (unhinted: u0). (Size-like symbols: none)\n\nCaused by: (_export/non_strict_utils.py:581 in __torch_function__)\nFor more information, run with TORCH_LOGS="dynamic"\nFor extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"\nIf you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\nFor more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing\n\nFor C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\n\nThe following call raised this error:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 699, in forward\n return x[..., : shape[0]]\n')),
(ExportOptions(decomposition_table='default'),
Unsupported('Dynamic slicing on data-dependent value is not supported\n\nfrom user code:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 699, in forward\n return x[..., : shape[0]]\n\nSet TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information\n')),
(ExportOptions(strict=False, decomposition_table='default'),
GuardOnDataDependentSymNode('Could not extract specialized integer from data-dependent expression u0 (unhinted: u0). (Size-like symbols: none)\n\nCaused by: (_export/non_strict_utils.py:581 in __torch_function__)\nFor more information, run with TORCH_LOGS="dynamic"\nFor extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"\nIf you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\nFor more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing\n\nFor C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\n\nThe following call raised this error:\n File "/home/xadupre/github/experimental-experiment/experimental_experiment/torch_interpreter/eval/model_cases.py", line 699, in forward\n return x[..., : shape[0]]\n')),
(ExportOptions(dynamo=True),
UserError('Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs` has 2 elements, but `dynamic_shapes` has 1 elements\nFor more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation')),
(ExportOptions(decomposition_table='default', dynamo=True),
UserError('Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs` has 2 elements, but `dynamic_shapes` has 1 elements\nFor more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation')),
(ExportOptions(jit=True),
GuardOnDataDependentSymNode('Could not guard on data-dependent expression u0 < 0 (unhinted: u0 < 0). (Size-like symbols: none)\n\nCaused by: (_decomp/decompositions.py:733 in slice_forward)\nFor more information, run with TORCH_LOGS="dynamic"\nFor extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"\nIf you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\nFor more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing\n\nFor C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\n\nThe following call raised this error:\n File "<string>", line 1, in <lambda>\n\n\nWhile executing %slice_tensor : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 2, 0, %_local_scalar_dense_default, 1), kwargs = {})\nOriginal traceback:\nNone'))]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='shape' type=dtype('int64') shape=[1]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='getitem_1_axis' type=int64 shape=(1,) -- array([-1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_1_step' type=int64 shape=(1,) -- array([1]) -- DynamoInterpreter._getitem_slice.3
Gather(shape, init7_s1_0) -> _onx_gather0
Squeeze(_onx_gather0, init7_s1_0) -> getitem
Unsqueeze(getitem, init7_s1_0) -> _onx_unsqueeze0
Slice(x, init7_s1_0, _onx_unsqueeze0, getitem_1_axis, getitem_1_step) -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1', 'd_output_2']
dynamo-ir¶
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'torch._dynamo.exc.UserError'>: When `dynamic_shapes` is specified as a dict, its top-level keys must be the arg names ['x', 'shape'] of `inputs`, but here they are ['x']. Alternatively, you could also ignore arg names entirely and specify `dynamic_shapes` as a list/tuple matching `inputs`. For more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation
(Refer to the full stack trace above for more information.)
script¶
FAILED
number of input names provided (3) exceeded number of inputs (2)
CropLastDimensionWithTensorShape¶
forward¶
def forward(self, x, y):
    return x[..., : y.shape[0]]
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='y' type=dtype('float32') shape=['crop']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape
Shape(y, end=1, start=0) -> _onx_shape0
Slice(x, init7_s1_0, _onx_shape0, init7_s1_2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4, 'crop']
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='y' type=dtype('float32') shape=['crop']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='getitem_1_axis' type=int64 shape=(1,) -- array([-1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_1_step' type=int64 shape=(1,) -- array([1]) -- DynamoInterpreter._getitem_slice.3
Shape(y) -> getattr_1
Gather(getattr_1, init7_s1_0) -> _onx_gather0
Squeeze(_onx_gather0, init7_s1_0) -> getitem
Unsqueeze(getitem, init7_s1_0) -> _onx_unsqueeze0
Slice(x, init7_s1_0, _onx_unsqueeze0, getitem_1_axis, getitem_1_step) -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1', 'd_output_2']
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 4, 4]
input: name='y' type=dtype('float32') shape=['s1']
Constant(value_ints=[1]) -> val_11
Shape(y, end=1, start=0) -> sym_size_int_4
Constant(value=[0]) -> val_3
Constant(value_ints=[-1]) -> val_5
Reshape(sym_size_int_4, val_5, allowzero=0) -> val_6
Constant(value=[2]) -> val_10
Slice(x, val_3, val_6, val_10, val_11) -> slice_1
output: name='slice_1' type=dtype('float32') shape=['', '', '']
script¶
FAILED
number of input names provided (3) exceeded number of inputs (2)
InplaceAdd¶
forward¶
def forward(self, x):
    x += self.bias
    return x
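self.bias is not part of the listing; the exported initializers below show a (1, 4) float32 tensor of ones, so a reproduction of the case presumably looks like this sketch (an assumption):

import torch

class InplaceAdd(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # values taken from the exported initializer c_bias below
        self.bias = torch.ones((1, 4))

    def forward(self, x):
        x += self.bias
        return x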
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.get_attr.0
Add(x, bias) -> output
output: name='output' type=dtype('float32') shape=['batch', 4]
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add_3
output: name='add_3' type=dtype('float32') shape=['s0', 4]
script¶
FAILED
number of input names provided (3) exceeded number of inputs (1)
InplaceAdd_¶
forward¶
def forward(self, x):
    x.add_(self.bias)
    return x
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
custom-tracing¶
FAILED
Unable to interpret method 'aten_meth_add_', args=(x, bias), kwargs={}, dispatcher=None
--DEBUG--
[GraphBuilder-YVO] Message starts, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_value_shape={}
_known_types={'bias': 1, 'x': 1}
_known_shapes={'bias': (1, 4), 'x': ('batch', 4)}
_known_constants=['bias']
_known_ranks={}
--TORCH-USERS--
add_ -> {output}
bias -> {add_}
x -> {add_}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([3, 4])), '')) --- 1:2:('batch', 4):
bias: ('run_node', ('', '')) --- 1:2:(1, 4):
add_: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%bias : [num_users=1] = get_attr[target=bias]
%add_ : [num_users=1] = call_method[target=add_](args = (%x, %bias), kwargs = {})
return add_
-- process.progress --
node 2/4 target=add_
--
[GraphBuilder-YVO.make_tensor_input] x[1:batchx4]
[GraphBuilder-YVO.make_initializer] bias[torch.float32:torch.float32:[1.0, 1.0, 1.0, 1.0]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-YVO] Message completed, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add_3
output: name='add_3' type=dtype('float32') shape=['s0', 4]
script¶
FAILED
number of input names provided (3) exceeded number of inputs (1)
InplaceAdd_Mul¶
forward¶
def forward(self, x):
    x.add_(self.bias)
    return x * 2
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init1_s_' type=float32 shape=() -- array([2.], dtype=float32)-- shape_type_compute._cast_inputs.1(mul_Tensor)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
Add(x, c_bias) -> add_
Reshape(init1_s_, init7_s1_1) -> _onx_reshape0
Mul(add_, _onx_reshape0) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
custom-tracing¶
FAILED
Unable to interpret method 'aten_meth_add_', args=(x, bias), kwargs={}, dispatcher=None
--DEBUG--
[GraphBuilder-RME] Message starts, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_value_shape={}
_known_types={'bias': 1, 'x': 1}
_known_shapes={'bias': (1, 4), 'x': ('batch', 4)}
_known_constants=['bias']
_known_ranks={}
--TORCH-USERS--
add_ -> {mul}
bias -> {add_}
x -> {add_}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([3, 4])), '')) --- 1:2:('batch', 4):
bias: ('run_node', ('', '')) --- 1:2:(1, 4):
add_: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%bias : [num_users=1] = get_attr[target=bias]
%add_ : [num_users=1] = call_method[target=add_](args = (%x, %bias), kwargs = {})
%mul : [num_users=1] = call_function[target=operator.mul](args = (%add_, 2), kwargs = {})
return mul
-- process.progress --
node 2/5 target=add_
--
[GraphBuilder-RME.make_tensor_input] x[1:batchx4]
[GraphBuilder-RME.make_initializer] bias[torch.float32:torch.float32:[1.0, 1.0, 1.0, 1.0]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-RME] Message completed, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add_3
Constant(value=2.0) -> scalar_tensor_default
Mul(add_3, scalar_tensor_default) -> mul_4
output: name='mul_4' type=dtype('float32') shape=['s0', 4]
script¶
FAILED
number of input names provided (3) exceeded number of inputs (1)
InplaceCloneAdd¶
forward¶
def forward(self, x):
    x = x.clone()
    x.add_(self.bias)
    return x
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
custom-tracing¶
FAILED
Unable to interpret method 'aten_meth_add_', args=(clone, bias), kwargs={}, dispatcher=None
--DEBUG--
[GraphBuilder-BOI] Message starts, there are 1 initializers, 1 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_value_shape={}
_known_types={'bias': 1, 'clone': 1, 'x': 1}
_known_shapes={'bias': (1, 4), 'clone': ('batch', 4), 'x': ('batch', 4)}
_known_constants=['bias']
_known_ranks={}
--TORCH-USERS--
add_ -> {output}
bias -> {add_}
clone -> {add_}
x -> {clone}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([3, 4])), '')) --- 1:2:('batch', 4):
clone: ('run_node', ('', '')) --- 1:2:('batch', 4):
bias: ('run_node', ('', '')) --- 1:2:(1, 4):
add_: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%clone : [num_users=1] = call_method[target=clone](args = (%x,), kwargs = {})
%bias : [num_users=1] = get_attr[target=bias]
%add_ : [num_users=1] = call_method[target=add_](args = (%clone, %bias), kwargs = {})
return add_
-- process.progress --
node 3/5 target=add_
--
[GraphBuilder-BOI.make_tensor_input] x[1:batchx4]
[GraphBuilder-BOI.make_initializer] bias[torch.float32:torch.float32:[1.0, 1.0, 1.0, 1.0]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-BOI.make_node] .clone [#:# ] Identity:['x']->['clone']
[GraphBuilder-BOI] Message completed, there are 1 initializers, 1 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add_6
output: name='add_6' type=dtype('float32') shape=['s0', 4]
script¶
FAILED
number of input names provided (3) exceeded number of inputs (1)
InplaceSetItemEllipsis_1¶
forward¶
def forward(self, index, update):
    copy = self.params.clone()
    copy[..., index] = update
    return copy
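self.params is likewise not shown; the custom-tracing debug output below reports a (1, 8192, 4) float32 constant, so its initialization presumably resembles the following assumption (only the shape and dtype are known, not the values):

import torch

# assumed initialization; only shape and dtype appear in the debug output below
params = torch.zeros((1, 8192, 4), dtype=torch.float32)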
custom-fallback¶
FAILED
no dynamic shape
custom-tracing¶
FAILED
setitem not implemented for indices=(Ellipsis, 'index')
--DEBUG--
[GraphBuilder-UTK] Message starts, there are 1 initializers, 0 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = WrapSym(batch)
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_value_shape={}
_known_types={'_tensor_constant0': 1, 'index': 7, 'update': 1}
_known_shapes={'_tensor_constant0': (1, 8192, 4), 'index': (4,), 'update': (8192, 4)}
_known_constants=['_tensor_constant0']
_known_ranks={}
--TORCH-USERS--
_tensor_constant0 -> {setitem}
index -> {setitem}
setitem -> {output}
update -> {setitem}
--TORCH-SHAPES--
index: ('run_node', (('example_value', torch.int64, torch.Size([4])), '')) --- 7:1:(4,):
update: ('run_node', (('example_value', torch.float32, torch.Size([8192, 4])), '')) --- 1:2:(8192, 4):
_tensor_constant0: ('run_node', ('', '')) --- 1:3:(1, 8192, 4):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%index : [num_users=1] = placeholder[target=index]
%update : [num_users=1] = placeholder[target=update]
%_tensor_constant0 : [num_users=1] = get_attr[target=_tensor_constant0]
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%_tensor_constant0, (Ellipsis, %index), %update), kwargs = {})
return setitem
-- process.progress --
node 3/5 target=<built-in function setitem>
--
[GraphBuilder-UTK.make_tensor_input] index[7:4]
[GraphBuilder-UTK.make_tensor_input] update[1:8192x4]
[GraphBuilder-UTK.make_initializer] _tensor_constant0[torch.float32:torch.float32] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-UTK] Message completed, there are 1 initializers, 0 nodes, 2 inputs, 2 outputs.
dynamo-ir¶
FAILED
Failed to decompose the FX graph for ONNX compatibility. This is step 2/3 of exporting the model to ONNX. Next steps:
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'RuntimeError'>: false INTERNAL ASSERT FAILED at "/pytorch/build/aten/src/ATen/RegisterFunctionalization_1.cpp":5941, please report a bug to PyTorch. mutating a non-functional tensor with a functional tensor is not allowed. Please ensure that all of your inputs are wrapped inside of a functionalize() call.
While executing %index_put_ : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%clone, [None, None, %index], %update), kwargs = {})
Original traceback:
None
(Refer to the full stack trace above for more information.)
script¶
FAILED
number of input names provided (4) exceeded number of inputs (2)
InplaceSetItemEllipsis_2¶
forward¶
def forward(self, index, update):
    copy = self.params.clone()
    copy[..., index] = update
    return copy
custom-fallback¶
FAILED
no dynamic shape
custom-tracing¶
FAILED
setitem not implemented for indices=(Ellipsis, 'index')
--DEBUG--
[GraphBuilder-INO] Message starts, there are 1 initializers, 0 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = WrapSym(batch)
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_value_shape={}
_known_types={'_tensor_constant0': 1, 'index': 7, 'update': 1}
_known_shapes={'_tensor_constant0': (1, 8192, 6), 'index': (4,), 'update': (8192, 4)}
_known_constants=['_tensor_constant0']
_known_ranks={}
--TORCH-USERS--
_tensor_constant0 -> {setitem}
index -> {setitem}
setitem -> {output}
update -> {setitem}
--TORCH-SHAPES--
index: ('run_node', (('example_value', torch.int64, torch.Size([4])), '')) --- 7:1:(4,):
update: ('run_node', (('example_value', torch.float32, torch.Size([8192, 4])), '')) --- 1:2:(8192, 4):
_tensor_constant0: ('run_node', ('', '')) --- 1:3:(1, 8192, 6):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%index : [num_users=1] = placeholder[target=index]
%update : [num_users=1] = placeholder[target=update]
%_tensor_constant0 : [num_users=1] = get_attr[target=_tensor_constant0]
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%_tensor_constant0, (Ellipsis, %index), %update), kwargs = {})
return setitem
-- process.progress --
node 3/5 target=<built-in function setitem>
--
[GraphBuilder-INO.make_tensor_input] index[7:4]
[GraphBuilder-INO.make_tensor_input] update[1:8192x4]
[GraphBuilder-INO.make_initializer] _tensor_constant0[torch.float32:torch.float32] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-INO] Message completed, there are 1 initializers, 0 nodes, 2 inputs, 2 outputs.
dynamo-ir¶
FAILED
Failed to decompose the FX graph for ONNX compatibility. This is step 2/3 of exporting the model to ONNX. Next steps:
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'RuntimeError'>: false INTERNAL ASSERT FAILED at "/pytorch/build/aten/src/ATen/RegisterFunctionalization_1.cpp":5941, please report a bug to PyTorch. mutating a non-functional tensor with a functional tensor is not allowed. Please ensure that all of your inputs are wrapped inside of a functionalize() call.
While executing %index_put_ : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%clone, [None, None, %index], %update), kwargs = {})
Original traceback:
None
(Refer to the full stack trace above for more information.)
script¶
FAILED
number of input names provided (4) exceeded number of inputs (2)
InplaceSetItemMask¶
forward¶
def forward(self, x):
    mask = x.to(bool)
    x[mask] = 2
    return x
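custom-fallback lowers the masked in-place assignment to a single Where node. A minimal eager sketch of what the exported graph computes, given as an assumption (it is the functional equivalent, not the original mutating code):

import torch

def set_item_mask_reference(x):
    mask = x.to(bool)
    # matches Where(to, c_lifted_tensor_0, x) in the custom-fallback graph below
    return torch.where(mask, torch.tensor(2.0), x)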
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3, 3]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([2.], dtype=float32)-- DynamoInterpret.placeholder.0
Cast(x, to=9) -> to
Where(to, c_lifted_tensor_0, x) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3, 3]
custom-tracing¶
FAILED
setitem not implemented for indices=to
--DEBUG--
[GraphBuilder-DYM] Message starts, there are 0 initializers, 1 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_value_shape={}
_known_types={'to': 9, 'x': 1}
_known_shapes={'to': ('batch', 3, 3), 'x': ('batch', 3, 3)}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
setitem -> {output}
to -> {setitem}
x -> {to, setitem}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([2, 3, 3])), '')) --- 1:3:('batch', 3, 3):
to: ('run_node', ('', '')) --- 9:3:('batch', 3, 3):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=2] = placeholder[target=x]
%to : [num_users=1] = call_method[target=to](args = (%x, torch.bool), kwargs = {})
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%x, %to, 2), kwargs = {})
return setitem
-- process.progress --
node 2/4 target=<built-in function setitem>
--
[GraphBuilder-DYM.make_tensor_input] x[1:batchx3x3]
[GraphBuilder-DYM.make_node] .to [#:# ] Cast>9:['x']->['to']
[GraphBuilder-DYM] Message completed, there are 0 initializers, 1 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
FAILED
[ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Non-zero status code returned while running If node. Name:'n7_2' Status Message: Non-zero status code returned while running ScatterElements node. Name:'n15_n0_7' Status Message: Indices and updates must have the same rank
script¶
FAILED
number of input names provided (2) exceeded number of inputs (1)
InplaceSetItemSquare¶
forward¶
def forward(self, x):
    x[:2, :3] = 1
    return x
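The Slice/ScatterND pattern in the exported graphs below corresponds to the aten.slice_scatter decomposition of x[:2, :3] = 1. A minimal non-mutating eager sketch of what those graphs compute, as an assumption:

import torch

def slice_scatter_reference(x):
    filled = torch.full_like(x[:2, :3], 1.0)
    inner = torch.slice_scatter(x[:2], filled, dim=1, start=0, end=3)
    return torch.slice_scatter(x, inner, dim=0, start=0, end=2)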
custom-fallback¶
- /home/xadupre/vv/this312/lib/python3.12/site-packages/torch/export/_unlift.py:75: UserWarning: Attempted to insert a get_attr Node with no underlying reference in the owning GraphModule! Call GraphModule.add_submodule to add the necessary submodule, GraphModule.add_parameter to add the necessary Parameter, or nn.Module.register_buffer to add the necessary buffer
getattr_node = gm.graph.get_attr(lifted_node)
- /home/xadupre/vv/this312/lib/python3.12/site-packages/torch/fx/graph.py:1801: UserWarning: Node lifted_tensor_0 target lifted_tensor_0 lifted_tensor_0 of does not reference an nn.Module, nn.Parameter, or buffer, which is what ‘get_attr’ Nodes typically target
warnings.warn(
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s3_0_1_2' type=int64 shape=(3,) -- array([0, 1, 2]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
Concat(init7_s1_0, init7_s1_1, axis=0) -> SliceSlicePattern_init7_s1_1_axis
Concat(init7_s1_0, init7_s1_0, axis=0) -> SliceSlicePattern_init7_s1_0_start
Concat(init7_s1_2, init7_s1_3, axis=0) -> SliceSlicePattern_init7_s1_3_end
Slice(x, SliceSlicePattern_init7_s1_0_start, SliceSlicePattern_init7_s1_3_end, SliceSlicePattern_init7_s1_1_axis) -> slice_2
Shape(slice_2) -> _onx_shape0
Expand(c_lifted_tensor_0, _onx_shape0) -> fill
Transpose(fill, perm=[1,0]) -> _onx_transpose02
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> _onx_transpose0
Reshape(init7_s3_0_1_2, init7_s2_-1_1) -> _onx_reshape0
ScatterND(_onx_transpose0, _onx_reshape0, _onx_transpose02) -> _onx_scatternd0
Transpose(_onx_scatternd0, perm=[1,0]) -> slice_scatter
Shape(x) -> _onx_shape02
Gather(_onx_shape02, init7_s1_0) -> _onx_gather0
Range(init7_s1_0, _onx_gather0, init7_s1_1) -> _onx_range0
Slice(_onx_range0, init7_s1_0, init7_s1_2, init7_s1_0, init7_s1_1) -> _onx_slice0
Reshape(_onx_slice0, init7_s2_-1_1) -> _onx_reshape02
ScatterND(x, _onx_reshape02, slice_scatter) -> output_0
Identity(output_0) -> output_1
output: name='output_1' type=dtype('float32') shape=['batch', 5]
custom-tracing¶
FAILED
setitem not implemented for indices=(slice(None, 2, None), slice(None, 3, None))
--DEBUG--
[GraphBuilder-WVG] Message starts, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_value_shape={}
_known_types={'x': 1}
_known_shapes={'x': ('batch', 5)}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
setitem -> {output}
x -> {setitem}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([5, 5])), '')) --- 1:2:('batch', 5):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%x, (slice(None, 2, None), slice(None, 3, None)), 1), kwargs = {})
return setitem
-- process.progress --
node 1/3 target=<built-in function setitem>
--
[GraphBuilder-WVG.make_tensor_input] x[1:batchx5]
[GraphBuilder-WVG] Message completed, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=['s0', 5]
Constant(value=0) -> val_0
Constant(value=[0]) -> val_3
Constant(value=[2]) -> val_7
Constant(value=[0]) -> val_10
Constant(value_ints=[1]) -> val_11
Slice(x, val_3, val_7, val_10, val_11) -> slice_1
Constant(value=[0]) -> val_14
Constant(value=[3]) -> val_18
Constant(value=1) -> val_19
Constant(value=[1]) -> val_22
Constant(value_ints=[1]) -> val_23
Slice(slice_1, val_14, val_18, val_22, val_23) -> slice_2
Shape(slice_2) -> shape
Constant(value=1.0) -> value_0
Expand(value_0, shape) -> fill
Transpose(fill, perm=[1,0]) -> val_44
Constant(value=[0]) -> val_26
Constant(value=[2]) -> val_29
Constant(value=[0]) -> val_32
Constant(value_ints=[1]) -> val_33
Slice(x, val_26, val_29, val_32, val_33) -> slice_3
Shape(slice_3, start=0) -> val_35
Gather(val_35, val_19, axis=0) -> val_36
Range(val_0, val_36, val_19) -> val_37
Constant(value_ints=[0]) -> val_34
Constant(value=[0]) -> val_38
Constant(value=[3]) -> val_39
Constant(value=[1]) -> val_40
Slice(val_37, val_38, val_39, val_34, val_40) -> val_41
Constant(value=-1) -> val_42
Unsqueeze(val_41, val_42) -> val_43
Transpose(slice_3, perm=[1,0]) -> val_45
ScatterND(val_45, val_43, val_44, reduction=b'none') -> val_46
Transpose(val_46, perm=[1,0]) -> slice_scatter
Constant(value_ints=[0]) -> val_47
Shape(x, start=0) -> val_48
Gather(val_48, val_0, axis=0) -> val_49
Range(val_0, val_49, val_19) -> val_50
Constant(value=[0]) -> val_51
Constant(value=[2]) -> val_52
Constant(value=[1]) -> val_53
Slice(val_50, val_51, val_52, val_47, val_53) -> val_54
Unsqueeze(val_54, val_42) -> val_55
ScatterND(x, val_55, slice_scatter, reduction=b'none') -> slice_scatter_1
output: name='slice_scatter_1' type=dtype('float32') shape=['s0', 5]
script¶
FAILED
number of input names provided (5) exceeded number of inputs (1)
InplaceSetItemSquareAdd¶
forward¶
def forward(self, x):
    x[:2, :3] = 1
    return x + 2
custom-fallback¶
- /home/xadupre/vv/this312/lib/python3.12/site-packages/torch/export/_unlift.py:75: UserWarning: Attempted to insert a get_attr Node with no underlying reference in the owning GraphModule! Call GraphModule.add_submodule to add the necessary submodule, GraphModule.add_parameter to add the necessary Parameter, or nn.Module.register_buffer to add the necessary buffer
getattr_node = gm.graph.get_attr(lifted_node)
- /home/xadupre/vv/this312/lib/python3.12/site-packages/torch/fx/graph.py:1801: UserWarning: Node lifted_tensor_0 target lifted_tensor_0 lifted_tensor_0 of does not reference an nn.Module, nn.Parameter, or buffer, which is what ‘get_attr’ Nodes typically target
warnings.warn(
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s3_0_1_2' type=int64 shape=(3,) -- array([0, 1, 2]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_' type=float32 shape=() -- array([2.], dtype=float32)-- shape_type_compute._cast_inputs.0
Concat(init7_s1_0, init7_s1_1, axis=0) -> SliceSlicePattern_init7_s1_1_axis
Concat(init7_s1_0, init7_s1_0, axis=0) -> SliceSlicePattern_init7_s1_0_start
Concat(init7_s1_2, init7_s1_3, axis=0) -> SliceSlicePattern_init7_s1_3_end
Slice(x, SliceSlicePattern_init7_s1_0_start, SliceSlicePattern_init7_s1_3_end, SliceSlicePattern_init7_s1_1_axis) -> slice_2
Shape(slice_2) -> _onx_shape0
Expand(c_lifted_tensor_0, _onx_shape0) -> fill
Transpose(fill, perm=[1,0]) -> _onx_transpose02
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> _onx_transpose0
Reshape(init7_s3_0_1_2, init7_s2_-1_1) -> _onx_reshape0
ScatterND(_onx_transpose0, _onx_reshape0, _onx_transpose02) -> _onx_scatternd0
Transpose(_onx_scatternd0, perm=[1,0]) -> slice_scatter
Shape(x) -> _onx_shape02
Gather(_onx_shape02, init7_s1_0) -> _onx_gather0
Range(init7_s1_0, _onx_gather0, init7_s1_1) -> _onx_range0
Slice(_onx_range0, init7_s1_0, init7_s1_2, init7_s1_0, init7_s1_1) -> _onx_slice0
Reshape(_onx_slice0, init7_s2_-1_1) -> _onx_reshape02
ScatterND(x, _onx_reshape02, slice_scatter) -> output_0
Reshape(init1_s_, init7_s1_1) -> _onx_reshape03
Add(output_0, _onx_reshape03) -> output_1
output: name='output_1' type=dtype('float32') shape=['batch', 5]
custom-tracing¶
FAILED
setitem not implemented for indices=(slice(None, 2, None), slice(None, 3, None))
--DEBUG--
[GraphBuilder-ZLG] Message starts, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_value_shape={}
_known_types={'x': 1}
_known_shapes={'x': ('batch', 5)}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
setitem -> {add}
x -> {setitem}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([5, 5])), '')) --- 1:2:('batch', 5):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%x, (slice(None, 2, None), slice(None, 3, None)), 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%setitem, 2), kwargs = {})
return add
-- process.progress --
node 1/4 target=<built-in function setitem>
--
[GraphBuilder-ZLG.make_tensor_input] x[1:batchx5]
[GraphBuilder-ZLG] Message completed, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=['s0', 5]
Constant(value=0) -> val_0
Constant(value=[0]) -> val_3
Constant(value=[2]) -> val_7
Constant(value=[0]) -> val_10
Constant(value_ints=[1]) -> val_11
Slice(x, val_3, val_7, val_10, val_11) -> slice_1
Constant(value=[0]) -> val_14
Constant(value=[3]) -> val_18
Constant(value=1) -> val_19
Constant(value=[1]) -> val_22
Constant(value_ints=[1]) -> val_23
Slice(slice_1, val_14, val_18, val_22, val_23) -> slice_2
Shape(slice_2) -> shape
Constant(value=1.0) -> value_0
Expand(value_0, shape) -> fill
Transpose(fill, perm=[1,0]) -> val_44
Constant(value=[0]) -> val_26
Constant(value=[2]) -> val_29
Constant(value=[0]) -> val_32
Constant(value_ints=[1]) -> val_33
Slice(x, val_26, val_29, val_32, val_33) -> slice_3
Shape(slice_3, start=0) -> val_35
Gather(val_35, val_19, axis=0) -> val_36
Range(val_0, val_36, val_19) -> val_37
Constant(value_ints=[0]) -> val_34
Constant(value=[0]) -> val_38
Constant(value=[3]) -> val_39
Constant(value=[1]) -> val_40
Slice(val_37, val_38, val_39, val_34, val_40) -> val_41
Constant(value=-1) -> val_42
Unsqueeze(val_41, val_42) -> val_43
Transpose(slice_3, perm=[1,0]) -> val_45
ScatterND(val_45, val_43, val_44, reduction=b'none') -> val_46
Transpose(val_46, perm=[1,0]) -> slice_scatter
Constant(value_ints=[0]) -> val_47
Shape(x, start=0) -> val_48
Gather(val_48, val_0, axis=0) -> val_49
Range(val_0, val_49, val_19) -> val_50
Constant(value=[0]) -> val_51
Constant(value=[2]) -> val_52
Constant(value=[1]) -> val_53
Slice(val_50, val_51, val_52, val_47, val_53) -> val_54
Unsqueeze(val_54, val_42) -> val_55
ScatterND(x, val_55, slice_scatter, reduction=b'none') -> slice_scatter_1
Constant(value=2.0) -> scalar_tensor_default
Add(slice_scatter_1, scalar_tensor_default) -> add
output: name='add' type=dtype('float32') shape=['s0', 5]
script¶
FAILED
number of input names provided (5) exceeded number of inputs (1)
InplaceSetItemSquareAdd2¶
forward¶
def forward(self, x):
    x[:2, :3] = 1
    return x + 2, x + 3
custom-fallback¶
- /home/xadupre/vv/this312/lib/python3.12/site-packages/torch/export/_unlift.py:75: UserWarning: Attempted to insert a get_attr Node with no underlying reference in the owning GraphModule! Call GraphModule.add_submodule to add the necessary submodule, GraphModule.add_parameter to add the necessary Parameter, or nn.Module.register_buffer to add the necessary buffer
getattr_node = gm.graph.get_attr(lifted_node)
- /home/xadupre/vv/this312/lib/python3.12/site-packages/torch/fx/graph.py:1801: UserWarning: Node lifted_tensor_0 target lifted_tensor_0 lifted_tensor_0 of does not reference an nn.Module, nn.Parameter, or buffer, which is what ‘get_attr’ Nodes typically target
warnings.warn(
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s3_0_1_2' type=int64 shape=(3,) -- array([0, 1, 2]) -- Opset.make_node.1/Shape
init: name='init7_s2_-1_1' type=int64 shape=(2,) -- array([-1, 1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_' type=float32 shape=() -- array([2.], dtype=float32)-- shape_type_compute._cast_inputs.0
init: name='init1_s_2' type=float32 shape=() -- array([3.], dtype=float32)-- shape_type_compute._cast_inputs.0
Concat(init7_s1_0, init7_s1_1, axis=0) -> SliceSlicePattern_init7_s1_1_axis
Concat(init7_s1_0, init7_s1_0, axis=0) -> SliceSlicePattern_init7_s1_0_start
Concat(init7_s1_2, init7_s1_3, axis=0) -> SliceSlicePattern_init7_s1_3_end
Slice(x, SliceSlicePattern_init7_s1_0_start, SliceSlicePattern_init7_s1_3_end, SliceSlicePattern_init7_s1_1_axis) -> slice_2
Shape(slice_2) -> _onx_shape0
Expand(c_lifted_tensor_0, _onx_shape0) -> fill
Transpose(fill, perm=[1,0]) -> _onx_transpose02
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> _onx_transpose0
Reshape(init7_s3_0_1_2, init7_s2_-1_1) -> _onx_reshape0
ScatterND(_onx_transpose0, _onx_reshape0, _onx_transpose02) -> _onx_scatternd0
Transpose(_onx_scatternd0, perm=[1,0]) -> slice_scatter
Shape(x) -> _onx_shape02
Gather(_onx_shape02, init7_s1_0) -> _onx_gather0
Range(init7_s1_0, _onx_gather0, init7_s1_1) -> _onx_range0
Slice(_onx_range0, init7_s1_0, init7_s1_2, init7_s1_0, init7_s1_1) -> _onx_slice0
Reshape(_onx_slice0, init7_s2_-1_1) -> _onx_reshape02
ScatterND(x, _onx_reshape02, slice_scatter) -> output_0
Reshape(init1_s_, init7_s1_1) -> _onx_reshape03
Add(output_0, _onx_reshape03) -> output_1
Reshape(init1_s_2, init7_s1_1) -> _onx_reshape04
Add(output_0, _onx_reshape04) -> output_2
output: name='output_1' type=dtype('float32') shape=['batch', 5]
output: name='output_2' type=dtype('float32') shape=['batch', 5]
custom-tracing¶
FAILED
setitem not implemented for indices=(slice(None, 2, None), slice(None, 3, None))
--DEBUG--
[GraphBuilder-MBO] Message starts, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_alias={}
dynamic_shapes={'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_value_shape={}
_known_types={'x': 1}
_known_shapes={'x': ('batch', 5)}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
setitem -> {add, add_1}
x -> {setitem}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([5, 5])), '')) --- 1:2:('batch', 5):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%setitem : [num_users=2] = call_function[target=operator.setitem](args = (%x, (slice(None, 2, None), slice(None, 3, None)), 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%setitem, 2), kwargs = {})
%add_1 : [num_users=1] = call_function[target=operator.add](args = (%setitem, 3), kwargs = {})
return (add, add_1)
-- process.progress --
node 1/5 target=<built-in function setitem>
--
[GraphBuilder-MBO.make_tensor_input] x[1:batchx5]
[GraphBuilder-MBO] Message completed, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
opset: domain='pkg.onnxscript.torch_lib' version=1
input: name='x' type=dtype('float32') shape=['s0', 5]
Constant(value=0) -> val_0
Constant(value=[0]) -> val_3
Constant(value=[2]) -> val_7
Constant(value=[0]) -> val_10
Constant(value_ints=[1]) -> val_11
Slice(x, val_3, val_7, val_10, val_11) -> slice_1
Constant(value=[0]) -> val_14
Constant(value=[3]) -> val_18
Constant(value=1) -> val_19
Constant(value=[1]) -> val_22
Constant(value_ints=[1]) -> val_23
Slice(slice_1, val_14, val_18, val_22, val_23) -> slice_2
Shape(slice_2) -> shape
Constant(value=1.0) -> value_0
Expand(value_0, shape) -> fill
Transpose(fill, perm=[1,0]) -> val_44
Constant(value=[0]) -> val_26
Constant(value=[2]) -> val_29
Constant(value=[0]) -> val_32
Constant(value_ints=[1]) -> val_33
Slice(x, val_26, val_29, val_32, val_33) -> slice_3
Shape(slice_3, start=0) -> val_35
Gather(val_35, val_19, axis=0) -> val_36
Range(val_0, val_36, val_19) -> val_37
Constant(value_ints=[0]) -> val_34
Constant(value=[0]) -> val_38
Constant(value=[3]) -> val_39
Constant(value=[1]) -> val_40
Slice(val_37, val_38, val_39, val_34, val_40) -> val_41
Constant(value=-1) -> val_42
Unsqueeze(val_41, val_42) -> val_43
Transpose(slice_3, perm=[1,0]) -> val_45
ScatterND(val_45, val_43, val_44, reduction=b'none') -> val_46
Transpose(val_46, perm=[1,0]) -> slice_scatter
Constant(value_ints=[0]) -> val_47
Shape(x, start=0) -> val_48
Gather(val_48, val_0, axis=0) -> val_49
Range(val_0, val_49, val_19) -> val_50
Constant(value=[0]) -> val_51
Constant(value=[2]) -> val_52
Constant(value=[1]) -> val_53
Slice(val_50, val_51, val_52, val_47, val_53) -> val_54
Unsqueeze(val_54, val_42) -> val_55
ScatterND(x, val_55, slice_scatter, reduction=b'none') -> slice_scatter_1
Constant(value=2.0) -> scalar_tensor_default
Add(slice_scatter_1, scalar_tensor_default) -> add
Constant(value=3.0) -> scalar_tensor_default_1
Add(slice_scatter_1, scalar_tensor_default_1) -> add_4
output: name='add' type=dtype('float32') shape=['s0', 5]
output: name='add_4' type=dtype('float32') shape=['s0', 5]
script¶
FAILED
number of input names provided (5) exceeded number of inputs (1)
SignatureFloat1¶
forward¶
def forward(self, x, alpha: float = 2.0):
return torch.sigmoid(self.linear(x)) - self.buff * alpha
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='alpha' type=dtype('float32') shape=[1]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init1_s_' type=float32 shape=() -- array([1.5], dtype=float32)-- shape_type_compute._cast_inputs.1(mul_Tensor)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.135, 0.428, -0.433], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.101], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, linear.weight, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Reshape(init1_s_, init7_s1_1) -> _onx_reshape0
Mul(b_buff, _onx_reshape0) -> _onx_mul0
Sub(sigmoid, _onx_mul0) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-tracing¶
FAILED
Unable to interpret method 'aten_meth_mul', args=(buff, alpha), kwargs={}, dispatcher=None
--DEBUG--
[GraphBuilder-PVK] Message starts, there are 3 initializers, 7 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = WrapSym(batch)
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_alias={}
dynamic_shapes=({0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>},
None)
_known_value_shape={}
_known_types={'_sub_Linear__onx_matmul0': 1,
'_sub_Linear__onx_transpose0': 1,
'_sub_Linear_input_1': 1,
'_sub_Linear_linear': 1,
'_sub_Linear_output': 1,
'alpha': 1,
'buff': 1,
'linear': 1,
'linear.bias': 1,
'linear.weight': 1,
'sigmoid': 1,
'x': 1}
_known_shapes={'_sub_Linear__onx_matmul0': ('batch', 1),
'_sub_Linear__onx_transpose0': (3, 1),
'_sub_Linear_input_1': ('batch', 3),
'_sub_Linear_linear': ('batch', 1),
'_sub_Linear_output': ('batch', 1),
'alpha': (1,),
'buff': (1,),
'linear': ('batch', 1),
'linear.bias': (1,),
'linear.weight': (1, 3),
'sigmoid': ('batch', 1),
'x': ('batch', 3)}
_known_constants=['_sub_Linear__onx_transpose0', 'buff', 'linear.bias', 'linear.weight']
_known_ranks={}
--TORCH-USERS--
alpha -> {mul}
buff -> {mul}
linear -> {sigmoid}
mul -> {sub}
sigmoid -> {sub}
x -> {linear}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), '')) --- 1:2:('batch', 3):
alpha: ('run_node', ('', '')) --- 1:1:(1,):
linear: ('run_node', ('', '')) --- 1:2:('batch', 1):
sigmoid: ('run_node', ('', '')) --- 1:2:('batch', 1):
buff: ('run_node', ('', '')) --- 1:1:(1,):
mul: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%alpha : float [num_users=1] = placeholder[target=alpha](default=2.0)
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%mul : [num_users=1] = call_method[target=mul](args = (%buff, %alpha), kwargs = {})
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %mul), kwargs = {})
return sub
-- process.progress --
node 5/8 target=mul
--
[GraphBuilder-PVK.make_tensor_input] x[1:batchx3]
[GraphBuilder-PVK.make_tensor_input] alpha[1:1]
[GraphBuilder-PVK.make_initializer] linear.weight[torch.float32:torch.float32:[-0.10916125774383545, 0.41274309158325195, -0.5378062725067139]] - SOURCE: GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
[GraphBuilder-PVK.make_initializer] linear.bias[torch.float32:torch.float32:[-0.0961490347981453]] - SOURCE: GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
[GraphBuilder-PVK.make_initializer] buff[torch.float32:torch.float32:[0.5]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-PVK.make_node] .make_nodes [#:# ] Identity:['x']->['_sub_Linear_input_1']
[GraphBuilder-PVK.make_node] linear [#:# ] Transpose:['linear.weight']->['_sub_Linear__onx_transpose0']
[GraphBuilder-PVK.make_node] Opset [##:# ] MatMul:['_sub_Linear_input_1', '_sub_Linear__onx_transpose0']->['_sub_Linear__onx_matmul0']
[GraphBuilder-PVK.make_node] Opset2 [##:# ] Add:['_sub_Linear__onx_matmul0', 'linear.bias']->['_sub_Linear_linear']
[GraphBuilder-PVK.make_node] .output [#:# ] Identity:['_sub_Linear_linear']->['_sub_Linear_output']
[GraphBuilder-PVK.make_node] .make_nodes2 [#:# ] Identity:['_sub_Linear_output']->['linear']
[GraphBuilder-PVK.make_node] Opset3 [#:# ] Sigmoid:['linear']->['sigmoid']
[GraphBuilder-PVK] Message completed, there are 3 initializers, 7 nodes, 2 inputs, 2 outputs.
dynamo-ir¶
FAILED
Input mismatch, inputs[0]=(T1r2,float) but names=['x'], model=SignatureFloat1, export='dynamo-ir'
script¶
FAILED
unable to convert dynamic shapes ({0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, None)
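
For reference, a minimal sketch of pushing a module with this signature through torch.export, using a stand-in class and illustrative values rather than the exact test harness:

import torch

class WithFloatArg(torch.nn.Module):  # stand-in for SignatureFloat1
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(3, 1)
        self.register_buffer("buff", torch.tensor([0.5]))

    def forward(self, x, alpha: float = 2.0):
        return torch.sigmoid(self.linear(x)) - self.buff * alpha

batch = torch.export.Dim("batch")
# the float argument gets a None entry in dynamic_shapes, as in the messages above
ep = torch.export.export(
    WithFloatArg(), (torch.randn(4, 3), 1.5), dynamic_shapes=({0: batch}, None)
)
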
SignatureInt1¶
forward¶
def forward(self, x, i: int = 2):
return torch.sigmoid(self.linear(x)) - self.buff + x[:, i : i + 1]
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='i' type=dtype('int64') shape=[1]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.008, -0.344, -0.394], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.026], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, linear.weight, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
Slice(x, init7_s1_1, init7_s1_2, init7_s1_1) -> slice_2
Add(sub, slice_2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='i' type=dtype('int64') shape=[1]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.145, -0.487, -0.441], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.527], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.get_attr.0
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- shape_type_compute._cast_inputs.1(add)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='getitem_axis' type=int64 shape=(2,) -- array([0, 1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_axis_0' type=int64 shape=(1,) -- array([0]) -- DynamoInterpreter._getitem_slice.axis.2##Opset.make_node.1/Shape
init: name='getitem_step' type=int64 shape=(2,) -- array([1, 1]) -- DynamoInterpreter._getitem_slice.3
Concat(getitem_axis_0, i, axis=0) -> _onx_concat02
Gemm(x, linear.weight, linear.bias, transB=1) -> _sub_Linear_linear
Sigmoid(_sub_Linear_linear) -> sigmoid
Sub(sigmoid, buff) -> sub
Reshape(init7_s_1, init7_s1_1) -> _onx_reshape0
Add(i, _onx_reshape0) -> _onx_add0
Shape(x) -> getitem_shape
GatherElements(getitem_shape, getitem_axis_0) -> getitem_end
Concat(getitem_end, _onx_add0, axis=0) -> _onx_concat0
Slice(x, _onx_concat02, _onx_concat0, getitem_axis, getitem_step) -> getitem
Add(sub, getitem) -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1']
dynamo-ir¶
FAILED
Input mismatch, inputs[0]=(T1r2,int) but names=['x'], model=SignatureInt1, export='dynamo-ir'
script¶
FAILED
unable to convert dynamic shapes ({0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, None)
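
Both custom exports above expose the Python integer as an int64 tensor input of shape [1]. A minimal sketch of running such a model with ONNX Runtime (the file name is hypothetical):

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("signature_int1.onnx", providers=["CPUExecutionProvider"])
feeds = {
    "x": np.random.rand(4, 3).astype(np.float32),
    "i": np.array([1], dtype=np.int64),  # the former Python int argument
}
print(sess.run(None, feeds)[0].shape)  # batch=4 gives (4, 1)
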
SignatureInt2¶
forward¶
def forward(self, x, i: int = 2):
return torch.sigmoid(self.linear(x)) - self.buff + x[:, i]
custom-fallback¶
FAILED
None of the following options [ExportOptions(), ExportOptions(strict=False), ExportOptions(decomposition_table='default'), ExportOptions(strict=False, decomposition_table='default'), ExportOptions(dynamo=True), ExportOptions(decomposition_table='default', dynamo=True), ExportOptions(jit=True)] worked, args=(T1r2,int), kwargs=None, exception=
-----
[(ExportOptions(),
RuntimeError('Unable to convert model <class \'experimental_experiment.torch_interpreter.eval.model_cases.SignatureInt2\'>, type(args)=<class \'tuple\'>, type(args[0])=<class \'torch.Tensor\'>, strict=True, input_names=None\n--\ndynamic_shapes={\'x\': {0: <class \'experimental_experiment.torch_interpreter.eval.model_cases.batch\'>}, \'i\': None}\n--\ne=Constraints violated (batch)! For more information, run with TORCH_LOGS="+dynamic".\n - Not all values of batch = L[\'x\'].size()[0] in the specified range satisfy the generated guard L[\'x\'].size()[0] != 9223372036854775807.\n\n--\neee=None\n---exported-program---\ngraph():\n %p_linear_weight : [num_users=1] = placeholder[target=p_linear_weight]\n %p_linear_bias : [num_users=1] = placeholder[target=p_linear_bias]\n %b_buff : [num_users=1] = placeholder[target=b_buff]\n %x : [num_users=2] = placeholder[target=x]\n %i : [num_users=0] = placeholder[target=i]\n %linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %p_linear_weight, %p_linear_bias), kwargs = {})\n %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})\n %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %b_buff), kwargs = {})\n %slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 9223372036854775807), kwargs = {})\n %select : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%slice_1, 1, 1), kwargs = {})\n %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %select), kwargs = {})\n return (add,)')),
(ExportOptions(strict=False),
RuntimeError('Unable to convert model <class \'experimental_experiment.torch_interpreter.eval.model_cases.SignatureInt2\'>, type(args)=<class \'tuple\'>, type(args[0])=<class \'torch.Tensor\'>, strict=False, input_names=None\n--\ndynamic_shapes={\'x\': {0: <class \'experimental_experiment.torch_interpreter.eval.model_cases.batch\'>}, \'i\': None}\n--\ne=Constraints violated (batch)! For more information, run with TORCH_LOGS="+dynamic".\n - Not all values of batch = L[\'args\'][0][0].size()[0] in the specified range satisfy the generated guard L[\'args\'][0][0].size()[0] != 9223372036854775807.\n--\neee=None\n---exported-program---\ngraph():\n %p_linear_weight : [num_users=1] = placeholder[target=p_linear_weight]\n %p_linear_bias : [num_users=1] = placeholder[target=p_linear_bias]\n %b_buff : [num_users=1] = placeholder[target=b_buff]\n %x : [num_users=2] = placeholder[target=x]\n %i : [num_users=0] = placeholder[target=i]\n %linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %p_linear_weight, %p_linear_bias), kwargs = {})\n %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})\n %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %b_buff), kwargs = {})\n %slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 9223372036854775807), kwargs = {})\n %select : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%slice_1, 1, 1), kwargs = {})\n %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %select), kwargs = {})\n return (add,)')),
(ExportOptions(decomposition_table='default'),
RuntimeError('Unable to convert model <class \'experimental_experiment.torch_interpreter.eval.model_cases.SignatureInt2\'>, type(args)=<class \'tuple\'>, type(args[0])=<class \'torch.Tensor\'>, strict=True, input_names=None\n--\ndynamic_shapes={\'x\': {0: <class \'experimental_experiment.torch_interpreter.eval.model_cases.batch\'>}, \'i\': None}\n--\ne=Constraints violated (batch)! For more information, run with TORCH_LOGS="+dynamic".\n - Not all values of batch = L[\'x\'].size()[0] in the specified range satisfy the generated guard L[\'x\'].size()[0] != 9223372036854775807.\n\n--\neee=None\n---exported-program---\ngraph():\n %p_linear_weight : [num_users=1] = placeholder[target=p_linear_weight]\n %p_linear_bias : [num_users=1] = placeholder[target=p_linear_bias]\n %b_buff : [num_users=1] = placeholder[target=b_buff]\n %x : [num_users=2] = placeholder[target=x]\n %i : [num_users=0] = placeholder[target=i]\n %linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %p_linear_weight, %p_linear_bias), kwargs = {})\n %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})\n %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %b_buff), kwargs = {})\n %slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 9223372036854775807), kwargs = {})\n %select : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%slice_1, 1, 1), kwargs = {})\n %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %select), kwargs = {})\n return (add,)')),
(ExportOptions(strict=False, decomposition_table='default'),
RuntimeError('Unable to convert model <class \'experimental_experiment.torch_interpreter.eval.model_cases.SignatureInt2\'>, type(args)=<class \'tuple\'>, type(args[0])=<class \'torch.Tensor\'>, strict=False, input_names=None\n--\ndynamic_shapes={\'x\': {0: <class \'experimental_experiment.torch_interpreter.eval.model_cases.batch\'>}, \'i\': None}\n--\ne=Constraints violated (batch)! For more information, run with TORCH_LOGS="+dynamic".\n - Not all values of batch = L[\'args\'][0][0].size()[0] in the specified range satisfy the generated guard L[\'args\'][0][0].size()[0] != 9223372036854775807.\n--\neee=None\n---exported-program---\ngraph():\n %p_linear_weight : [num_users=1] = placeholder[target=p_linear_weight]\n %p_linear_bias : [num_users=1] = placeholder[target=p_linear_bias]\n %b_buff : [num_users=1] = placeholder[target=b_buff]\n %x : [num_users=2] = placeholder[target=x]\n %i : [num_users=0] = placeholder[target=i]\n %linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %p_linear_weight, %p_linear_bias), kwargs = {})\n %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})\n %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %b_buff), kwargs = {})\n %slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 9223372036854775807), kwargs = {})\n %select : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%slice_1, 1, 1), kwargs = {})\n %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %select), kwargs = {})\n return (add,)')),
(ExportOptions(dynamo=True),
ConstraintViolationError('Constraints violated (batch)! For more information, run with TORCH_LOGS="+dynamic".\n - Not all values of batch = L[\'x\'].size()[0] in the specified range satisfy the generated guard L[\'x\'].size()[0] != 9223372036854775807.\n')),
(ExportOptions(decomposition_table='default', dynamo=True),
ConstraintViolationError('Constraints violated (batch)! For more information, run with TORCH_LOGS="+dynamic".\n - Not all values of batch = L[\'x\'].size()[0] in the specified range satisfy the generated guard L[\'x\'].size()[0] != 9223372036854775807.\n')),
(ExportOptions(jit=True),
RuntimeError("Type 'Tuple[Tensor, int]' cannot be traced. Only Tensors and (possibly nested) Lists, Dicts, and Tuples of Tensors can be traced"))]
custom-tracing¶
FAILED
One index is given as an integer i but this requires to append a node 'Squeeze' after this one and this is not yet implemented. You can replace the integer by `i:i+1`
--DEBUG--
[GraphBuilder-RHW] Message starts, there are 5 initializers, 10 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_alias={}
dynamic_shapes={'i': None,
'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_value_shape={'getitem_shape': ('batch', 3)}
_known_types={'_sub_Linear__onx_matmul0': 1,
'_sub_Linear__onx_transpose0': 1,
'_sub_Linear_input_1': 1,
'_sub_Linear_linear': 1,
'_sub_Linear_output': 1,
'buff': 1,
'getitem_axis': 7,
'getitem_axis_0': 7,
'getitem_end': 7,
'getitem_shape': 7,
'i': 7,
'linear': 1,
'linear.bias': 1,
'linear.weight': 1,
'sigmoid': 1,
'sub': 1,
'x': 1}
_known_shapes={'_sub_Linear__onx_matmul0': ('batch', 1),
'_sub_Linear__onx_transpose0': (3, 1),
'_sub_Linear_input_1': ('batch', 3),
'_sub_Linear_linear': ('batch', 1),
'_sub_Linear_output': ('batch', 1),
'buff': (1,),
'getitem_axis': (2,),
'getitem_axis_0': (1,),
'getitem_end': (1,),
'getitem_shape': (2,),
'i': (1,),
'linear': ('batch', 1),
'linear.bias': (1,),
'linear.weight': (1, 3),
'sigmoid': ('batch', 1),
'sub': ('batch', 1),
'x': ('batch', 3)}
_known_constants=['_sub_Linear__onx_transpose0',
'buff',
'getitem_axis',
'getitem_axis_0',
'linear.bias',
'linear.weight']
_known_ranks={}
--TORCH-USERS--
buff -> {sub}
getitem -> {add}
i -> {getitem}
linear -> {sigmoid}
sigmoid -> {sub}
sub -> {add}
x -> {linear, getitem}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), '')) --- 1:2:('batch', 3):
i: ('run_node', ('', '')) --- 7:1:(1,):
linear: ('run_node', ('', '')) --- 1:2:('batch', 1):
sigmoid: ('run_node', ('', '')) --- 1:2:('batch', 1):
buff: ('run_node', ('', '')) --- 1:1:(1,):
sub: ('run_node', ('', '')) --- 1:2:('batch', 1):
getitem: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=2] = placeholder[target=x]
%i : int [num_users=1] = placeholder[target=i](default=2)
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %buff), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%x, (slice(None, None, None), %i)), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sub, %getitem), kwargs = {})
return add
-- process.progress --
node 6/9 target=<built-in function getitem>
--
[GraphBuilder-RHW.make_tensor_input] x[1:batchx3]
[GraphBuilder-RHW.make_tensor_input] i[7:1]
[GraphBuilder-RHW.make_initializer] linear.weight[torch.float32:torch.float32:[-0.03334775194525719, 0.5131746530532837, -0.5136443972587585]] - SOURCE: GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
[GraphBuilder-RHW.make_initializer] linear.bias[torch.float32:torch.float32:[0.2065061330795288]] - SOURCE: GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
[GraphBuilder-RHW.make_initializer] buff[torch.float32:torch.float32:[0.5]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-RHW.make_initializer] getitem_axis[int64:int64:[0, 1]] - SOURCE: DynamoInterpreter._getitem_slice.axis.1
[GraphBuilder-RHW.make_initializer] getitem_axis_0[int64:int64:[0]] - SOURCE: DynamoInterpreter._getitem_slice.axis.2
[GraphBuilder-RHW.make_node] .make_nodes [#:# ] Identity:['x']->['_sub_Linear_input_1']
[GraphBuilder-RHW.make_node] linear [#:# ] Transpose:['linear.weight']->['_sub_Linear__onx_transpose0']
[GraphBuilder-RHW.make_node] Opset [##:# ] MatMul:['_sub_Linear_input_1', '_sub_Linear__onx_transpose0']->['_sub_Linear__onx_matmul0']
[GraphBuilder-RHW.make_node] Opset2 [##:# ] Add:['_sub_Linear__onx_matmul0', 'linear.bias']->['_sub_Linear_linear']
[GraphBuilder-RHW.make_node] .output [#:# ] Identity:['_sub_Linear_linear']->['_sub_Linear_output']
[GraphBuilder-RHW.make_node] .make_nodes2 [#:# ] Identity:['_sub_Linear_output']->['linear']
[GraphBuilder-RHW.make_node] Opset3 [#:# ] Sigmoid:['linear']->['sigmoid']
[GraphBuilder-RHW.make_node] sub [##:# ] Sub:['sigmoid', 'buff']->['sub']
[GraphBuilder-RHW.make_node] _getitem_slicenA [#:# ] Shape:['x']->['getitem_shape']
[GraphBuilder-RHW.make_node] _getitem_slicenB [##:# ] GatherElements:['getitem_shape', 'getitem_axis_0']->['getitem_end']
[GraphBuilder-RHW] Message completed, there are 5 initializers, 10 nodes, 2 inputs, 2 outputs.
dynamo-ir¶
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'torch._dynamo.exc.UserError'>: Constraints violated (batch)! For more information, run with TORCH_LOGS="+dynamic".
- Not all values of batch = L['args'][0][0].size()[0] in the specified range satisfy the generated guard L['args'][0][0].size()[0] != 9223372036854775807.
(Refer to the full stack trace above for more information.)
script¶
FAILED
unable to convert dynamic shapes {'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, 'i': None}
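
As the custom-tracing message suggests, the integer index can be rewritten as a one-element slice. A sketch of that rewrite of the forward method (it keeps the sliced axis, so the behaviour matches SignatureInt1 above rather than the broadcast produced by the integer index):

def forward(self, x, i: int = 2):
    # x[:, i] drops the axis and needs a Squeeze; x[:, i : i + 1] keeps it
    return torch.sigmoid(self.linear(x)) - self.buff + x[:, i : i + 1]
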
SignatureListFixedLength¶
forward¶
def forward(self, x, lx: list):
return (
torch.sigmoid(self.linear(x)) - self.buff + lx[0] * lx[1].sum(axis=1, keepdim=True)
)
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['s1', 1]
input: name='lx_1' type=dtype('float32') shape=['s2', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.527, -0.18 , 0.187], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.507], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, linear.weight, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx' type='NOTENSOR' shape=None
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.318, -0.432, 0.065], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.096], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.get_attr.0
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- DynamoInterpreter.getitem.1
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- DynamoInterpreter.getitem.1
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
Gemm(x, linear.weight, linear.bias, transB=1) -> _sub_Linear_linear
Sigmoid(_sub_Linear_linear) -> sigmoid
Sub(sigmoid, buff) -> sub
SequenceAt(lx, init7_s_0) -> getitem
SequenceAt(lx, init7_s_1) -> getitem_1
ReduceSum(getitem_1, init7_s1_1, keepdims=1) -> sum_1
Mul(getitem, sum_1) -> mul
Add(sub, mul) -> output
output: name='output' type=dtype('float32') shape=['batch', 1]
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 3]
input: name='lx_0' type=dtype('float32') shape=['s0', 1]
input: name='lx_1' type=dtype('float32') shape=['s0', 2]
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.43], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
Constant(value=[[0.155218...) -> t
Gemm(x, t, linear.bias, beta=1.00, transB=0, alpha=1.00, transA=0) -> addmm
Sigmoid(addmm) -> sigmoid
Sub(sigmoid, buff) -> sub_2
Constant(value=[1]) -> val_3
ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul_4
Add(sub_2, mul_4) -> add_15
output: name='add_15' type=dtype('float32') shape=['s0', 1]
script¶
FAILED
unable to convert dynamic shapes {'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, 'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}
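
The nested dynamic_shapes specification visible in the messages above mirrors the structure of the inputs. A minimal sketch of how such a spec is written with torch.export.Dim (names are illustrative; the exporters then flatten lx into lx_0 and lx_1):

import torch

batch = torch.export.Dim("batch")
dynamic_shapes = {
    "x": {0: batch},
    "lx": [{0: batch}, {0: batch}],  # one entry per tensor in lx
}
# torch.export.export(model, (x, [lx0, lx1]), dynamic_shapes=dynamic_shapes)
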
SignatureListFixedWithNone¶
forward¶
def forward(self, lx):
print(lx)
print(lx[1])
x = lx[0]
if lx[1] is not None:
x += lx[1]
if lx[2] is not None:
x += lx[2]
return x
custom-fallback¶
[FakeTensor(…, size=(4, 4)), FakeTensor(…, size=(4, 4)), None] FakeTensor(…, size=(4, 4)) [FakeTensor(…, size=(4, 4)), FakeTensor(…, size=(4, 4)), None] FakeTensor(…, size=(4, 4)) FAILED
None of the following options [ExportOptions(), ExportOptions(strict=False), ExportOptions(decomposition_table='default'), ExportOptions(strict=False, decomposition_table='default'), ExportOptions(dynamo=True), ExportOptions(decomposition_table='default', dynamo=True), ExportOptions(jit=True)] worked, args=(#3[T1r2,T1r2,None],), kwargs=None, exception=
-----
[(ExportOptions(),
RuntimeError("Unable to convert model <class 'experimental_experiment.torch_interpreter.eval.model_cases.SignatureListFixedWithNone'>, type(args)=<class 'tuple'>, type(args[0])=<class 'list'>, strict=True, input_names=None\n--\ndynamic_shapes={'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}\n--\ne=Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements\nFor more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation\n--\neee=None\n---exported-program---\ngraph():\n %lx_0 : [num_users=1] = placeholder[target=lx_0]\n %lx_1 : [num_users=1] = placeholder[target=lx_1]\n %lx_2 : [num_users=0] = placeholder[target=lx_2]\n %add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%lx_0, %lx_1), kwargs = {})\n return (add_,)")),
(ExportOptions(strict=False),
RuntimeError("Unable to convert model <class 'experimental_experiment.torch_interpreter.eval.model_cases.SignatureListFixedWithNone'>, type(args)=<class 'tuple'>, type(args[0])=<class 'list'>, strict=False, input_names=None\n--\ndynamic_shapes={'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}\n--\ne=Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements\nFor more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation\n--\neee=None\n---exported-program---\ngraph():\n %lx_0 : [num_users=1] = placeholder[target=lx_0]\n %lx_1 : [num_users=1] = placeholder[target=lx_1]\n %lx_2 : [num_users=0] = placeholder[target=lx_2]\n %add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%lx_0, %lx_1), kwargs = {})\n return (add_,)")),
(ExportOptions(decomposition_table='default'),
RuntimeError("Unable to convert model <class 'experimental_experiment.torch_interpreter.eval.model_cases.SignatureListFixedWithNone'>, type(args)=<class 'tuple'>, type(args[0])=<class 'list'>, strict=True, input_names=None\n--\ndynamic_shapes={'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}\n--\ne=Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements\nFor more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation\n--\neee=None\n---exported-program---\ngraph():\n %lx_0 : [num_users=1] = placeholder[target=lx_0]\n %lx_1 : [num_users=1] = placeholder[target=lx_1]\n %lx_2 : [num_users=0] = placeholder[target=lx_2]\n %add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%lx_0, %lx_1), kwargs = {})\n return (add_,)")),
(ExportOptions(strict=False, decomposition_table='default'),
RuntimeError("Unable to convert model <class 'experimental_experiment.torch_interpreter.eval.model_cases.SignatureListFixedWithNone'>, type(args)=<class 'tuple'>, type(args[0])=<class 'list'>, strict=False, input_names=None\n--\ndynamic_shapes={'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}\n--\ne=Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements\nFor more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation\n--\neee=None\n---exported-program---\ngraph():\n %lx_0 : [num_users=1] = placeholder[target=lx_0]\n %lx_1 : [num_users=1] = placeholder[target=lx_1]\n %lx_2 : [num_users=0] = placeholder[target=lx_2]\n %add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%lx_0, %lx_1), kwargs = {})\n return (add_,)")),
(ExportOptions(dynamo=True),
UserError("Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements\nFor more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation")),
(ExportOptions(decomposition_table='default', dynamo=True),
UserError("Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements\nFor more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation")),
(ExportOptions(jit=True),
RuntimeError("Type 'Tuple[List[Optional[Tensor]]]' cannot be traced. Only Tensors and (possibly nested) Lists, Dicts, and Tuples of Tensors can be traced"))]
custom-tracing¶
CustomProxy(lx) CustomProxy(getitem) FAILED
Unable to create an input 'lx' with type #3[T1r2,T1r2,None]
--DEBUG--
[GraphBuilder-AGE] Message starts, there are 0 initializers, 0 nodes, 0 inputs, 0 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = WrapSym(batch)
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'lx_0'},
{'axis': 0, 'input_name': 'lx_1'}]}
dynamic_alias={}
dynamic_shapes={'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>},
{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}
_known_value_shape={}
_known_types={}
_known_shapes={}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
lx -> {getitem_1, getitem, getitem_5, getitem_2, getitem_3, getitem_4}
--TORCH-SHAPES--
lx: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%lx : [num_users=6] = placeholder[target=lx]
%getitem : [num_users=0] = call_function[target=operator.getitem](args = (%lx, 1), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%lx, 0), kwargs = {})
%getitem_2 : [num_users=0] = call_function[target=operator.getitem](args = (%lx, 1), kwargs = {})
%getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%lx, 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%getitem_1, %getitem_3), kwargs = {})
%getitem_4 : [num_users=0] = call_function[target=operator.getitem](args = (%lx, 2), kwargs = {})
%getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%lx, 2), kwargs = {})
%add_1 : [num_users=1] = call_function[target=operator.add](args = (%add, %getitem_5), kwargs = {})
return add_1
-- process.progress --
node 0/10 target=lx
--
[GraphBuilder-AGE] Message completed, there are 0 initializers, 0 nodes, 0 inputs, 0 outputs.
dynamo-ir¶
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'torch._dynamo.exc.UserError'>: Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements
For more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation
(Refer to the full stack trace above for more information.)
script¶
FAILED
unable to convert dynamic shapes {'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}
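
All the failures report the same structural mismatch: lx has three elements (the last one None) while the spec lists only two. Presumably the spec needs a third entry, e.g. None, so both trees have the same shape; a sketch, not verified against the harness:

import torch

batch = torch.export.Dim("batch")
dynamic_shapes = {
    "lx": [{0: batch}, {0: batch}, None],  # third entry matches the None element of lx
}

The print calls and the `is not None` tests are evaluated at export time, so any exported graph is specialized to this particular combination of present and absent elements.
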
SignatureListVariableLength¶
forward¶
def forward(self, x, lx: list):
t = torch.cat(lx, dim=1).sum(axis=1, keepdim=True)
return torch.sigmoid(self.linear(x)) - self.buff + t
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['s1', 1]
input: name='lx_1' type=dtype('float32') shape=['s2', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.101, -0.38 , 0.035], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.146], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Concat(lx_0, lx_1, axis=1) -> cat
ReduceSum(cat, init7_s1_1, keepdims=1) -> sum_1
Gemm(x, linear.weight, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
Add(sub, sum_1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
custom-tracing¶
FAILED
Type is unknown for result 'l', known_types={'x': 1}
--DEBUG--
[GraphBuilder-NWY] Message starts, there are 0 initializers, 0 nodes, 2 inputs, 2 outputs.
--PARAMETERS--
dynamic_examples=
--SHAPE--
dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'},
{'axis': 0, 'input_name': 'lx_0'},
{'axis': 0, 'input_name': 'lx_1'}]}
dynamic_alias={}
dynamic_shapes={'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>},
{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}],
'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}}
_known_value_shape={}
_known_types={'x': 1}
_known_shapes={'x': ('batch', 3)}
_known_constants=[]
_known_ranks={}
--TORCH-USERS--
cat -> {sum_1}
lx -> {cat}
x -> {linear}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), '')) --- 1:2:('batch', 3):
lx: ('run_node', ('', '')) --- :::
cat: ('run_node', ('', '')) --- :::
--ONNX--
-- process.graph_module --
graph():
%x : [num_users=1] = placeholder[target=x]
%lx : list [num_users=1] = placeholder[target=lx]
%cat : [num_users=1] = call_function[target=torch.cat](args = (%lx, 1), kwargs = {})
%sum_1 : [num_users=1] = call_method[target=sum](args = (%cat,), kwargs = {axis: 1, keepdim: True})
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %buff), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sub, %sum_1), kwargs = {})
return add
-- process.progress --
node 2/10 target=<built-in method cat of type object at 0x7fa9d1ef7ba0>
--
[GraphBuilder-NWY.make_tensor_input] x[1:batchx3]
[GraphBuilder-NWY.make_tensor_input] lx[0:]
[GraphBuilder-NWY] Message completed, there are 0 initializers, 0 nodes, 2 inputs, 2 outputs.
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 3]
input: name='lx_0' type=dtype('float32') shape=['s0', 1]
input: name='lx_1' type=dtype('float32') shape=['s0', 2]
init: name='linear.bias' type=float32 shape=(1,) -- array([0.245], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
Concat(lx_0, lx_1, axis=1) -> cat
Constant(value=[1]) -> val_3
ReduceSum(cat, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Constant(value=[[-0.48945...) -> t
Gemm(x, t, linear.bias, beta=1.00, transB=0, alpha=1.00, transA=0) -> addmm
Sigmoid(addmm) -> sigmoid
Sub(sigmoid, buff) -> sub_4
Add(sub_4, sum_1) -> add_15
output: name='add_15' type=dtype('float32') shape=['s0', 1]
script¶
FAILED
unable to convert dynamic shapes {'x': {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, 'lx': [{0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}, {0: <class 'experimental_experiment.torch_interpreter.eval.model_cases.batch'>}]}
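
Despite the "variable length" intent, both successful exports specialize lx to the two tensors seen at export time and flatten them into lx_0 and lx_1. A minimal sketch for checking the resulting ONNX signature (hypothetical file name):

import onnxruntime as ort

sess = ort.InferenceSession("signature_list_variable_length.onnx")
print([(i.name, i.shape) for i in sess.get_inputs()])
# expected something like [('x', ['batch', 3]), ('lx_0', ['s1', 1]), ('lx_1', ['s2', 2])]
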
SignatureShapeAsIndex¶
forward¶
def forward(self, x, y):
t = torch.sigmoid(self.linear(x)) + x
return t[:, : y.shape[1]]
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 'length']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.179, -0.079, -0.569], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.149], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, linear.weight, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Add(sigmoid, x) -> add
Shape(y, end=2, start=1) -> _onx_shape0
Slice(add, init7_s1_0, _onx_shape0, init7_s1_1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'length']
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 'length']
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.355, 0.051, -0.5 ], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.069], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='getitem_1_axis' type=int64 shape=(2,) -- array([0, 1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_1_start' type=int64 shape=(2,) -- array([0, 0]) -- DynamoInterpreter._getitem_slice.2
init: name='getitem_1_step' type=int64 shape=(2,) -- array([1, 1]) -- DynamoInterpreter._getitem_slice.3
Gemm(x, linear.weight, linear.bias, transB=1) -> _sub_Linear_linear
Sigmoid(_sub_Linear_linear) -> sigmoid
Add(sigmoid, x) -> add
Shape(add) -> getitem_1_shape
GatherElements(getitem_1_shape, init7_s1_0) -> getitem_1_end
Shape(y) -> getattr_1
Gather(getattr_1, init7_s1_1) -> _onx_gather0
Squeeze(_onx_gather0, init7_s1_0) -> getitem
Unsqueeze(getitem, init7_s1_0) -> _onx_unsqueeze0
Concat(getitem_1_end, _onx_unsqueeze0, axis=0) -> _onx_concat0
Slice(add, getitem_1_start, _onx_concat0, getitem_1_axis, getitem_1_step) -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1']
dynamo-ir¶
opset: domain='pkg.onnxscript.torch_lib.common' version=1
opset: domain='' version=18
input: name='x' type=dtype('float32') shape=['s0', 3]
input: name='y' type=dtype('float32') shape=['s0', 's2']
init: name='linear.bias' type=float32 shape=(1,) -- array([0.537], dtype=float32)
Constant(value_ints=[-1]) -> val_16
Shape(y, end=2, start=1) -> sym_size_int_5
Reshape(sym_size_int_5, val_16, allowzero=0) -> val_17
Constant(value=[[0.493760...) -> t
Gemm(x, t, linear.bias, beta=1.00, transB=0, alpha=1.00, transA=0) -> addmm
Sigmoid(addmm) -> sigmoid
Add(sigmoid, x) -> add_6
Constant(value=[0]) -> val_14
Constant(value=[1]) -> val_21
Constant(value_ints=[1]) -> val_22
Slice(add_6, val_14, val_17, val_21, val_22) -> slice_2
output: name='slice_2' type=dtype('float32') shape=['', '']
script¶
opset: domain='' version=17
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 'length']
init: name='args_3' type=float32 shape=(1, 3) -- array([0.045, 0.106, 0.384], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.017], dtype=float32)
Constant(value=1) -> /Constant_output_0
Gemm(x, args_3, linear.bias, alpha=1.00, beta=1.00, transB=1) -> /linear/Gemm_output_0
Sigmoid(/linear/Gemm_output_0) -> /Sigmoid_output_0
Add(/Sigmoid_output_0, x) -> /Add_output_0
Shape(y) -> /Shape_output_0
Gather(/Shape_output_0, /Constant_output_0, axis=0) -> /Gather_output_0
Constant(value=[1]) -> /Constant_1_output_0
Constant(value=[0]) -> /Constant_2_output_0
Constant(value=[0]) -> /Constant_3_output_0
Unsqueeze(/Gather_output_0, /Constant_3_output_0) -> /Unsqueeze_output_0
Constant(value=[1]) -> /Constant_4_output_0
Slice(/Add_output_0, /Constant_2_output_0, /Unsqueeze_output_0, /Constant_1_output_0, /Constant_4_output_0) -> 21
output: name='21' type=dtype('float32') shape=['batch', 'Slice21_dim_1']
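
Here the second input only contributes its shape: y.shape[1] becomes the end value of the Slice (the Shape(y, start=1, end=2) node in the graphs above). A minimal sketch of the matching dynamic-shape spec with two symbolic dimensions (illustrative names):

import torch

batch = torch.export.Dim("batch")
length = torch.export.Dim("length")
dynamic_shapes = {
    "x": {0: batch},
    "y": {0: batch, 1: length},
}
# torch.export.export(model, (x, y), dynamic_shapes=dynamic_shapes)
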
TypeBFloat16¶
forward¶
def forward(self, x):
xb = x.to(torch.bfloat16)
return (xb + xb).to(torch.float32)
custom-fallback¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 4]
Add(x, x) -> add-x
Cast(add-x, to=16) -> add
Cast(add, to=1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
custom-tracing¶
opset: domain='' version=18
doc_string: large_model=False, inline=False, external_threshold=102...
input: name='x' type=dtype('float32') shape=['batch', 4]
Add(x, x) -> add-x
Cast(add-x, to=16) -> add
Cast(add, to=1) -> output
output: name='output' type=dtype('float32') shape=['batch', 4]
dynamo-ir¶
FAILED
[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(14) node with name 'node_Add_1'
script¶
FAILED
number of input names provided (4) exceeded number of inputs (1)
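
The two custom exports keep float32 at the ONNX boundary and only pass through bfloat16 internally (Cast to=16), which is why they still run under ONNX Runtime, while the dynamo-ir graph fails presumably because its Add runs in bfloat16 and no such kernel is available. A minimal verification sketch, assuming the custom export was saved under a hypothetical file name and using loose tolerances for the bfloat16 rounding:

import onnxruntime as ort
import torch

x = torch.randn(4, 4)
expected = (x.to(torch.bfloat16) + x.to(torch.bfloat16)).to(torch.float32)

sess = ort.InferenceSession("type_bfloat16.onnx", providers=["CPUExecutionProvider"])
(got,) = sess.run(None, {"x": x.numpy()})
torch.testing.assert_close(torch.from_numpy(got), expected, rtol=1e-2, atol=2e-2)
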
Summary¶
Pivot table: one row per case, with the columns custom-fallback, custom-tracing, dynamo-ir and script. A cell contains FAIL when the corresponding export failed and is left empty when it succeeded; the per-case results are the ones reported in the sections above.