Overview of Exportability#
The following script shows the exported program for many short cases and retrieves an ONNX model equivalent to the original model. Go to the bottom of the page to see a table summarizing the results.
<<<
import inspect
import textwrap
import pandas
from yobx.helpers import string_type
from yobx.helpers.onnx_helper import pretty_onnx
from yobx.torch.model_eval_cases import discover, run_exporter
from yobx.ext_test_case import unit_test_going
cases = discover()
print()
print(":ref:`Summary <ledx-summary-exported-program>`")
print()
sorted_cases = sorted(cases.items())
if unit_test_going():
sorted_cases = sorted_cases[:3]
for name, cls_model in sorted_cases:
print(f"* :ref:`{name} <ledx-model-case-export-{name}>`")
print()
print()
obs = []
for name, cls_model in sorted(cases.items()):
print()
print(f".. _ledx-model-case-export-{name}:")
print()
print(name)
print("=" * len(name))
print()
print(f"code: :class:`yobx.torch._model_eval_cases.{name}`")
print()
print("forward")
print("+++++++")
print()
print(".. code-block:: python")
print()
src = inspect.getsource(cls_model.forward)
if src:
print(textwrap.indent(textwrap.dedent(src), " "))
else:
print(" # code is missing")
print()
print()
for exporter in ("yobx", "dynamo-ir", "tracing"):
expname = exporter.replace("export-", "")
print()
print(expname)
print("+" * len(expname))
print()
res = run_exporter(exporter, cls_model, True, quiet=True)
case_ref = f":ref:`{name} <ledx-model-case-export-{name}>`"
expo = exporter.split("-", maxsplit=1)[-1]
if "inputs" in res:
print(f"* **inputs:** ``{string_type(res['inputs'], with_shape=True)}``")
if "dynamic_shapes" in res:
print(f"* **shapes:** ``{string_type(res['dynamic_shapes'])}``")
print()
print()
if "onx" in res:
print(".. code-block:: text")
print()
print(textwrap.indent(pretty_onnx(res["onx"]), " "))
print()
print()
if "error" not in res:
obs.append(dict(case=case_ref, error="", exporter=expo))
if "error" in res:
print("**FAILED**")
print()
print(".. code-block:: text")
print()
err = str(res["error"])
if err:
print(textwrap.indent(err, " "))
else:
print(" # no error found for the failure")
print()
print()
obs.append(dict(case=case_ref, error="FAIL", exporter=expo))
print()
print(".. _ledx-summary-exported-program:")
print()
print("Summary")
print("+++++++")
print()
df = pandas.DataFrame(obs)
piv = df.pivot(index="case", columns="exporter", values="error")
print(piv.to_markdown(tablefmt="rst"))
print()
>>>
AtenAsStrided#
code: yobx.torch._model_eval_cases.AtenAsStrided
forward#
def forward(self, x):
y = torch.as_strided(x, (2, 2, 8, 4), (128, 8, 16, 1))
return y
yobx#
FAILED
The implementation is still incorrect, x='x', shape=('batch', 2, 8, 8), size=[2, 2, 8, 4], stride=[128, 8, 16, 1], storage_offset=None
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-JNS] Message starts, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
input_names=['x']
output_names=[]
--CONSTRAINTS--
batch = {'s77'}
s77 = {'batch'}
--SHAPE--
_dynamic_examples=
dynamic_objects=
batch = 'batch'
s77 = 's77'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s77': 'batch'}
dynamic_shapes={'x': {0: Dim('batch', min=0)}}
_known_shapes={'x': ('batch', 2, 8, 8)}
_known_types={'x': 1}
_known_devices={'x': -1}
_context=[]
_known_value_shape={}
_known_constants=[]
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
as_strided -> {output}
x -> {as_strided}
--TORCH-SHAPES--
x: ('run_node', ('', ('val', torch.float32, torch.Size([s77, 2, 8, 8])))) --- 1:4:('batch', 2, 8, 8):
as_strided: ('run_node', ('', ('val', torch.float32, torch.Size([2, 2, 8, 4])))) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
ExportedProgram:
class GraphModule(torch.nn.Module):
def forward(self, x: "f32[s77, 2, 8, 8]"):
# File: ~/github/yet-another-onnx-builder/yobx/torch/_model_eval_cases.py:308 in forward, code: y = torch.as_strided(x, (2, 2, 8, 4), (128, 8, 16, 1))
as_strided: "f32[2, 2, 8, 4]" = torch.ops.aten.as_strided.default(x, [2, 2, 8, 4], [128, 8, 16, 1]); x = None
return (as_strided,)
Graph signature:
# inputs
x: USER_INPUT
# outputs
as_strided: USER_OUTPUT
Range constraints: {s77: VR[0, int_oo]}
-- process.graph_module.graph --
graph():
%x : [num_users=1] = placeholder[target=x]
%as_strided : [num_users=1] = call_function[target=torch.ops.aten.as_strided.default](args = (%x, [2, 2, 8, 4], [128, 8, 16, 1]), kwargs = {})
return (as_strided,)
-- process.inputs_to_remove --
set()
-- process.progress --
node 1/3 target=aten.as_strided.default
-- 1 INPUTS
[GraphBuilder-JNS.1.make_tensor_input] x[1:batchx2x8x8]
-- 0 INITIALIZERS
-- 0 OUTPUTS
[GraphBuilder-JNS] Message completed, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
dynamo-ir#
inputs:
#1[(T1s2x2x8x8,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 2, 8, 8]
init: name='rank_tensor' type=int64 shape=(1,) -- array([4])
init: name='val_0' type=int64 shape=(4,) -- array([2, 2, 8, 4])
init: name='val_1' type=int64 shape=(4,) -- array([128, 8, 16, 1])
init: name='neg_1' type=int64 shape=(1,) -- array([-1])
init: name='indices' type=int64 shape=() -- array([0])
init: name='rank_0' type=int64 shape=() -- array([4])
init: name='int64_1_cast' type=int64 shape=() -- array([1])
init: name='tmp_14' type=float32 shape=(1,) -- array([1.], dtype=float32)
Reshape(x, neg_1) -> self_flatten
SequenceEmpty() -> one_seq
Loop(rank_0, , indices, one_seq, body=G1) -> indices_16, one_seq_17
CastLike(indices, indices_16) -> storage_offset_cast
Add(indices_16, storage_offset_cast) -> indices_19
Gather(self_flatten, indices_19) -> as_strided
output: name='as_strided' type=dtype('float32') shape=[2, 2, 8, 4]
----- subgraph ---- Loop - n6_2 - att.body=G1 -- level=1 -- i,cond_in,indices_1,one_seq_2 -> cond_out,indices_13,one_seq_15
input: name='i' type=dtype('int64') shape=None
input: name='cond_in' type=dtype('bool') shape=None
input: name='indices_1' type='NOTENSOR' shape=None
input: name='one_seq_2' type='NOTENSOR' shape=None
Equal(i, indices) -> cond
Sub(rank_0, i) -> tmp
Sub(tmp, int64_1_cast) -> j
Reshape(j, neg_1) -> j_tensor
Gather(val_0, j_tensor, axis=0) -> size_dim_j
Range(indices, size_dim_j, int64_1_cast) -> tmp_6
Slice(val_0, j_tensor, rank_tensor) -> size_after_j
Expand(indices_1, size_after_j) -> indices_4
Gather(val_1, j_tensor, axis=0) -> stride_dim_j
Mul(tmp_6, stride_dim_j) -> add_value
If(cond, then_branch=G2, else_branch=G3) -> shape_11
Reshape(add_value, shape_11) -> add_value_12
Add(indices_4, add_value_12) -> indices_13
SequenceInsert(one_seq_2, tmp_14) -> one_seq_15
Identity(cond_in) -> cond_out
output: name='cond_out' type=dtype('bool') shape=None
output: name='indices_13' type='NOTENSOR' shape=None
output: name='one_seq_15' type='NOTENSOR' shape=None
----- subgraph ---- If - n20 - att.then_branch=G2 -- level=2 -- -> shape
Identity(size_dim_j) -> shape
output: name='shape' type=dtype('int64') shape=[1]
----- subgraph ---- If - n20 - att.else_branch=G3 -- level=2 -- -> shape_10
Cast(size_dim_j, to=1) -> tmp_8
ConcatFromSequence(one_seq_2, axis=0) -> ones
Concat(tmp_8, ones, axis=0) -> shape_9
Cast(shape_9, to=7) -> shape_10
output: name='shape_10' type=dtype('int64') shape=None
----- subgraph ---- If - n20 - att.then_branch=G2 -- level=1 -- -> shape
Identity(size_dim_j) -> shape
output: name='shape' type=dtype('int64') shape=[1]
----- subgraph ---- If - n20 - att.else_branch=G3 -- level=1 -- -> shape_10
Cast(size_dim_j, to=1) -> tmp_8
ConcatFromSequence(one_seq_2, axis=0) -> ones
Concat(tmp_8, ones, axis=0) -> shape_9
Cast(shape_9, to=7) -> shape_10
output: name='shape_10' type=dtype('int64') shape=None
tracing#
FAILED
The implementation is still incorrect, x='x', shape=('batch', 2, 8, 8), size=(2, 2, 8, 4), stride=(128, 8, 16, 1), storage_offset=None
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-IXK] Message starts, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
input_names=['x']
output_names=[]
--CONSTRAINTS--
batch = {'s26'}
s26 = {'batch'}
--SHAPE--
_dynamic_examples=
dynamic_objects=
batch = 'batch'
s26 = 's26'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s26': 'batch'}
dynamic_shapes={'x': {0: Dim('batch', min=0)}}
_known_shapes={'x': ('batch', 2, 8, 8)}
_known_types={'x': 1}
_known_devices={'x': -1}
_context=[]
_known_value_shape={}
_known_constants=[]
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
as_strided -> {output}
x -> {as_strided}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([2, 2, 8, 8])), ('val', torch.float32, torch.Size([s26, 2, 8, 8])))) --- 1:4:('batch', 2, 8, 8):
as_strided: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(tracing=True, aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
AtenAsStrided()
def forward(self, x):
as_strided = torch.as_strided(x, (2, 2, 8, 4), (128, 8, 16, 1)); x = None
return as_strided
# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
%x : [num_users=1] = placeholder[target=x]
%as_strided : [num_users=1] = call_function[target=torch.as_strided](args = (%x, (2, 2, 8, 4), (128, 8, 16, 1)), kwargs = {})
return as_strided
-- process.inputs_to_remove --
set()
-- process.progress --
node 1/3 target=<built-in method as_strided of type object at 0x7515a8359180>
-- 1 INPUTS
[GraphBuilder-IXK.1.make_tensor_input] x[1:batchx2x8x8]
-- 0 INITIALIZERS
-- 0 OUTPUTS
[GraphBuilder-IXK] Message completed, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
AtenInterpolate#
code: yobx.torch._model_eval_cases.AtenInterpolate
forward#
def forward(self, x):
y = torch.nn.functional.interpolate(
x, scale_factor=2.0, mode="bilinear", recompute_scale_factor=False
)
return y
yobx#
inputs:
#1[(T1s2x2x3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 2, 3, 4]
init: name='init7_s2_6_8' type=int64 shape=(2,) -- array([6, 8]) -- _aten_upsample_output_size.rsize
Shape(x, end=2, start=0) -> x::Shape:2
Concat(x::Shape:2, init7_s2_6_8, axis=0) -> _onx_concat_x::Shape:2
Resize(x, , , _onx_concat_x::Shape:2, coordinate_transformation_mode=b'pytorch_half_pixel', mode=b'linear', nearest_mode=b'floor') -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 2, 6, 8]
dynamo-ir#
inputs:
#1[(T1s2x2x3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 2, 3, 4]
init: name='val_0' type=float32 shape=(4,) -- array([1., 1., 2., 2.], dtype=float32)
Resize(x, , val_0, keep_aspect_ratio_policy=b'stretch', antialias=0, extrapolation_value=0.00, exclude_outside=0, nearest_mode=b'floor', coordinate_transformation_mode=b'pytorch_half_pixel', cubic_coeff_a=-0.75, mode=b'linear') -> upsample_bilinear2d
output: name='upsample_bilinear2d' type=dtype('float32') shape=['batch', 2, 6, 8]
tracing#
inputs:
#1[(T1s2x2x3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 2, 3, 4]
init: name='init7_s2_6_8' type=int64 shape=(2,) -- array([6, 8]) -- _aten_upsample_output_size.rsize
Shape(x, end=2, start=0) -> x::Shape:2
Concat(x::Shape:2, init7_s2_6_8, axis=0) -> _onx_concat_x::Shape:2
Resize(x, , , _onx_concat_x::Shape:2, coordinate_transformation_mode=b'pytorch_half_pixel', mode=b'linear', nearest_mode=b'floor') -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1', 'd_output_2', 'd_output_3']
AtenNonZero#
code: yobx.torch._model_eval_cases.AtenNonZero
forward#
def forward(self, x):
y = torch.nonzero(x)
return y
yobx#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
NonZero(x) -> _onx_nonzero_x
Transpose(_onx_nonzero_x, perm=[1,0]) -> output_0
output: name='output_0' type=dtype('int64') shape=['NEWDIM_nonzero', 2]
dynamo-ir#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
NonZero(x) -> val_0
Transpose(val_0, perm=[1,0]) -> nonzero
output: name='nonzero' type=dtype('int64') shape=['u0', 2]
tracing#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
NonZero(x) -> _onx_nonzero_x
Transpose(_onx_nonzero_x, perm=[1,0]) -> output
output: name='output' type=dtype('int64') shape=['NEWDIM_nonzero', 2]
AtenNonZeroTuple#
code: yobx.torch._model_eval_cases.AtenNonZeroTuple
forward#
def forward(self, x):
y = torch.nonzero(x, as_tuple=True)
return y[0], y[1]
yobx#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- ReshapeIsSqueezePattern.m1##ReshapeIsSqueezePattern.m1
NonZero(x) -> _onx_nonzero_x
Split(_onx_nonzero_x, num_outputs=2) -> _onx_split_nonzero_x_0, _onx_split_nonzero_x_1
Squeeze(_onx_split_nonzero_x_0, init7_s1_0) -> output_0
Squeeze(_onx_split_nonzero_x_1, init7_s1_0) -> output_1
output: name='output_0' type=dtype('int64') shape=['u0']
output: name='output_1' type=dtype('int64') shape=['u0']
dynamo-ir#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='val_7' type=int64 shape=(1,) -- array([1])
NonZero(x) -> val_0
Transpose(val_0, perm=[1,0]) -> nonzero
Split(nonzero, num_outputs=2, axis=-1) -> val_6, val_11
Squeeze(val_6, val_7) -> getitem
Squeeze(val_11, val_7) -> getitem_1
output: name='getitem' type=dtype('int64') shape=['u0']
output: name='getitem_1' type=dtype('int64') shape=['u0']
tracing#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- ReshapeIsSqueezePattern.m1##ReshapeIsSqueezePattern.m1
NonZero(x) -> _onx_nonzero_x
Split(_onx_nonzero_x, num_outputs=2) -> _onx_split_nonzero_x_0, _onx_split_nonzero_x_1
Squeeze(_onx_split_nonzero_x_0, init7_s1_0) -> output_0
Squeeze(_onx_split_nonzero_x_1, init7_s1_0) -> output_1
output: name='output_0' type=dtype('int64') shape=['NEWDIM_nonzero']
output: name='output_1' type=dtype('int64') shape=['NEWDIM_nonzero']
AtenRollPos#
code: yobx.torch._model_eval_cases.AtenRollPos
forward#
def forward(self, x):
return torch.roll(x, 1, -1)
yobx#
inputs:
#1[(T1s2x3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_-1, init7_s1_4, init7_s1_-1) -> _onx_slice_x
Slice(x, init7_s1_0, init7_s1_-1, init7_s1_-1) -> _onx_slice_x2
Concat(_onx_slice_x, _onx_slice_x2, axis=-1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3, 4]
dynamo-ir#
FAILED
An error occurred when running the '<onnx_ir.passes.PassManager object at 0x751521896db0>' pass after the following passes: ['<onnx_ir.passes.common.inliner.InlinePass object at 0x751521896b70>']
tracing#
inputs:
#1[(T1s2x3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_-1, init7_s1_4, init7_s1_-1) -> _onx_slice_x
Slice(x, init7_s1_0, init7_s1_-1, init7_s1_-1) -> _onx_slice_x2
Concat(_onx_slice_x, _onx_slice_x2, axis=-1) -> output
output: name='output' type=dtype('float32') shape=['batch', 3, 4]
AtenRollRelu#
code: yobx.torch._model_eval_cases.AtenRollRelu
forward#
def forward(self, x):
return torch.relu(torch.roll(x, -1, -1))
yobx#
inputs:
#1[(T1s2x3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_1, init7_s1_4, init7_s1_-1) -> _onx_slice_x
Slice(x, init7_s1_0, init7_s1_1, init7_s1_-1) -> _onx_slice_x2
Concat(_onx_slice_x, _onx_slice_x2, axis=-1) -> _onx_concat_slice_x
Relu(_onx_concat_slice_x) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3, 4]
dynamo-ir#
inputs:
#1[(T1s2x3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='val_0' type=int64 shape=(1,) -- array([-1])
init: name='val_2' type=int64 shape=(1,) -- array([1])
init: name='val_3' type=int64 shape=(1,) -- array([0])
Size(x) -> val_5
Reshape(val_5, val_0, allowzero=0) -> val_6
Slice(x, val_2, val_6, val_0) -> val_7
Slice(x, val_3, val_2, val_0) -> val_4
Concat(val_7, val_4, axis=-1) -> roll
Relu(roll) -> relu
output: name='relu' type=dtype('float32') shape=['batch', 3, 4]
tracing#
inputs:
#1[(T1s2x3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_1, init7_s1_4, init7_s1_-1) -> _onx_slice_x
Slice(x, init7_s1_0, init7_s1_1, init7_s1_-1) -> _onx_slice_x2
Concat(_onx_slice_x, _onx_slice_x2, axis=-1) -> _onx_concat_slice_x
Relu(_onx_concat_slice_x) -> output
output: name='output' type=dtype('float32') shape=['batch', 3, 4]
BuildInIsInstance#
code: yobx.torch._model_eval_cases.BuildInIsInstance
forward#
def forward(self, x, lx: list | torch.Tensor):
if isinstance(lx, list):
t = lx[0] * lx[1].sum(axis=1, keepdim=True)
return torch.sigmoid(self.linear(x)) - self.buff + t
return torch.sigmoid(self.linear(x)) - self.buff + lx
yobx#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--p_linear_weight::T10' type=float32 shape=(1, 3) -- array([0.33726388, 0.37609833, 0.23755449], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s2_1_3,p_linear_weight::T10)##p_linear_weight::T10/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.20761113], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--p_linear_weight::T10, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
dynamo-ir#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.53195345, 0.42383185, 0.12110724], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.5330608], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='val_3' type=int64 shape=(1,) -- array([1])
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, buff) -> sub_4
ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul_1
Add(sub_4, mul_1) -> add_15
output: name='add_15' type=dtype('float32') shape=['batch', 1]
tracing#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='_traced_m2.linear.bias' type=float32 shape=(1,) -- array([-0.48647648], dtype=float32)-- GraphBuilder.make_nodes/from_traced_m2.linear.bias##DynamoInterpret.get_attr.1/P(_traced_m2.linear.bias)
init: name='_traced_m2_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.get_attr.0
init: name='GemmTransposePattern--_sub_ime___traced_m2_linear_weight::T10' type=float32 shape=(1, 3) -- array([-0.4364325 , -0.25060153, 0.14577079], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_sub_ime___traced_m2_linear_weight::T10,init7_s2_1_3)##_sub_ime___traced_m2_linear_weight::T10/GraphBuilder.constant_folding.from/fold(_traced_m2.linear.weight)##_traced_m2.linear.weight/GraphBuilder.make_nodes/from_traced_m2.linear.weight##DynamoInterpret.get_attr.1/P(_traced_m2.linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
Gemm(x, GemmTransposePattern--_sub_ime___traced_m2_linear_weight::T10, _traced_m2.linear.bias, transB=1) -> _sub_ime___traced_m2_linear_linear
Sigmoid(_sub_ime___traced_m2_linear_linear) -> sigmoid
Sub(sigmoid, _traced_m2_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output
output: name='output' type=dtype('float32') shape=['batch', 1]
BuildInLen#
code: yobx.torch._model_eval_cases.BuildInLen
forward#
def forward(self, x, lx: list):
t = lx[0] * lx[1].sum(axis=1, keepdim=True)
if len(lx) > 2:
t = t + lx[2].sum(axis=1, keepdim=True)
return torch.sigmoid(self.linear(x)) - self.buff + t
yobx#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#3[T1s8x1,T1s8x2,T1s8x3])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--p_linear_weight::T10' type=float32 shape=(1, 3) -- array([ 0.43854806, -0.29138273, 0.4068576 ], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s2_1_3,p_linear_weight::T10)##p_linear_weight::T10/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.32185757], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--p_linear_weight::T10, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
FAILED
diff.1
dynamo-ir#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#3[T1s8x1,T1s8x2,T1s8x3])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.08032564, -0.3425686 , -0.27984935], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.3142383], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='val_3' type=int64 shape=(1,) -- array([1])
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, buff) -> sub_4
ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul_1
Add(sub_4, mul_1) -> add_15
output: name='add_15' type=dtype('float32') shape=['batch', 1]
FAILED
diff.1
tracing#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#3[T1s8x1,T1s8x2,T1s8x3])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='_traced_m2.linear.bias' type=float32 shape=(1,) -- array([-0.44008237], dtype=float32)-- GraphBuilder.make_nodes/from_traced_m2.linear.bias##DynamoInterpret.get_attr.1/P(_traced_m2.linear.bias)
init: name='_traced_m2_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.get_attr.0
init: name='GemmTransposePattern--_sub_ime___traced_m2_linear_weight::T10' type=float32 shape=(1, 3) -- array([ 0.43364686, -0.518422 , 0.35264233], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_sub_ime___traced_m2_linear_weight::T10,init7_s2_1_3)##_sub_ime___traced_m2_linear_weight::T10/GraphBuilder.constant_folding.from/fold(_traced_m2.linear.weight)##_traced_m2.linear.weight/GraphBuilder.make_nodes/from_traced_m2.linear.weight##DynamoInterpret.get_attr.1/P(_traced_m2.linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
Gemm(x, GemmTransposePattern--_sub_ime___traced_m2_linear_weight::T10, _traced_m2.linear.bias, transB=1) -> _sub_ime___traced_m2_linear_linear
Sigmoid(_sub_ime___traced_m2_linear_linear) -> sigmoid
Sub(sigmoid, _traced_m2_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output
output: name='output' type=dtype('float32') shape=['batch', 1]
FAILED
diff.1
ComplexPolar#
code: yobx.torch._model_eval_cases.ComplexPolar
forward#
def forward(self, x, angle):
return torch.polar(x, angle)
yobx#
inputs:
#1[(T1s4x4,T1s4x4)]shapes:
dict(x:{0:Dim(batch)},angle:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
input: name='angle' type=dtype('float32') shape=['batch', 4]
init: name='init14_s1_' type=complex64 shape=(1,) -- array([0.+1.j], dtype=complex64)-- Opset.make_node.1/Small
Cast(x, to=14) -> x::C14
Cos(angle) -> _onx_cos_angle
Cast(_onx_cos_angle, to=14) -> _onx_cos_angle::C14
Sin(angle) -> _onx_sin_angle
Cast(_onx_sin_angle, to=14) -> _onx_sin_angle::C14
Mul(_onx_sin_angle::C14, init14_s1_) -> _onx_mul_sin_angle::C14
Add(_onx_cos_angle::C14, _onx_mul_sin_angle::C14) -> _onx_add_cos_angle::C14
Mul(x::C14, _onx_add_cos_angle::C14) -> output_0
output: name='output_0' type=dtype('complex64') shape=['batch', 4]
FAILED
[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. Type Error: Type 'tensor(complex64)' of input parameter (_onx_sin_angle::C14) of operator (Mul) in node (polar5) is invalid.
dynamo-ir#
inputs:
#1[(T1s4x4,T1s4x4)]shapes:
dict(x:{0:Dim(batch)},angle:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
input: name='angle' type=dtype('float32') shape=['batch', 4]
init: name='int64_m1_1d' type=int64 shape=(1,) -- array([-1])
Cos(angle) -> tmp
Mul(x, tmp) -> tmp_0
Unsqueeze(tmp_0, int64_m1_1d) -> real
Sin(angle) -> tmp_1
Mul(x, tmp_1) -> tmp_2
Unsqueeze(tmp_2, int64_m1_1d) -> imag
Concat(real, imag, axis=-1) -> polar
output: name='polar' type=dtype('float32') shape=['batch', 4, 2]
FAILED
diff.0
tracing#
inputs:
#1[(T1s4x4,T1s4x4)]shapes:
dict(x:{0:Dim(batch)},angle:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
input: name='angle' type=dtype('float32') shape=['batch', 4]
init: name='init14_s1_' type=complex64 shape=(1,) -- array([0.+1.j], dtype=complex64)-- Opset.make_node.1/Small
Cast(x, to=14) -> x::C14
Cos(angle) -> _onx_cos_angle
Cast(_onx_cos_angle, to=14) -> _onx_cos_angle::C14
Sin(angle) -> _onx_sin_angle
Cast(_onx_sin_angle, to=14) -> _onx_sin_angle::C14
Mul(_onx_sin_angle::C14, init14_s1_) -> _onx_mul_sin_angle::C14
Add(_onx_cos_angle::C14, _onx_mul_sin_angle::C14) -> _onx_add_cos_angle::C14
Mul(x::C14, _onx_add_cos_angle::C14) -> output
output: name='output' type=dtype('complex64') shape=['batch', 4]
FAILED
[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. Type Error: Type 'tensor(complex64)' of input parameter (_onx_sin_angle::C14) of operator (Mul) in node (polar5) is invalid.
ControlFlowCond#
code: yobx.torch._model_eval_cases.ControlFlowCond
forward#
def forward(self, x):
def true_fn(x):
return torch.sin(x)
def false_fn(x):
return torch.cos(x)
return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
yobx#
inputs:
#1[(T1s5x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0
Cos(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0
Sin(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
dynamo-ir#
inputs:
#1[(T1s5x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='scalar_tensor_default' type=float32 shape=() -- array([0.], dtype=float32)
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, scalar_tensor_default) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__0 - att.then_branch=G1 -- level=1 -- -> sin_true_graph_0
Sin(x) -> sin_true_graph_0
output: name='sin_true_graph_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__0 - att.else_branch=G2 -- level=1 -- -> cos_false_graph_0
Cos(x) -> cos_false_graph_0
output: name='cos_false_graph_0' type=dtype('float32') shape=['batch', 3]
tracing#
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
ControlFlowCond2Inputs#
code: yobx.torch._model_eval_cases.ControlFlowCond2Inputs
forward#
def forward(self, x, y):
def true_fn(x, y):
return torch.sin(x), torch.cos(x) + y
def false_fn(x, y):
return torch.cos(x), torch.sin(x) + y
return torch.cond(x.sum() > 0, true_fn, false_fn, [x, y])
yobx#
inputs:
#1[(T1s5x3,T1s5x3)]shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=['batch', 3]
output: name='output_1' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0,cond#1
Cos(x) -> cond#0
Sin(x) -> sin2
Add(sin2, y) -> cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0,cond#1
Cos(x) -> cos2
Add(cos2, y) -> cond#1
Sin(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
dynamo-ir#
inputs:
#1[(T1s5x3,T1s5x3)]shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 3]
init: name='scalar_tensor_default' type=float32 shape=() -- array([0.], dtype=float32)
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, scalar_tensor_default) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem, getitem_1
output: name='getitem' type=dtype('float32') shape=['batch', 3]
output: name='getitem_1' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__1 - att.then_branch=G1 -- level=1 -- -> sin_true_graph_0,add_12_true_graph_0
Cos(x) -> cos
Add(cos, y) -> add_12_true_graph_0
Sin(x) -> sin_true_graph_0
output: name='sin_true_graph_0' type=dtype('float32') shape=['batch', 3]
output: name='add_12_true_graph_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__1 - att.else_branch=G2 -- level=1 -- -> cos_false_graph_0,add_12_false_graph_0
Cos(x) -> cos_false_graph_0
Sin(x) -> sin_2
Add(sin_2, y) -> add_12_false_graph_0
output: name='cos_false_graph_0' type=dtype('float32') shape=['batch', 3]
output: name='add_12_false_graph_0' type=dtype('float32') shape=['batch', 3]
tracing#
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
ControlFlowCond2Outputs#
code: yobx.torch._model_eval_cases.ControlFlowCond2Outputs
forward#
def forward(self, x):
def true_fn(x):
return torch.sin(x), torch.cos(x)
def false_fn(x):
return torch.cos(x), torch.sin(x)
return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
yobx#
inputs:
#1[(T1s5x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=['batch', 3]
output: name='output_1' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0,cond#1
Cos(x) -> cond#0
Sin(x) -> cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0,cond#1
Cos(x) -> cond#1
Sin(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
dynamo-ir#
inputs:
#1[(T1s5x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='scalar_tensor_default' type=float32 shape=() -- array([0.], dtype=float32)
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, scalar_tensor_default) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem, getitem_1
output: name='getitem' type=dtype('float32') shape=['batch', 3]
output: name='getitem_1' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__1 - att.then_branch=G1 -- level=1 -- -> sin_true_graph_0,cos_true_graph_0
Cos(x) -> cos_true_graph_0
Sin(x) -> sin_true_graph_0
output: name='sin_true_graph_0' type=dtype('float32') shape=['batch', 3]
output: name='cos_true_graph_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__1 - att.else_branch=G2 -- level=1 -- -> cos_false_graph_0,sin_false_graph_0
Cos(x) -> cos_false_graph_0
Sin(x) -> sin_false_graph_0
output: name='cos_false_graph_0' type=dtype('float32') shape=['batch', 3]
output: name='sin_false_graph_0' type=dtype('float32') shape=['batch', 3]
tracing#
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
ControlFlowCondConstant#
code: yobx.torch._model_eval_cases.ControlFlowCondConstant
forward#
def forward(self, x):
def true_fn(x):
return torch.sin(x) - torch.ones(x.shape, dtype=x.dtype)
def false_fn(x):
return torch.cos(x) + torch.ones((1, 1024), dtype=x.dtype)
return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
yobx#
inputs:
#1[(T1s1024x1024,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 1024]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
init: name='init7_s2_1_10242_cst2init' type=int64 shape=(2,) -- array([ 1, 1024])-- GraphBuilderPatternOptimization.make_initializer.1/Shape
init: name='init7_s1_10242_cst2init' type=int64 shape=(1,) -- array([1024])-- GraphBuilderPatternOptimization.make_initializer.1/Shape
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1024]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0
ConstantOfShape(init7_s2_1_10242_cst2init, value=[1.0]) -> ones2
Cos(x) -> cos2
Add(cos2, ones2) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0
Shape(x, end=1, start=0) -> x::Shape:12
Concat(x::Shape:12, init7_s1_10242_cst2init, axis=0) -> _onx_concat_sym_size_int_1::UnSq02
ConstantOfShape(_onx_concat_sym_size_int_1::UnSq02, value=[1.0]) -> ones32
Sin(x) -> sin2
Sub(sin2, ones32) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
dynamo-ir#
inputs:
#1[(T1s1024x1024,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 1024]
init: name='scalar_tensor_default' type=float32 shape=() -- array([0.], dtype=float32)
init: name='val_7' type=float32 shape=() -- array([1.], dtype=float32)
init: name='val_3' type=int64 shape=(1,) -- array([1024])
init: name='ones_2' type=float32 shape=(1, 1024)
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, scalar_tensor_default) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=['batch', 1024]
----- subgraph ---- If - node_cond__0 - att.then_branch=G1 -- level=1 -- -> sub_3_true_graph_0
Shape(x, end=1, start=0) -> val_0_2
Concat(val_0_2, val_3, axis=0) -> val_4
Expand(val_7, val_4) -> ones
Sin(x) -> sin
Sub(sin, ones) -> sub_3_true_graph_0
output: name='sub_3_true_graph_0' type=dtype('float32') shape=['batch', 1024]
----- subgraph ---- If - node_cond__0 - att.else_branch=G2 -- level=1 -- -> add_6_false_graph_0
Cos(x) -> cos
Add(cos, ones_2) -> add_6_false_graph_0
output: name='add_6_false_graph_0' type=dtype('float32') shape=['batch', 1024]
tracing#
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
ControlFlowCondIdentity_153832#
code: yobx.torch._model_eval_cases.ControlFlowCondIdentity_153832
forward#
def forward(self, x, y):
def branch_cond_then_1(x):
x = torch.abs(x) + 1
return x
def branch_cond_else_1(x):
return x # fails but succeeds with x.clone()
x = torch.cond(x.sum() > 0, branch_cond_then_1, branch_cond_else_1, [x])
return x + y
yobx#
FAILED
This higher order operator doesn't work unless it is captured completely with torch.compile. Got graph break/error:
Encountered aliasing during higher order op tracing
Higher Order Operator: torch.cond
Explanation: Higher order ops do not support aliasing. Found in <bound method HigherOrderOperator.name of <torch._higher_order_ops.cond.CondOp object at 0x751538ac7410>>
Hint: Replace `return input` with `return input.clone()` to avoid aliasing.
Hint: Consider using the debug context to change user code to avoid aliasing.
Hint: Please open an issue.
Developer debug context: Input-to-output aliasing detected at nodes l_args_3_0_ and l_args_3_0_ in
graph():
%l_args_3_0_ : torch._subclasses.fake_tensor.FakeTensor [num_users=1] = placeholder[target=l_args_3_0_]
return (l_args_3_0_,)
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0040.html
from user code:
File "~/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/cond.py", line 242, in _cond_op_wrapper
return cond_op(*args, **kwargs)
File "~/vv/this312/lib/python3.12/site-packages/torch/_export/non_strict_utils.py", line 1144, in __torch_function__
return func(*args, **kwargs)
Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"
dynamo-ir#
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'torch._dynamo.exc.UncapturedHigherOrderOpError'>: This higher order operator doesn't work unless it is captured completely with torch.compile. Got graph break/error:
Encountered aliasing during higher order op tracing
Higher Order Operator: torch.cond
Explanation: Higher order ops do not support aliasing. Found in <bound method HigherOrderOperator.name of <torch._higher_order_ops.cond.CondOp object at 0x751538ac7410>>
Hint: Replace `return input` with `return input.clone()` to avoid aliasing.
Hint: Consider using the debug context to change user code to avoid aliasing.
Hint: Please open an issue.
Developer debug context: Input-to-output aliasing detected at nodes l_args_3_0_ and l_args_3_0_ in
graph():
%l_args_3_0_ : torch._subclasses.fake_tensor.FakeTensor [num_users=1] = placeholder[target=l_args_3_0_]
return (l_args_3_0_,)
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0040.html
from user code:
File "~/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/cond.py", line 242, in _cond_op_wrapper
return cond_op(*args, **kwargs)
File "~/vv/this312/lib/python3.12/site-packages/torch/_export/non_strict_utils.py", line 1144, in __torch_function__
return func(*args, **kwargs)
Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"
(Refer to the full stack trace above for more information.)
tracing#
FAILED
Unexpected type <class 'torch.export.dynamic_shapes._DimHint'> in dynamic_shapes={0: DimHint(DYNAMIC), 1: DimHint(DYNAMIC)} at index 0, self._mapping_str={}
ControlFlowCondNestedModule#
code: yobx.torch._model_eval_cases.ControlFlowCondNestedModule
forward#
def forward(self, x):
def true_fn(x):
return self.submodule(x)
def false_fn(x):
return x - self.weight
y = torch.cond(x.sum() > 0, true_fn, false_fn, [x])
return y
yobx#
inputs:
#1[(T7s2,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: name='x' type=dtype('int64') shape=['batch']
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- shape_type_compute._cast_inputs.1(gt_Scalar)
init: name='init7_s_1002_cst2init' type=int64 shape=() -- array([100])-- GraphBuilderPatternOptimization.make_initializer.1/Shape
init: name='weight' type=float32 shape=(1,) -- array([42.], dtype=float32)-- DynamoInterpret.placeholder.1/P(weight)
init: name='submodule.weight' type=float32 shape=(1,) -- array([100.], dtype=float32)-- DynamoInterpret.placeholder.1/P(submodule.weight)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init7_s_0) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['d_output_0_0']
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0
Cast(x, to=1) -> x::C12
Sub(x::C12, weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0
Abs(x) -> abs_12
ReduceSum(abs_12, keepdims=0) -> sum_122
Greater(sum_122, init7_s_1002_cst2init) -> gt22
If(gt22, else_branch=G3, then_branch=G4) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.else_branch=G3 -- level=2 -- -> cond#0
Cast(x, to=1) -> x::C132
Div(x::C132, submodule.weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.then_branch=G4 -- level=2 -- -> cond#0
Cast(x, to=1) -> x::C142
Mul(x::C142, submodule.weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.else_branch=G3 -- level=1 -- -> cond#0
Cast(x, to=1) -> x::C132
Div(x::C132, submodule.weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.then_branch=G4 -- level=1 -- -> cond#0
Cast(x, to=1) -> x::C142
Mul(x::C142, submodule.weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
dynamo-ir#
inputs:
#1[(T7s2,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('int64') shape=['batch']
init: name='weight' type=float32 shape=(1,) -- array([42.], dtype=float32)
init: name='submodule.weight' type=float32 shape=(1,) -- array([100.], dtype=float32)
init: name='val_0' type=int64 shape=() -- array([0])
init: name='val_0_2' type=int64 shape=() -- array([100])
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, val_0) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=['batch']
----- subgraph ---- If - node_cond__0 - att.then_branch=G1 -- level=1 -- -> getitem_true_graph_0
Abs(x) -> abs_1
ReduceSum(abs_1, noop_with_empty_axes=0, keepdims=0) -> sum_1_2
Greater(sum_1_2, val_0_2) -> gt_2
If(gt_2, then_branch=G3, else_branch=G4) -> getitem_true_graph_0
output: name='getitem_true_graph_0' type=dtype('float32') shape=['batch']
----- subgraph ---- If - node_cond__0_2 - att.then_branch=G3 -- level=2 -- -> mul_1_true_graph_0__true_graph_0
Cast(x, to=1) -> convert_element_type_default
Mul(convert_element_type_default, submodule.weight) -> mul_1_true_graph_0__true_graph_0
output: name='mul_1_true_graph_0__true_graph_0' type=dtype('float32') shape=['batch']
----- subgraph ---- If - node_cond__0_2 - att.else_branch=G4 -- level=2 -- -> div_true_graph_0__false_graph_0
Cast(x, to=1) -> convert_element_type_default_2
Div(convert_element_type_default_2, submodule.weight) -> div_true_graph_0__false_graph_0
output: name='div_true_graph_0__false_graph_0' type=dtype('float32') shape=['batch']
----- subgraph ---- If - node_cond__0_2 - att.then_branch=G3 -- level=1 -- -> mul_1_true_graph_0__true_graph_0
Cast(x, to=1) -> convert_element_type_default
Mul(convert_element_type_default, submodule.weight) -> mul_1_true_graph_0__true_graph_0
output: name='mul_1_true_graph_0__true_graph_0' type=dtype('float32') shape=['batch']
----- subgraph ---- If - node_cond__0_2 - att.else_branch=G4 -- level=1 -- -> div_true_graph_0__false_graph_0
Cast(x, to=1) -> convert_element_type_default_2
Div(convert_element_type_default_2, submodule.weight) -> div_true_graph_0__false_graph_0
output: name='div_true_graph_0__false_graph_0' type=dtype('float32') shape=['batch']
----- subgraph ---- If - node_cond__0 - att.else_branch=G2 -- level=1 -- -> sub_1_false_graph_0
Cast(x, to=1) -> convert_element_type_default_3
Sub(convert_element_type_default_3, weight) -> sub_1_false_graph_0
output: name='sub_1_false_graph_0' type=dtype('float32') shape=['batch']
tracing#
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
ControlFlowCondNonZero#
code: yobx.torch._model_eval_cases.ControlFlowCondNonZero
forward#
def forward(self, input_ids, image_features, vocab_size):
def then_branch(input_ids, image_features, vocab_size):
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
condition = (input_ids < 0) & (input_ids > -int(1e9))
positions = torch.nonzero(condition, as_tuple=True)
input_ids = input_ids.clamp_min(0).clamp_max(vocab_size)
return (input_ids, positions[0], positions[1])
def else_branch(input_ids, image_features, vocab_size):
r = torch.where(torch.zeros((1, 1), dtype=torch.bool))
return (input_ids, r[0], r[1])
a, b, c = torch.cond(
image_features.numel() > 0,
then_branch,
else_branch,
[input_ids, image_features, vocab_size],
)
return a, b, c
yobx#
FAILED
Expect operands to be a tuple of possibly nested dict/list/tuple that only consists of tensor leaves, but got [FakeTensor(..., size=(s72, 12), dtype=torch.int64), FakeTensor(..., size=(s28, s11)), 1025].
dynamo-ir#
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'RuntimeError'>: Expect operands to be a tuple of possibly nested dict/list/tuple that only consists of tensor leaves, but got [FakeTensor(..., size=(s72, 12), dtype=torch.int64), FakeTensor(..., size=(s28, s11)), 1025].
(Refer to the full stack trace above for more information.)
tracing#
FAILED
val is None for node=output, output=(getitem, getitem_1, getitem_2), a='getitem', o='output_0', has_type=False, has_rank=False, has_shape=False,
meta={}
node.__dict__={}
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-PGA] Message starts, there are 2 initializers, 11 nodes, 3 inputs, 3 outputs.
input_names=['input_ids', 'image_features', 'vocab_size']
output_names=[]
--LOCAL FUNCTIONS--
local_functions,_cb_cond_then_branch_0(['arg0', 'arg1', 'arg2']) -> ['output_0', 'output_1', 'output_2']
local_functions,_cb_cond_else_branch_0(['arg0', 'arg1', 'arg2']) -> ['output_0', 'output_1', 'output_2']
--CONSTRAINTS--
batch = {'s26'}
s19 = {'seq_length'}
s26 = {'batch'}
seq_length = {'s19'}
--SHAPE--
_dynamic_examples=
dynamic_objects=
batch = WrapSym(batch)
seq_length = WrapSym(seq_length)
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
'seq_length' = <class 'list'>
tuple
'seq_length'
ERR**: <class 'torch.SymInt'>:'seq_length'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'input_ids'},
{'axis': 0, 'input_name': 'image_features'}],
'seq_length': [{'axis': 1, 'input_name': 'image_features'}]}
dynamic_dimensions_source_flat=[0, 1]
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s19': 'seq_length', 's26': 'batch'}
dynamic_shapes=({0: Dim('batch', min=0)},
{0: Dim('batch', min=0), 1: Dim('seq_length', min=0)},
None)
_known_shapes={'gt': (),
'image_features': ('batch', 'seq_length'),
'init7_s1_1': (1,),
'init7_s_0': (),
'init7_s_0::RSh1': (1,),
'input_ids': ('batch', 12),
'numel': (),
'numel::RSh1': (1,),
'vocab_size': ()}
_known_types={'gt': 9,
'image_features': 1,
'init7_s1_1': 7,
'init7_s_0': 7,
'init7_s_0::RSh1': 7,
'input_ids': 7,
'numel': 7,
'numel::RSh1': 7,
'vocab_size': 7}
_known_devices={'gt': -1,
'image_features': -1,
'input_ids': -1,
'numel': -1,
'numel::RSh1': -1,
'vocab_size': -1}
_context=[]
_known_value_shape={'numel': 'batch*seq_length'}
_known_constants=['init7_s1_1', 'init7_s_0', 'init7_s_0::RSh1']
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
_cb_cond_else_branch_0 -> {condcc}
_cb_cond_then_branch_0 -> {condcc}
condcc -> {getitem_2, getitem_1, getitem}
getitem -> {output}
getitem_1 -> {output}
getitem_2 -> {output}
gt -> {condcc}
image_features -> {condcc, numel}
input_ids -> {condcc}
numel -> {gt}
output -> set()
vocab_size -> {condcc}
--TORCH-SHAPES--
input_ids: ('run_node', (('example_value', torch.int64, torch.Size([2, 12])), ('val', torch.int64, torch.Size([s26, 12])))) --- 7:2:('batch', 12):
image_features: ('run_node', (('example_value', torch.float32, torch.Size([2, 16])), ('val', torch.float32, torch.Size([s26, s19])))) --- 1:2:('batch', 'seq_length'):
vocab_size: ('run_node', (('example_value', torch.int64, torch.Size([])), ('val', torch.int64, torch.Size([])))) --- 7:0:():
numel: ('run_node', ('', '')) --- 7:0:():
gt: ('run_node', ('', '')) --- 9:0:():
_cb_cond_then_branch_0: ('run_node', ('', '')) --- :::
_cb_cond_else_branch_0: ('run_node', ('', '')) --- :::
condcc: ('run_node', ('', '')) --- :::
getitem: ('run_node', ('', '')) --- :::
getitem_1: ('run_node', ('', '')) --- :::
getitem_2: ('run_node', ('', '')) --- :::
output: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(tracing=True, aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
ControlFlowCondNonZero()
def forward(self, input_ids, image_features, vocab_size):
numel = image_features.numel()
gt = numel > 0; numel = None
_cb_cond_then_branch_0 = self._cb_cond_then_branch_0
_cb_cond_else_branch_0 = self._cb_cond_else_branch_0
condcc = torch.ops.higher_order.cond(gt, _cb_cond_then_branch_0, _cb_cond_else_branch_0, [input_ids, image_features, vocab_size]); gt = _cb_cond_then_branch_0 = _cb_cond_else_branch_0 = input_ids = image_features = vocab_size = None
getitem = condcc[0]
getitem_1 = condcc[1]
getitem_2 = condcc[2]; condcc = None
return (getitem, getitem_1, getitem_2)
# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
%input_ids : [num_users=1] = placeholder[target=input_ids]
%image_features : [num_users=2] = placeholder[target=image_features]
%vocab_size : [num_users=1] = placeholder[target=vocab_size]
%numel : [num_users=1] = call_method[target=numel](args = (%image_features,), kwargs = {})
%gt : [num_users=1] = call_function[target=operator.gt](args = (%numel, 0), kwargs = {})
%_cb_cond_then_branch_0 : [num_users=1] = get_attr[target=_cb_cond_then_branch_0]
%_cb_cond_else_branch_0 : [num_users=1] = get_attr[target=_cb_cond_else_branch_0]
%condcc : [num_users=3] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %_cb_cond_then_branch_0, %_cb_cond_else_branch_0, [%input_ids, %image_features, %vocab_size]), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%condcc, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%condcc, 1), kwargs = {})
%getitem_2 : [num_users=1] = call_function[target=operator.getitem](args = (%condcc, 2), kwargs = {})
return (getitem, getitem_1, getitem_2)
-- process.inputs_to_remove --
set()
-- process.progress --
node 11/12 target=output
-- 3 INPUTS
[GraphBuilder-PGA.1.make_tensor_input] input_ids[7:batchx12]
[GraphBuilder-PGA.1.make_tensor_input] image_features[1:batchxseq_length]
[GraphBuilder-PGA.1.make_tensor_input] vocab_size[7:]
-- 2 INITIALIZERS
[GraphBuilder-PGA.1.make_initializer] init7_s_0[int64:int64:[0]] - SOURCE: shape_type_compute._cast_inputs.1(gt)
[GraphBuilder-PGA.1.make_initializer] init7_s1_1[int64:int64:[1]] - SOURCE: Opset.make_node.1/Shape##Opset.make_node.1/Shape
[GraphBuilder-PGA.4.make_node] meth_numel [@:@ ] Size:['image_features']->['numel']
[GraphBuilder-PGA.4.make_node] gt [@#:@ ] Reshape:['numel', 'init7_s1_1']->['numel::RSh1']
[GraphBuilder-PGA.4.make_node] gt2 [##:# ] Reshape:['init7_s_0', 'init7_s1_1']->['init7_s_0::RSh1']
[GraphBuilder-PGA.4.make_node] gt3 [@#:@ ] Greater:['numel', 'init7_s_0']->['gt']
[GraphBuilder-PGA.4.make_node] cond [@:--- ] If:['gt']->['condcc#0', 'condcc#1', 'condcc#2']
[GraphBuilder-PGA.4.make_node] -- subgraph 'else_branch'...
_cb_cond_else_branch_0[local_functions](input_ids, image_features, vocab_size) -> condcc#0, condcc#1, condcc#2
output: name='condcc#0' type='NOTENSOR' shape=None
output: name='condcc#1' type='NOTENSOR' shape=None
output: name='condcc#2' type='NOTENSOR' shape=None
[GraphBuilder-PGA.4.make_node] -- subgraph 'else_branch' -- done
[GraphBuilder-PGA.4.make_node] -- subgraph 'then_branch'...
_cb_cond_then_branch_0[local_functions](input_ids, image_features, vocab_size) -> condcc#0, condcc#1, condcc#2
output: name='condcc#0' type='NOTENSOR' shape=None
output: name='condcc#1' type='NOTENSOR' shape=None
output: name='condcc#2' type='NOTENSOR' shape=None
[GraphBuilder-PGA.4.make_node] -- subgraph 'then_branch' -- done
[GraphBuilder-PGA.4.make_node] getitemB_tuple [-:- ] Identity:['condcc#0']->['getitem']
[GraphBuilder-PGA.4.make_node] getitemB_tuple2 [-:- ] Identity:['condcc#1']->['getitem_1']
[GraphBuilder-PGA.4.make_node] getitemB_tuple3 [-:- ] Identity:['condcc#2']->['getitem_2']
[GraphBuilder-PGA.4.make_node] .output [-:- ] Identity:['getitem']->['output_0']
[GraphBuilder-PGA.4.make_node] .output2 [-:- ] Identity:['getitem_1']->['output_1']
[GraphBuilder-PGA.4.make_node] .output3 [-:- ] Identity:['getitem_2']->['output_2']
-- 0 OUTPUTS
[GraphBuilder-PGA] Message completed, there are 2 initializers, 11 nodes, 3 inputs, 3 outputs.
ControlFlowNestCond#
code: yobx.torch._model_eval_cases.ControlFlowNestCond
forward#
def forward(self, x):
def true_fn2(x):
def true_fn1(x):
return torch.sin(x)
def false_fn1(x):
return torch.cos(x)
return torch.cond(x.sum() < 0, true_fn1, false_fn1, [x])
def false_fn2(x):
return -x
return torch.cond(x.sum() > 0, true_fn2, false_fn2, [x])
yobx#
inputs:
#1[(T1s5x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0
Neg(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0
ReduceSum(x, keepdims=0) -> sum_122
Less(sum_122, init1_s_) -> lt2
If(lt2, else_branch=G3, then_branch=G4) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.else_branch=G3 -- level=2 -- -> cond#0
Cos(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.then_branch=G4 -- level=2 -- -> cond#0
Sin(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.else_branch=G3 -- level=1 -- -> cond#0
Cos(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.then_branch=G4 -- level=1 -- -> cond#0
Sin(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
dynamo-ir#
inputs:
#1[(T1s5x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='scalar_tensor_default' type=float32 shape=() -- array([0.], dtype=float32)
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, scalar_tensor_default) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__0 - att.then_branch=G1 -- level=1 -- -> getitem_true_graph_0
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1_2
Less(sum_1_2, scalar_tensor_default) -> lt
If(lt, then_branch=G3, else_branch=G4) -> getitem_true_graph_0
output: name='getitem_true_graph_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__0_2 - att.then_branch=G3 -- level=2 -- -> sin_true_graph_0__true_graph_0
Sin(x) -> sin_true_graph_0__true_graph_0
output: name='sin_true_graph_0__true_graph_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__0_2 - att.else_branch=G4 -- level=2 -- -> cos_true_graph_0__false_graph_0
Cos(x) -> cos_true_graph_0__false_graph_0
output: name='cos_true_graph_0__false_graph_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__0_2 - att.then_branch=G3 -- level=1 -- -> sin_true_graph_0__true_graph_0
Sin(x) -> sin_true_graph_0__true_graph_0
output: name='sin_true_graph_0__true_graph_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__0_2 - att.else_branch=G4 -- level=1 -- -> cos_true_graph_0__false_graph_0
Cos(x) -> cos_true_graph_0__false_graph_0
output: name='cos_true_graph_0__false_graph_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__0 - att.else_branch=G2 -- level=1 -- -> neg_false_graph_0
Neg(x) -> neg_false_graph_0
output: name='neg_false_graph_0' type=dtype('float32') shape=['batch', 3]
tracing#
FAILED
aten_meth_sum() missing 1 required positional argument: 'axis'
ControlFlowScan#
code: yobx.torch._model_eval_cases.ControlFlowScan
forward#
def forward(self, x):
init = torch.zeros_like(x[0])
carry, _out = torch.ops.higher_order.scan(
ControlFlowScan.add, [init], [x], additional_inputs=[]
)
return carry
yobx#
inputs:
#1[(T1s3x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
ConstantOfShape(init7_s1_3, value=[0.0]) -> zeros_like
Scan(zeros_like, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> output_0, scan#1
output: name='output_0' type=dtype('float32') shape=[3]
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_zeros_like,scan_0_x -> output_0,output_1
input: name='init_0_zeros_like' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
Add(init_0_zeros_like, scan_0_x) -> output_0
Identity(output_0) -> output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
dynamo-ir#
FAILED
Failed to decompose the FX graph for ONNX compatibility. This is step 2/3 of exporting the model to ONNX. Next steps:
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'RuntimeError'>: scan might be aliasing the input or the output!
While executing %scan : [num_users=2] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like], [%x], ()), kwargs = {})
Original traceback:
File "~/github/yet-another-onnx-builder/yobx/torch/_model_eval_cases.py", line 499, in forward
carry, _out = torch.ops.higher_order.scan(
Use tlparse to see full graph. (https://github.com/pytorch/tlparse?tab=readme-ov-file#tlparse-parse-structured-pt2-logs)
(Refer to the full stack trace above for more information.)
tracing#
FAILED
Unable to symbolically trace the HigherOrderOperator scan because it received an arbitrary callable argument <function ControlFlowScan.add at 0x751536a377e0>. Use make_fx or dynamo tracing instead.
ControlFlowScan2Carried#
code: yobx.torch._model_eval_cases.ControlFlowScan2Carried
forward#
def forward(self, x):
init1 = torch.zeros_like(x[0])
init2 = torch.ones_like(x[0])
carry1, carry2, out1, out2 = torch.ops.higher_order.scan(
ControlFlowScan2Carried.add,
[init1, init2],
[x, x * 2],
# dim=0, # 01/31/2025, not supported anymore
additional_inputs=[],
)
return carry1, carry2, out1, out2
yobx#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.1(mul_Tensor)##init7_s1_1/Opset.make_node.1/Shape
ConstantOfShape(init7_s1_4, value=[1.0]) -> ones_like
ConstantOfShape(init7_s1_4, value=[0.0]) -> zeros_like
Mul(x, init1_s_::RSh1) -> _onx_mul_x
Scan(zeros_like, ones_like, x, _onx_mul_x, body=G1, num_scan_inputs=2, scan_input_directions=[0,0], scan_output_axes=[0,0], scan_output_directions=[0,0]) -> output_0, output_1, output_2, output_3
output: name='output_0' type=dtype('float32') shape=[4]
output: name='output_1' type=dtype('float32') shape=[4]
output: name='output_2' type=dtype('float32') shape=['batch', 4]
output: name='output_3' type=dtype('float32') shape=['batch', 4]
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_zeros_like,init_1_ones_like,scan_0_x,scan_1_mul -> output_0,output_1,output_2,output_3
input: name='init_0_zeros_like' type='NOTENSOR' shape=None
input: name='init_1_ones_like' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
input: name='scan_1_mul' type='NOTENSOR' shape=None
Add(init_0_zeros_like, scan_0_x) -> output_0
Identity(output_0) -> output_2
Mul(init_1_ones_like, scan_1_mul) -> output_1
Identity(output_1) -> output_3
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
output: name='output_2' type='NOTENSOR' shape=None
output: name='output_3' type='NOTENSOR' shape=None
dynamo-ir#
FAILED
Failed to decompose the FX graph for ONNX compatibility. This is step 2/3 of exporting the model to ONNX. Next steps:
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'RuntimeError'>: scan might be aliasing the input or the output!
While executing %scan : [num_users=4] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like, %ones_like], [%x, %mul], ()), kwargs = {})
Original traceback:
File "~/github/yet-another-onnx-builder/yobx/torch/_model_eval_cases.py", line 518, in forward
carry1, carry2, out1, out2 = torch.ops.higher_order.scan(
Use tlparse to see full graph. (https://github.com/pytorch/tlparse?tab=readme-ov-file#tlparse-parse-structured-pt2-logs)
(Refer to the full stack trace above for more information.)
tracing#
FAILED
Unable to symbolically trace the HigherOrderOperator scan because it received an arbitrary callable argument <function ControlFlowScan2Carried.add at 0x751536a379c0>. Use make_fx or dynamo tracing instead.
ControlFlowScanCDist#
code: yobx.torch._model_eval_cases.ControlFlowScanCDist
forward#
def forward(self, x):
_carry, out = torch.ops.higher_order.scan(
ControlFlowScanCDist.dist,
[x],
[x],
# dim=0, # 01/31/2025, not supported anymore
additional_inputs=[],
)
return out
yobx#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s1_12_cst2init' type=int64 shape=(1,) -- array([1]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
init: name='init1_s_2_cst2init' type=float32 shape=() -- array([0.5], dtype=float32)-- GraphBuilderPatternOptimization.make_initializer.1/Small
init: name='init7_s1_02_cst2init' type=int64 shape=(1,) -- array([0]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
Scan(x, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'batch']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_x,scan_0_x -> output_0,output_1
input: name='init_0_x' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
Identity(init_0_x) -> output_0
Unsqueeze(scan_0_x, init7_s1_02_cst2init) -> reshape2
Sub(init_0_x, reshape2) -> sub2
Mul(sub2, sub2) -> mul2
ReduceSum(mul2, init7_s1_12_cst2init, keepdims=0) -> sum_12
Pow(sum_12, init1_s_2_cst2init) -> output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
dynamo-ir#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='val_3' type=int64 shape=(2,) -- array([ 1, -1])
init: name='val_4' type=int64 shape=(1,) -- array([1])
init: name='val_5' type=float32 shape=() -- array([0.5], dtype=float32)
Scan(x, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_directions=[0]) -> scan__0, getitem_1
output: name='getitem_1' type=dtype('float32') shape=['batch', 'batch']
----- subgraph ---- Scan - node_scan__1 - att.body=G1 -- level=1 -- x_scan_combine_graph_0__subgraph_in,x_scan_combine_graph_0__subgraph_in_1 -> clone_scan_combine_graph_0,pow_1_scan_combine_graph_0
input: name='x_scan_combine_graph_0__subgraph_in' type=dtype('float32') shape=['s77', 4]
input: name='x_scan_combine_graph_0__subgraph_in_1' type=dtype('float32') shape=[4]
Identity(x_scan_combine_graph_0__subgraph_in) -> clone_scan_combine_graph_0
Reshape(x_scan_combine_graph_0__subgraph_in_1, val_3, allowzero=1) -> view
Sub(x_scan_combine_graph_0__subgraph_in, view) -> sub_1
Mul(sub_1, sub_1) -> mul_4
ReduceSum(mul_4, val_4, noop_with_empty_axes=0, keepdims=0) -> sum_1
Pow(sum_1, val_5) -> pow_1_scan_combine_graph_0
output: name='clone_scan_combine_graph_0' type=dtype('float32') shape=['batch', 4]
output: name='pow_1_scan_combine_graph_0' type=dtype('float32') shape=['batch']
tracing#
FAILED
Unable to symbolically trace the HigherOrderOperator scan because it received an arbitrary callable argument <function ControlFlowScanCDist.dist at 0x751536a37ba0>. Use make_fx or dynamo tracing instead.
ControlFlowScanCDist2#
code: yobx.torch._model_eval_cases.ControlFlowScanCDist2
forward#
def forward(self, x):
z = torch.tensor([0], dtype=torch.float32)
y = x.clone()
out = torch.ops.higher_order.scan(
ControlFlowScanCDist2.dist,
[z],
[x],
# dim=0, # 01/31/2025, not supported anymore
additional_inputs=[y],
)
return out[1]
yobx#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_lifted_tensor_0' type=float32 shape=(1,) -- array([0.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_12_cst2init' type=int64 shape=(1,) -- array([1]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
init: name='init7_s1_02_cst2init' type=int64 shape=(1,) -- array([0]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
Identity(x) -> hidden_input_scan_0_clone
Scan(c_lifted_tensor_0, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'batch']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_detach_,scan_0_x -> output_0,output_1
input: name='init_0_detach_' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
Identity(init_0_detach_) -> output_0
Unsqueeze(scan_0_x, init7_s1_02_cst2init) -> reshape2
Sub(hidden_input_scan_0_clone, reshape2) -> sub2
Mul(sub2, sub2) -> mul2
ReduceSum(mul2, init7_s1_12_cst2init, keepdims=0) -> sum_12
Sqrt(sum_12) -> output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
dynamo-ir#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='clone' type=float32 shape=(1,) -- array([0.], dtype=float32)
init: name='val_3' type=int64 shape=(2,) -- array([ 1, -1])
init: name='val_4' type=int64 shape=(1,) -- array([1])
Scan(clone, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_directions=[0]) -> scan__0, getitem_1
output: name='getitem_1' type=dtype('float32') shape=['batch', 'batch']
----- subgraph ---- Scan - node_scan__1 - att.body=G1 -- level=1 -- clone_scan_combine_graph_0__subgraph_in,x_scan_combine_graph_0__subgraph_in -> clone_scan_combine_graph_0,sqrt_scan_combine_graph_0
input: name='clone_scan_combine_graph_0__subgraph_in' type=dtype('float32') shape=[1]
input: name='x_scan_combine_graph_0__subgraph_in' type=dtype('float32') shape=[4]
Identity(clone_scan_combine_graph_0__subgraph_in) -> clone_scan_combine_graph_0
Reshape(x_scan_combine_graph_0__subgraph_in, val_3, allowzero=1) -> view
Sub(x, view) -> sub_1
Mul(sub_1, sub_1) -> mul_4
ReduceSum(mul_4, val_4, noop_with_empty_axes=0, keepdims=0) -> sum_1
Sqrt(sum_1) -> sqrt_scan_combine_graph_0
output: name='clone_scan_combine_graph_0' type=dtype('float32') shape=[1]
output: name='sqrt_scan_combine_graph_0' type=dtype('float32') shape=['batch']
tracing#
FAILED
(CustomProxy(clone),) can only be of (<class 'torch.Tensor'>, <class 'int'>, <class 'torch.SymInt'>) but got (<class 'yobx.torch.tracing.CustomProxy'>,)
ControlFlowScanCDistXY#
code: yobx.torch._model_eval_cases.ControlFlowScanCDistXY
forward#
def forward(self, x, y):
_carry, out = torch.ops.higher_order.scan(
ControlFlowScanCDistXY.dist,
[y],
[x],
# dim=0, # 01/31/2025, not supported anymore
additional_inputs=[],
)
return out
yobx#
inputs:
#2[(T1s3x4,T1s5x4),(T1s13x14,T1s15x14)]shapes:
dict(x:{0:Dim(x_rows),1:Dim(dim)},y:{0:Dim(y_rows),1:Dim(dim)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['x_rows', 'dim']
input: name='y' type=dtype('float32') shape=['y_rows', 'dim']
init: name='init7_s1_12_cst2init' type=int64 shape=(1,) -- array([1]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
init: name='init7_s1_02_cst2init' type=int64 shape=(1,) -- array([0]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
Scan(y, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=['x_rows', 'y_rows']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_y,scan_0_x -> output_0,output_1
input: name='init_0_y' type='NOTENSOR' shape=None
input: name='scan_0_x' type='NOTENSOR' shape=None
Identity(init_0_y) -> output_0
Unsqueeze(scan_0_x, init7_s1_02_cst2init) -> reshape2
Sub(init_0_y, reshape2) -> sub2
Mul(sub2, sub2) -> mul2
ReduceSum(mul2, init7_s1_12_cst2init, keepdims=0) -> sum_12
Sqrt(sum_12) -> output_1
output: name='output_0' type='NOTENSOR' shape=None
output: name='output_1' type='NOTENSOR' shape=None
dynamo-ir#
inputs:
#2[(T1s3x4,T1s5x4),(T1s13x14,T1s15x14)]shapes:
dict(x:{0:Dim(x_rows),1:Dim(dim)},y:{0:Dim(y_rows),1:Dim(dim)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['x_rows', 'dim']
input: name='y' type=dtype('float32') shape=['y_rows', 'dim']
init: name='val_5' type=int64 shape=(2,) -- array([ 1, -1])
init: name='val_6' type=int64 shape=(1,) -- array([1])
Scan(y, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_directions=[0]) -> scan__0, getitem_1
output: name='getitem_1' type=dtype('float32') shape=['x_rows', 'y_rows']
----- subgraph ---- Scan - node_scan__1 - att.body=G1 -- level=1 -- y_scan_combine_graph_0__subgraph_in,x_scan_combine_graph_0__subgraph_in -> clone_scan_combine_graph_0,sqrt_scan_combine_graph_0
input: name='y_scan_combine_graph_0__subgraph_in' type=dtype('float32') shape=['s17', 's27']
input: name='x_scan_combine_graph_0__subgraph_in' type=dtype('float32') shape=['s27']
Identity(y_scan_combine_graph_0__subgraph_in) -> clone_scan_combine_graph_0
Reshape(x_scan_combine_graph_0__subgraph_in, val_5, allowzero=1) -> view
Sub(y_scan_combine_graph_0__subgraph_in, view) -> sub_4
Mul(sub_4, sub_4) -> mul_7
ReduceSum(mul_7, val_6, noop_with_empty_axes=0, keepdims=0) -> sum_1
Sqrt(sum_1) -> sqrt_scan_combine_graph_0
output: name='clone_scan_combine_graph_0' type=dtype('float32') shape=['y_rows', 'dim']
output: name='sqrt_scan_combine_graph_0' type=dtype('float32') shape=['y_rows']
tracing#
FAILED
Unable to symbolically trace the HigherOrderOperator scan because it received an arbitrary callable argument <function ControlFlowScanCDistXY.dist at 0x751536a37f60>. Use make_fx or dynamo tracing instead.
ControlFlowScanDecomposition_151564#
code: yobx.torch._model_eval_cases.ControlFlowScanDecomposition_151564
forward#
def forward(self, images, position):
return self.select_when_exporting(self.dummy_loop, self.dummy_loop_with_scan)(
images, position
)
yobx#
inputs:
#1[(T1s5x6,T7s5)]shapes:
dict(images:{0:DYNAMIC,1:DYNAMIC},position:{0:DYNAMIC})
opset: domain='' version=21
opset: domain='aten' version=1
opset: domain='local_functions' version=1
input: name='images' type=dtype('float32') shape=['batch', 'channel']
input: name='position' type=dtype('int64') shape=['batch_1']
init: name='init7_s1_02_cst2init' type=int64 shape=(1,) -- array([0]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
Scan(images, position, body=G1, num_scan_inputs=2, scan_input_directions=[0,0], scan_output_axes=[0], scan_output_directions=[0]) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'channel']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- scan_0_images,scan_1_position -> output_0
input: name='scan_0_images' type='NOTENSOR' shape=None
input: name='scan_1_position' type='NOTENSOR' shape=None
Shape(scan_0_images, end=1, start=0) -> padded_1::Shape:12
ConstantOfShape(padded_1::Shape:12, value=[0.0]) -> zeros2
Unsqueeze(scan_1_position, init7_s1_02_cst2init) -> item::UnSq02
Slice(scan_0_images, init7_s1_02_cst2init, item::UnSq02, init7_s1_02_cst2init) -> slice_12
aten_setitem[aten](zeros2, scan_1_position, slice_12) -> output_0
output: name='output_0' type='NOTENSOR' shape=None
----- function name=aten_setitem domain=aten
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=21
input: 'zeros'
input: 'item'
input: 'slice_1'
Constant(value=[0]) -> init7_s1_0
Unsqueeze(item, init7_s1_0) -> item::UnSq0
Shape(zeros) -> zeros::Shape:
Slice(zeros, item::UnSq0, zeros::Shape:, init7_s1_0) -> _onx_slice_zeros
Concat(slice_1, _onx_slice_zeros, axis=0) -> setitem
output: name='setitem' type=? shape=?
dynamo-ir#
inputs:
#1[(T1s5x6,T7s5)]shapes:
dict(images:{0:DYNAMIC,1:DYNAMIC},position:{0:DYNAMIC})
opset: domain='' version=20
input: name='images' type=dtype('float32') shape=['s34', 's90']
input: name='position' type=dtype('int64') shape=['s71']
init: name='val_13' type=int64 shape=(1,) -- array([0])
init: name='val_37' type=int64 shape=(1,) -- array([1])
init: name='val_1' type=float32 shape=() -- array([0.], dtype=float32)
init: name='val_5' type=int64 shape=(1,) -- array([-1])
init: name='val_7' type=int64 shape=() -- array([0])
init: name='val_10' type=int64 shape=() -- array([1])
Scan(images, position, body=G1, num_scan_inputs=2, scan_input_directions=[0,0], scan_output_directions=[0]) -> getitem
output: name='getitem' type=dtype('float32') shape=['s34', 's90']
----- subgraph ---- Scan - node_scan__0 - att.body=G1 -- level=1 -- images_scan_combine_graph_0__subgraph_in,position_scan_combine_graph_0__subgraph_in -> slice_scatter_scan_combine_graph_0
input: name='images_scan_combine_graph_0__subgraph_in' type=dtype('float32') shape=['s90']
input: name='position_scan_combine_graph_0__subgraph_in' type=dtype('int64') shape=None
Reshape(position_scan_combine_graph_0__subgraph_in, val_5, allowzero=0) -> val_6
Gather(val_6, val_7, axis=0) -> val_8
Reshape(val_8, val_5, allowzero=0) -> val_16
Slice(images_scan_combine_graph_0__subgraph_in, val_13, val_16, val_13, val_37) -> copy
Shape(images_scan_combine_graph_0__subgraph_in, end=1, start=0) -> val_0
Expand(val_1, val_0) -> zeros
Shape(zeros, start=0) -> val_32
Gather(val_32, val_7, axis=0) -> val_33
Range(val_7, val_33, val_10) -> val_34
Unsqueeze(val_8, val_13) -> val_36
Slice(val_34, val_13, val_36, val_13, val_37) -> val_38
Unsqueeze(val_38, val_5) -> val_39
ScatterND(zeros, val_39, copy, reduction=b'none') -> slice_scatter_scan_combine_graph_0
output: name='slice_scatter_scan_combine_graph_0' type=dtype('float32') shape=['s90']
tracing#
FAILED
Unexpected type <class 'torch.export.dynamic_shapes._DimHint'> in dynamic_shapes={0: DimHint(DYNAMIC), 1: DimHint(DYNAMIC)} at index 0, self._mapping_str={}
ControlFlowScanInplace_153705#
code: yobx.torch._model_eval_cases.ControlFlowScanInplace_153705
forward#
def forward(self, x, y):
def loop_body_1(z, iv, x, y):
z = z.clone()
i = iv.item()
z[i, :] = ((x[i, :] - y) ** 2).sum(dim=-1)
return [z, iv]
z = torch.empty((x.shape[0], y.shape[0]))
r = torch.ops.higher_order.scan(
loop_body_1, [z], [torch.arange(x.shape[0], dtype=torch.int64)], [x, y]
)
return r[0]
yobx#
FAILED
only integers, slices (`:`), ellipsis (`...`), None and long or byte Variables are valid indices (got SymInt)
dynamo-ir#
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'IndexError'>: only integers, slices (`:`), ellipsis (`...`), None and long or byte Variables are valid indices (got SymInt)
(Refer to the full stack trace above for more information.)
tracing#
FAILED
Unexpected type <class 'torch.export.dynamic_shapes._DimHint'> in dynamic_shapes={0: DimHint(DYNAMIC), 1: DimHint(DYNAMIC)} at index 0, self._mapping_str={}
ControlFlowWhileDec#
code: yobx.torch._model_eval_cases.ControlFlowWhileDec
forward#
def forward(self, ci, a, b):
def cond_fn(i, x, y):
return i > 0
def body_fn(i, x, y):
return i - 1, x + y, y - x
return torch._higher_order_ops.while_loop(cond_fn, body_fn, [ci, a, b])
yobx#
FAILED
Unable to interpret function <class 'str'>: 'aten_while_loop', searched for ['aten_while_loop'] and attributes [], args=(while_loop_cond_graph_0, while_loop_body_graph_0, (ci, a, b), ()), kwargs={}, dispatcher=None
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-QIQ] Message starts, there are 0 initializers, 0 nodes, 3 inputs, 3 outputs.
input_names=['ci', 'a', 'b']
output_names=[]
--LOCAL FUNCTIONS--
local_functions,while_loop_cond_graph_0(['arg0_1', 'arg1_1', 'arg2_1']) -> ['output']
local_functions,while_loop_body_graph_0(['arg0_1', 'arg1_1', 'arg2_1']) -> ['output_dim_0', 'output_1', 'output_2']
--CONSTRAINTS--
DYN0 = {'s97'}
DYN1 = {'s98'}
DYN2 = {'s52'}
s52 = {'DYN2'}
s97 = {'DYN0'}
s98 = {'DYN1'}
--SHAPE--
_dynamic_examples=
dynamic_objects=
DYN0 = WrapSym(DYN0)
DYN1 = WrapSym(DYN1)
DYN2 = WrapSym(DYN2)
dynamic_objects_rev=
'DYN0' = <class 'list'>
tuple
'DYN0'
ERR**: <class 'torch.SymInt'>:'DYN0'
'DYN1' = <class 'list'>
tuple
'DYN1'
ERR**: <class 'torch.SymInt'>:'DYN1'
'DYN2' = <class 'list'>
tuple
'DYN2'
ERR**: <class 'torch.SymInt'>:'DYN2'
dynamic_dimensions_source={'DYN0': [{'axis': 0, 'input_name': 'a'}],
'DYN1': [{'axis': 1, 'input_name': 'a'}],
'DYN2': [{'axis': 0, 'input_name': 'b'}]}
dynamic_dimensions_source_flat=[0, 1, 2]
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s52': 'DYN2', 's97': 'DYN0', 's98': 'DYN1'}
dynamic_shapes=({}, {0: Dim('DYN0', min=0), 1: Dim('DYN1', min=0)}, {0: Dim('DYN2', min=0)})
_known_shapes={'a': ('DYN0', 'DYN1'), 'b': ('DYN2', 3), 'ci': ()}
_known_types={'a': 1, 'b': 1, 'ci': 7}
_known_devices={'a': -1, 'b': -1, 'ci': -1}
_context=[]
_known_value_shape={}
_known_constants=[]
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
a -> {while_loop}
b -> {while_loop}
ci -> {while_loop}
while_loop -> {getitem_2, getitem, getitem_1}
while_loop_body_graph_0 -> {while_loop}
while_loop_cond_graph_0 -> {while_loop}
--TORCH-SHAPES--
ci: ('run_node', ('', ('val', torch.int64, torch.Size([])))) --- 7:0:():
a: ('run_node', ('', ('val', torch.float32, torch.Size([s52, s98])))) --- 1:2:('DYN0', 'DYN1'):
b: ('run_node', ('', ('val', torch.float32, torch.Size([s52, 3])))) --- 1:2:('DYN2', 3):
while_loop_cond_graph_0: ('run_node', ('', '')) --- :::
while_loop_body_graph_0: ('run_node', ('', '')) --- :::
while_loop: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
ExportedProgram:
class GraphModule(torch.nn.Module):
def forward(self, ci: "i64[]", a: "f32[s52, s98]", b: "f32[s52, 3]"):
# File: <eval_with_key>.1904:10 in forward, code: while_loop = torch.ops.higher_order.while_loop(cond_fn_0, body_fn_0, (l_args_2_0_, l_args_2_1_, l_args_2_2_), ()); cond_fn_0 = body_fn_0 = l_args_2_0_ = l_args_2_1_ = l_args_2_2_ = None
while_loop_cond_graph_0 = self.while_loop_cond_graph_0
while_loop_body_graph_0 = self.while_loop_body_graph_0
while_loop = torch.ops.higher_order.while_loop(while_loop_cond_graph_0, while_loop_body_graph_0, (ci, a, b), ()); while_loop_cond_graph_0 = while_loop_body_graph_0 = ci = a = b = None
getitem: "i64[]" = while_loop[0]
getitem_1: "f32[s52, s98]" = while_loop[1]
getitem_2: "f32[s52, 3]" = while_loop[2]; while_loop = None
return (getitem, getitem_1, getitem_2)
class while_loop_cond_graph_0(torch.nn.Module):
def forward(self, arg0_1: "i64[]", arg1_1: "f32[s52, s98]", arg2_1: "f32[s52, 3]"):
# File: <eval_with_key>.1905:5 in forward, code: gt = child > 0; child = None
gt: "b8[]" = torch.ops.aten.gt.Scalar(arg0_1, 0); arg0_1 = None
return gt
class while_loop_body_graph_0(torch.nn.Module):
def forward(self, arg0_1: "i64[]", arg1_1: "f32[s52, s98]", arg2_1: "f32[s52, 3]"):
# File: <eval_with_key>.1906:5 in forward, code: child = child_3 - 1; child_3 = None
sub: "i64[]" = torch.ops.aten.sub.Tensor(arg0_1, 1); arg0_1 = None
# File: <eval_with_key>.1906:6 in forward, code: child_6 = child_4 + child_5
add: "f32[s52, s98]" = torch.ops.aten.add.Tensor(arg1_1, arg2_1)
# File: <eval_with_key>.1906:7 in forward, code: child_7 = child_5 - child_4; child_5 = child_4 = None
sub_1: "f32[s52, 3]" = torch.ops.aten.sub.Tensor(arg2_1, arg1_1); arg2_1 = arg1_1 = None
return (sub, add, sub_1)
Graph signature:
# inputs
ci: USER_INPUT
a: USER_INPUT
b: USER_INPUT
# outputs
getitem: USER_OUTPUT
getitem_1: USER_OUTPUT
getitem_2: USER_OUTPUT
Range constraints: {s52: VR[2, int_oo], s98: VR[3, 3]}
-- process.graph_module.graph --
graph():
%ci : [num_users=1] = placeholder[target=ci]
%a : [num_users=1] = placeholder[target=a]
%b : [num_users=1] = placeholder[target=b]
%while_loop_cond_graph_0 : [num_users=1] = get_attr[target=while_loop_cond_graph_0]
%while_loop_body_graph_0 : [num_users=1] = get_attr[target=while_loop_body_graph_0]
%while_loop : [num_users=3] = call_function[target=torch.ops.higher_order.while_loop](args = (%while_loop_cond_graph_0, %while_loop_body_graph_0, (%ci, %a, %b), ()), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%while_loop, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%while_loop, 1), kwargs = {})
%getitem_2 : [num_users=1] = call_function[target=operator.getitem](args = (%while_loop, 2), kwargs = {})
return (getitem, getitem_1, getitem_2)
-- process.inputs_to_remove --
set()
-- process.progress --
node 5/10 target=while_loop
-- 3 INPUTS
[GraphBuilder-QIQ.1.make_tensor_input] ci[7:]
[GraphBuilder-QIQ.1.make_tensor_input] a[1:DYN0xDYN1]
[GraphBuilder-QIQ.1.make_tensor_input] b[1:DYN2x3]
-- 0 INITIALIZERS
-- 0 OUTPUTS
[GraphBuilder-QIQ] Message completed, there are 0 initializers, 0 nodes, 3 inputs, 3 outputs.,
dynamo-ir#
inputs:
#1[(T7s,T1s2x3,T1s2x3)]shapes:
({},{0:DYNAMIC,1:DYNAMIC},{0:DYNAMIC})
opset: domain='' version=20
input: name='ci' type=dtype('int64') shape=None
input: name='a' type=dtype('float32') shape=['s52', 's98']
input: name='b' type=dtype('float32') shape=['s52', 3]
init: name='val_0_2' type=int64 shape=() -- array([0])
init: name='val_0_3' type=int64 shape=() -- array([1])
Greater(ci, val_0_2) -> val_0
Loop(, val_0, ci, a, b, body=G1) -> getitem, getitem_1, getitem_2
output: name='getitem' type=dtype('int64') shape=None
output: name='getitem_1' type=dtype('float32') shape=['s52', 's98']
output: name='getitem_2' type=dtype('float32') shape=['s52', 3]
----- subgraph ---- Loop - node_while_loop__2 - att.body=G1 -- level=1 -- iter_num_while_loop_body_graph_0,cond_in_while_loop_body_graph_0,ci_while_loop_body_graph_0__subgraph_in,a_while_loop_body_graph_0__subgraph_in,b_while_loop_body_graph_0__subgraph_in -> gt_while_loop_cond_graph_0,sub_3_while_loop_body_graph_0,add_6_while_loop_body_graph_0,sub_6_while_loop_body_graph_0
input: name='iter_num_while_loop_body_graph_0' type=dtype('int64') shape=None
input: name='cond_in_while_loop_body_graph_0' type=dtype('bool') shape=None
input: name='ci_while_loop_body_graph_0__subgraph_in' type=dtype('int64') shape=None
input: name='a_while_loop_body_graph_0__subgraph_in' type=dtype('float32') shape=['s52', 's98']
input: name='b_while_loop_body_graph_0__subgraph_in' type=dtype('float32') shape=['s52', 3]
Add(a_while_loop_body_graph_0__subgraph_in, b_while_loop_body_graph_0__subgraph_in) -> add_6_while_loop_body_graph_0
Sub(ci_while_loop_body_graph_0__subgraph_in, val_0_3) -> sub_3_while_loop_body_graph_0
Greater(sub_3_while_loop_body_graph_0, val_0_2) -> gt_while_loop_cond_graph_0
Sub(b_while_loop_body_graph_0__subgraph_in, a_while_loop_body_graph_0__subgraph_in) -> sub_6_while_loop_body_graph_0
output: name='gt_while_loop_cond_graph_0' type=dtype('bool') shape=None
output: name='sub_3_while_loop_body_graph_0' type=dtype('int64') shape=None
output: name='add_6_while_loop_body_graph_0' type=dtype('float32') shape=['s52', 3]
output: name='sub_6_while_loop_body_graph_0' type=dtype('float32') shape=['s52', 3]
tracing#
FAILED
Unexpected type <class 'torch.export.dynamic_shapes._DimHint'> in dynamic_shapes={0: DimHint(DYNAMIC), 1: DimHint(DYNAMIC)} at index 0, self._mapping_str={}
ControlFlowWhileInc#
code: yobx.torch._model_eval_cases.ControlFlowWhileInc
forward#
def forward(self, ci, a, b):
def cond_fn(i, x, y):
return i < x.size(0)
def body_fn(i, x, y):
return i + 1, x + y, y - x
return torch._higher_order_ops.while_loop(cond_fn, body_fn, [ci, a, b])
yobx#
FAILED
Unable to interpret function <class 'str'>: 'aten_while_loop', searched for ['aten_while_loop'] and attributes [], args=(while_loop_cond_graph_0, while_loop_body_graph_0, (ci, a, b), ()), kwargs={}, dispatcher=None
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-TPG] Message starts, there are 0 initializers, 0 nodes, 3 inputs, 3 outputs.
input_names=['ci', 'a', 'b']
output_names=[]
--LOCAL FUNCTIONS--
local_functions,while_loop_cond_graph_0(['arg0_1', 'arg1_1', 'arg2_1']) -> ['output']
local_functions,while_loop_body_graph_0(['arg0_1', 'arg1_1', 'arg2_1']) -> ['output_dim_0', 'output_1', 'output_2']
--CONSTRAINTS--
DYN0 = {'s97'}
DYN1 = {'s98'}
DYN2 = {'s52'}
s52 = {'DYN2'}
s97 = {'DYN0'}
s98 = {'DYN1'}
--SHAPE--
_dynamic_examples=
dynamic_objects=
DYN0 = WrapSym(DYN0)
DYN1 = WrapSym(DYN1)
DYN2 = WrapSym(DYN2)
dynamic_objects_rev=
'DYN0' = <class 'list'>
tuple
'DYN0'
ERR**: <class 'torch.SymInt'>:'DYN0'
'DYN1' = <class 'list'>
tuple
'DYN1'
ERR**: <class 'torch.SymInt'>:'DYN1'
'DYN2' = <class 'list'>
tuple
'DYN2'
ERR**: <class 'torch.SymInt'>:'DYN2'
dynamic_dimensions_source={'DYN0': [{'axis': 0, 'input_name': 'a'}],
'DYN1': [{'axis': 1, 'input_name': 'a'}],
'DYN2': [{'axis': 0, 'input_name': 'b'}]}
dynamic_dimensions_source_flat=[0, 1, 2]
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s52': 'DYN2', 's97': 'DYN0', 's98': 'DYN1'}
dynamic_shapes=({}, {0: Dim('DYN0', min=0), 1: Dim('DYN1', min=0)}, {0: Dim('DYN2', min=0)})
_known_shapes={'a': ('DYN0', 'DYN1'), 'b': ('DYN2', 3), 'ci': ()}
_known_types={'a': 1, 'b': 1, 'ci': 7}
_known_devices={'a': -1, 'b': -1, 'ci': -1}
_context=[]
_known_value_shape={}
_known_constants=[]
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
a -> {while_loop}
b -> {while_loop}
ci -> {while_loop}
while_loop -> {getitem_2, getitem_1, getitem}
while_loop_body_graph_0 -> {while_loop}
while_loop_cond_graph_0 -> {while_loop}
--TORCH-SHAPES--
ci: ('run_node', ('', ('val', torch.int64, torch.Size([])))) --- 7:0:():
a: ('run_node', ('', ('val', torch.float32, torch.Size([s52, s98])))) --- 1:2:('DYN0', 'DYN1'):
b: ('run_node', ('', ('val', torch.float32, torch.Size([s52, 3])))) --- 1:2:('DYN2', 3):
while_loop_cond_graph_0: ('run_node', ('', '')) --- :::
while_loop_body_graph_0: ('run_node', ('', '')) --- :::
while_loop: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
ExportedProgram:
class GraphModule(torch.nn.Module):
def forward(self, ci: "i64[]", a: "f32[s52, s98]", b: "f32[s52, 3]"):
# File: <eval_with_key>.2035:10 in forward, code: while_loop = torch.ops.higher_order.while_loop(cond_fn_0, body_fn_0, (l_args_2_0_, l_args_2_1_, l_args_2_2_), ()); cond_fn_0 = body_fn_0 = l_args_2_0_ = l_args_2_1_ = l_args_2_2_ = None
while_loop_cond_graph_0 = self.while_loop_cond_graph_0
while_loop_body_graph_0 = self.while_loop_body_graph_0
while_loop = torch.ops.higher_order.while_loop(while_loop_cond_graph_0, while_loop_body_graph_0, (ci, a, b), ()); while_loop_cond_graph_0 = while_loop_body_graph_0 = ci = a = b = None
getitem: "i64[]" = while_loop[0]
getitem_1: "f32[s52, s98]" = while_loop[1]
getitem_2: "f32[s52, 3]" = while_loop[2]; while_loop = None
return (getitem, getitem_1, getitem_2)
class while_loop_cond_graph_0(torch.nn.Module):
def forward(self, arg0_1: "i64[]", arg1_1: "f32[s52, s98]", arg2_1: "f32[s52, 3]"):
# No stacktrace found for following nodes
sym_size_int_3: "Sym(s52)" = torch.ops.aten.sym_size.int(arg2_1, 0); arg2_1 = None
# File: <eval_with_key>.2036:6 in forward, code: lt = child < sym_size_int; child = sym_size_int = None
lt: "b8[]" = torch.ops.aten.lt.Scalar(arg0_1, sym_size_int_3); arg0_1 = sym_size_int_3 = None
return lt
class while_loop_body_graph_0(torch.nn.Module):
def forward(self, arg0_1: "i64[]", arg1_1: "f32[s52, s98]", arg2_1: "f32[s52, 3]"):
# File: <eval_with_key>.2037:5 in forward, code: child = child_3 + 1; child_3 = None
add: "i64[]" = torch.ops.aten.add.Tensor(arg0_1, 1); arg0_1 = None
# File: <eval_with_key>.2037:6 in forward, code: child_6 = child_4 + child_5
add_1: "f32[s52, s98]" = torch.ops.aten.add.Tensor(arg1_1, arg2_1)
# File: <eval_with_key>.2037:7 in forward, code: child_7 = child_5 - child_4; child_5 = child_4 = None
sub: "f32[s52, 3]" = torch.ops.aten.sub.Tensor(arg2_1, arg1_1); arg2_1 = arg1_1 = None
return (add, add_1, sub)
Graph signature:
# inputs
ci: USER_INPUT
a: USER_INPUT
b: USER_INPUT
# outputs
getitem: USER_OUTPUT
getitem_1: USER_OUTPUT
getitem_2: USER_OUTPUT
Range constraints: {s52: VR[2, int_oo], s98: VR[3, 3]}
-- process.graph_module.graph --
graph():
%ci : [num_users=1] = placeholder[target=ci]
%a : [num_users=1] = placeholder[target=a]
%b : [num_users=1] = placeholder[target=b]
%while_loop_cond_graph_0 : [num_users=1] = get_attr[target=while_loop_cond_graph_0]
%while_loop_body_graph_0 : [num_users=1] = get_attr[target=while_loop_body_graph_0]
%while_loop : [num_users=3] = call_function[target=torch.ops.higher_order.while_loop](args = (%while_loop_cond_graph_0, %while_loop_body_graph_0, (%ci, %a, %b), ()), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%while_loop, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%while_loop, 1), kwargs = {})
%getitem_2 : [num_users=1] = call_function[target=operator.getitem](args = (%while_loop, 2), kwargs = {})
return (getitem, getitem_1, getitem_2)
-- process.inputs_to_remove --
set()
-- process.progress --
node 5/10 target=while_loop
-- 3 INPUTS
[GraphBuilder-TPG.1.make_tensor_input] ci[7:]
[GraphBuilder-TPG.1.make_tensor_input] a[1:DYN0xDYN1]
[GraphBuilder-TPG.1.make_tensor_input] b[1:DYN2x3]
-- 0 INITIALIZERS
-- 0 OUTPUTS
[GraphBuilder-TPG] Message completed, there are 0 initializers, 0 nodes, 3 inputs, 3 outputs.,
dynamo-ir#
* inputs: #1[(T7s,T1s2x3,T1s2x3)]
* shapes: ({},{0:DYNAMIC,1:DYNAMIC},{0:DYNAMIC})
opset: domain='' version=20
input: name='ci' type=dtype('int64') shape=None
input: name='a' type=dtype('float32') shape=['s52', 's98']
input: name='b' type=dtype('float32') shape=['s52', 3]
init: name='val_0_3' type=int64 shape=() -- array([1])
Shape(b, end=1, start=0) -> val_0_2
Squeeze(val_0_2) -> sym_size_int_5
Less(ci, sym_size_int_5) -> val_0
Loop(, val_0, ci, a, b, body=G1) -> getitem, getitem_1, getitem_2
output: name='getitem' type=dtype('int64') shape=None
output: name='getitem_1' type=dtype('float32') shape=['s52', 's98']
output: name='getitem_2' type=dtype('float32') shape=['s52', 3]
----- subgraph ---- Loop - node_while_loop__2 - att.body=G1 -- level=1 -- iter_num_while_loop_body_graph_0,cond_in_while_loop_body_graph_0,ci_while_loop_body_graph_0__subgraph_in,a_while_loop_body_graph_0__subgraph_in,b_while_loop_body_graph_0__subgraph_in -> lt_while_loop_cond_graph_0,add_6_while_loop_body_graph_0,add_7_while_loop_body_graph_0,sub_5_while_loop_body_graph_0
input: name='iter_num_while_loop_body_graph_0' type=dtype('int64') shape=None
input: name='cond_in_while_loop_body_graph_0' type=dtype('bool') shape=None
input: name='ci_while_loop_body_graph_0__subgraph_in' type=dtype('int64') shape=None
input: name='a_while_loop_body_graph_0__subgraph_in' type=dtype('float32') shape=['s52', 's98']
input: name='b_while_loop_body_graph_0__subgraph_in' type=dtype('float32') shape=['s52', 3]
Add(ci_while_loop_body_graph_0__subgraph_in, val_0_3) -> add_6_while_loop_body_graph_0
Add(a_while_loop_body_graph_0__subgraph_in, b_while_loop_body_graph_0__subgraph_in) -> add_7_while_loop_body_graph_0
Sub(b_while_loop_body_graph_0__subgraph_in, a_while_loop_body_graph_0__subgraph_in) -> sub_5_while_loop_body_graph_0
Shape(sub_5_while_loop_body_graph_0, end=1, start=0) -> val_0_4
Squeeze(val_0_4) -> sym_size_int_5_2
Less(add_6_while_loop_body_graph_0, sym_size_int_5_2) -> lt_while_loop_cond_graph_0
output: name='lt_while_loop_cond_graph_0' type=dtype('bool') shape=None
output: name='add_6_while_loop_body_graph_0' type=dtype('int64') shape=None
output: name='add_7_while_loop_body_graph_0' type=dtype('float32') shape=['s52', 3]
output: name='sub_5_while_loop_body_graph_0' type=dtype('float32') shape=['s52', 3]
tracing#
FAILED
Unexpected type <class 'torch.export.dynamic_shapes._DimHint'> in dynamic_shapes={0: DimHint(DYNAMIC), 1: DimHint(DYNAMIC)} at index 0, self._mapping_str={}
CreateFromShape#
code: yobx.torch._model_eval_cases.CreateFromShape
forward#
def forward(self, x):
y = torch.ones((x.shape[0], x.shape[1] + 1))
return y
yobx#
* inputs: #2[(T1s4x4,),(T1s5x5,)]
* shapes: dict(x:{0:Dim(dx),1:Dim(dy)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='SqueezeBinaryUnsqueezePattern_init7_s_1' type=int64 shape=(1,) -- array([1])-- GraphBuilder.constant_folding.from/fold(init7_s1_0,init7_s_1)##init7_s_1/shape_type_compute._cast_inputs.1(add)##init7_s1_0/Opset.make_node.1/Shape##Opset.make_node.1/Shape
Shape(x, end=1, start=0) -> x::Shape:1
Shape(x, end=2, start=1) -> x::Shape1:2
Add(x::Shape1:2, SqueezeBinaryUnsqueezePattern_init7_s_1) -> add::UnSq0
Concat(x::Shape:1, add::UnSq0, axis=0) -> _onx_concat_sym_size_int_2::UnSq0
ConstantOfShape(_onx_concat_sym_size_int_2::UnSq0, value=[1.0]) -> output_0
output: name='output_0' type=dtype('float32') shape=['dx', 'dy']
dynamo-ir#
* inputs: #2[(T1s4x4,),(T1s5x5,)]
* shapes: dict(x:{0:Dim(dx),1:Dim(dy)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='val_10' type=float32 shape=() -- array([1.], dtype=float32)
init: name='val_2' type=int64 shape=() -- array([1])
init: name='val_5' type=int64 shape=(1,) -- array([-1])
Shape(x, end=1, start=0) -> val_0
Shape(x, end=2, start=1) -> val_1
Squeeze(val_1) -> sym_size_int_3
Add(sym_size_int_3, val_2) -> add
Reshape(add, val_5, allowzero=0) -> val_6
Concat(val_0, val_6, axis=0) -> val_7
Expand(val_10, val_7) -> ones
output: name='ones' type=dtype('float32') shape=['dx', 'dy + 1']
tracing#
* inputs: #2[(T1s4x4,),(T1s5x5,)]
* shapes: dict(x:{0:Dim(dx),1:Dim(dy)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- shape_type_compute._cast_inputs.1(add)
Shape(x) -> getattr_1
Split(getattr_1, axis=0, num_outputs=2) -> _onx_gather_getattr_1, _onx_gather_getattr_2
Squeeze(_onx_gather_getattr_2, init7_s1_0) -> getitem_1
Add(getitem_1, init7_s_1) -> _onx_add_getitem_1
Unsqueeze(_onx_add_getitem_1, init7_s1_0) -> add::UnSq0
Concat(_onx_gather_getattr_1, add::UnSq0, axis=0) -> _onx_concat_getitem::UnSq0
ConstantOfShape(_onx_concat_getitem::UnSq0, value=[1.0]) -> output
output: name='output' type=dtype('float32') shape=['dx', 'add']
CreateFromShapeThroughFunction#
code: yobx.torch._model_eval_cases.CreateFromShapeThroughFunction
forward#
def forward(self, x):
dy1 = CreateFromShapeThroughFunction.add_one(x.shape[1])
y = torch.ones((x.shape[0], dy1))
return y
yobx#
* inputs: #1[(T1s4x4,)]
* shapes: dict(x:{0:Dim(dx),1:Dim(dy)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='SqueezeBinaryUnsqueezePattern_init7_s_1' type=int64 shape=(1,) -- array([1])-- GraphBuilder.constant_folding.from/fold(init7_s1_0,init7_s_1)##init7_s_1/shape_type_compute._cast_inputs.1(add)##init7_s1_0/Opset.make_node.1/Shape##Opset.make_node.1/Shape
Shape(x, end=1, start=0) -> x::Shape:1
Shape(x, end=2, start=1) -> x::Shape1:2
Add(x::Shape1:2, SqueezeBinaryUnsqueezePattern_init7_s_1) -> add::UnSq0
Concat(x::Shape:1, add::UnSq0, axis=0) -> _onx_concat_sym_size_int_2::UnSq0
ConstantOfShape(_onx_concat_sym_size_int_2::UnSq0, value=[1.0]) -> output_0
output: name='output_0' type=dtype('float32') shape=['dx', 'dy']
dynamo-ir#
* inputs: #1[(T1s4x4,)]
* shapes: dict(x:{0:Dim(dx),1:Dim(dy)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='val_10' type=float32 shape=() -- array([1.], dtype=float32)
init: name='val_2' type=int64 shape=() -- array([1])
init: name='val_5' type=int64 shape=(1,) -- array([-1])
Shape(x, end=1, start=0) -> val_0
Shape(x, end=2, start=1) -> val_1
Squeeze(val_1) -> sym_size_int_3
Add(sym_size_int_3, val_2) -> add
Reshape(add, val_5, allowzero=0) -> val_6
Concat(val_0, val_6, axis=0) -> val_7
Expand(val_10, val_7) -> ones
output: name='ones' type=dtype('float32') shape=['dx', 'dy + 1']
tracing#
* inputs: #1[(T1s4x4,)]
* shapes: dict(x:{0:Dim(dx),1:Dim(dy)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- shape_type_compute._cast_inputs.1(add)
Shape(x) -> getattr_1
Split(getattr_1, axis=0, num_outputs=2) -> _onx_gather_getattr_2, _onx_gather_getattr_1
Squeeze(_onx_gather_getattr_1, init7_s1_0) -> getitem
Add(getitem, init7_s_1) -> _onx_add_getitem
Unsqueeze(_onx_add_getitem, init7_s1_0) -> add::UnSq0
Concat(_onx_gather_getattr_2, add::UnSq0, axis=0) -> _onx_concat_getitem_1::UnSq0
ConstantOfShape(_onx_concat_getitem_1::UnSq0, value=[1.0]) -> output
output: name='output' type=dtype('float32') shape=['dx', 'add']
CropLastDimensionWithTensorContent#
code: yobx.torch._model_eval_cases.CropLastDimensionWithTensorContent
forward#
def forward(self, x, shape):
return x[..., : shape[0]]
yobx#
W0319 17:50:52.050000 308934 torch/fx/experimental/symbolic_shapes.py:8382] Unable to find user code corresponding to {u0}
FAILED
Could not extract specialized integer from data-dependent expression u0 (unhinted: u0). (Size-like symbols: none)
Caused by: (_export/non_strict_utils.py:1159 in __torch_function__)
For more information, run with TORCH_LOGS="dynamic"
For extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"
If you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1
For more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing
For C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1
The following call raised this error:
File "~/github/yet-another-onnx-builder/yobx/torch/_model_eval_cases.py", line 905, in forward
return x[..., : shape[0]]
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
dynamo-ir#
W0319 17:50:52.634000 308934 torch/fx/experimental/symbolic_shapes.py:8382] Unable to find user code corresponding to {u0}
* inputs: #2[(T1s3x4x4,T7s1),(T1s6x4x4,T7s1)]
* shapes: dict(x:{0:Dim(batch)},shape:{})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='shape' type=dtype('int64') shape=[1]
init: name='val_6' type=int64 shape=(1,) -- array([0])
init: name='val_13' type=int64 shape=(1,) -- array([2])
init: name='val_0' type=int64 shape=() -- array([0])
init: name='val_1' type=int64 shape=(1,) -- array([-1])
init: name='val_14' type=int64 shape=(1,) -- array([1])
Gather(shape, val_0, axis=0) -> select
Reshape(select, val_1, allowzero=0) -> val_2
Gather(val_2, val_0, axis=0) -> val_3
Reshape(val_3, val_1, allowzero=0) -> val_9
Slice(x, val_6, val_9, val_13, val_14) -> slice_1
output: name='slice_1' type=dtype('float32') shape=['batch', 4, 'u1']
tracing#
* inputs: #2[(T1s3x4x4,T7s1),(T1s6x4x4,T7s1)]
* shapes: dict(x:{0:Dim(batch)},shape:{})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='shape' type=dtype('int64') shape=[1]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='getitem_1_axis' type=int64 shape=(1,) -- array([-1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_1_step' type=int64 shape=(1,) -- array([1]) -- DynamoInterpreter._getitem_slice.3
Gather(shape, init7_s1_0) -> _onx_gather_shape
Slice(x, init7_s1_0, _onx_gather_shape, getitem_1_axis, getitem_1_step) -> output
output: name='output' type=dtype('float32') shape=['batch', 4, 'getitem']
CropLastDimensionWithTensorShape#
code: yobx.torch._model_eval_cases.CropLastDimensionWithTensorShape
forward#
def forward(self, x, y):
return x[..., : y.shape[0]]
yobx#
* inputs: #2[(T1s3x4x4,T1s2),(T1s6x4x4,T1s3)]
* shapes: dict(x:{0:Dim(batch)},y:{0:Dim(crop)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='y' type=dtype('float32') shape=['crop']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape
Shape(y, end=1, start=0) -> y::Shape:1
Slice(x, init7_s1_0, y::Shape:1, init7_s1_2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4, 'crop']
dynamo-ir#
* inputs: #2[(T1s3x4x4,T1s2),(T1s6x4x4,T1s3)]
* shapes: dict(x:{0:Dim(batch)},y:{0:Dim(crop)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='y' type=dtype('float32') shape=['crop']
init: name='val_8' type=int64 shape=(1,) -- array([2])
init: name='val_1' type=int64 shape=(1,) -- array([0])
init: name='val_9' type=int64 shape=(1,) -- array([1])
Shape(y, end=1, start=0) -> val_0
Slice(x, val_1, val_0, val_8, val_9) -> slice_1
output: name='slice_1' type=dtype('float32') shape=['batch', 4, 'crop']
tracing#
* inputs: #2[(T1s3x4x4,T1s2),(T1s6x4x4,T1s3)]
* shapes: dict(x:{0:Dim(batch)},y:{0:Dim(crop)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='y' type=dtype('float32') shape=['crop']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='getitem_1_axis' type=int64 shape=(1,) -- array([-1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_1_step' type=int64 shape=(1,) -- array([1]) -- DynamoInterpreter._getitem_slice.3
Shape(y) -> getattr_1
Gather(getattr_1, init7_s1_0) -> _onx_gather_getattr_1
Slice(x, init7_s1_0, _onx_gather_getattr_1, getitem_1_axis, getitem_1_step) -> output
output: name='output' type=dtype('float32') shape=['batch', 4, 'crop']
ExportWithDimension0#
code: yobx.torch._model_eval_cases.ExportWithDimension0
forward#
def forward(self, x):
return x @ torch.arange(x.shape[1], dtype=torch.float32).reshape((-1, 1))
yobx#
* inputs: #1[(T1s0x3,)]
* shapes: dict(x:{0:DYNAMIC,1:DYNAMIC})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 'channel']
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- Opset.make_node.1/Small
init: name='init1_s_2' type=float32 shape=() -- array([1.], dtype=float32)-- Opset.make_node.1/Small
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- ReshapeIsSqueezePattern.m1
Shape(x, end=2, start=1) -> x::Shape1:2
Squeeze(x::Shape1:2) -> sym_size_int_2
Cast(sym_size_int_2, to=1) -> sym_size_int_2::C1
Range(init1_s_, sym_size_int_2::C1, init1_s_2) -> arange
Unsqueeze(arange, init7_s1_1) -> reshape
MatMul(x, reshape) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
dynamo-ir#
* inputs: #1[(T1s0x3,)]
* shapes: dict(x:{0:DYNAMIC,1:DYNAMIC})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['s77', 's27']
init: name='val_3' type=float32 shape=() -- array([0.], dtype=float32)
init: name='val_5' type=float32 shape=() -- array([1.], dtype=float32)
init: name='val_9' type=int64 shape=(2,) -- array([-1, 1])
Shape(x, end=2, start=1) -> val_0
Squeeze(val_0) -> sym_size_int_5
Cast(sym_size_int_5, to=1) -> val_1
Range(val_3, val_1, val_5) -> arange
Reshape(arange, val_9, allowzero=1) -> view
MatMul(x, view) -> matmul
output: name='matmul' type=dtype('float32') shape=['s77', 1]
tracing#
FAILED
Unexpected type <class 'torch.export.dynamic_shapes._DimHint'> in dynamic_shapes={0: DimHint(DYNAMIC), 1: DimHint(DYNAMIC)} at index 0, self._mapping_str={}
ExportWithDimension1#
code: yobx.torch._model_eval_cases.ExportWithDimension1
forward#
def forward(self, x):
return x @ torch.arange(x.shape[1], dtype=torch.float32).reshape((-1, 1))
yobx#
* inputs: #1[(T1s1x3,)]
* shapes: dict(x:{0:DYNAMIC,1:DYNAMIC})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 'channel']
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- Opset.make_node.1/Small
init: name='init1_s_2' type=float32 shape=() -- array([1.], dtype=float32)-- Opset.make_node.1/Small
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- ReshapeIsSqueezePattern.m1
Shape(x, end=2, start=1) -> x::Shape1:2
Squeeze(x::Shape1:2) -> sym_size_int_2
Cast(sym_size_int_2, to=1) -> sym_size_int_2::C1
Range(init1_s_, sym_size_int_2::C1, init1_s_2) -> arange
Unsqueeze(arange, init7_s1_1) -> reshape
MatMul(x, reshape) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
dynamo-ir#
* inputs: #1[(T1s1x3,)]
* shapes: dict(x:{0:DYNAMIC,1:DYNAMIC})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['s77', 's27']
init: name='val_3' type=float32 shape=() -- array([0.], dtype=float32)
init: name='val_5' type=float32 shape=() -- array([1.], dtype=float32)
init: name='val_9' type=int64 shape=(2,) -- array([-1, 1])
Shape(x, end=2, start=1) -> val_0
Squeeze(val_0) -> sym_size_int_5
Cast(sym_size_int_5, to=1) -> val_1
Range(val_3, val_1, val_5) -> arange
Reshape(arange, val_9, allowzero=1) -> view
MatMul(x, view) -> matmul
output: name='matmul' type=dtype('float32') shape=['s77', 1]
tracing#
FAILED
Unexpected type <class 'torch.export.dynamic_shapes._DimHint'> in dynamic_shapes={0: DimHint(DYNAMIC), 1: DimHint(DYNAMIC)} at index 0, self._mapping_str={}
InplaceAdd#
code: yobx.torch._model_eval_cases.InplaceAdd
forward#
def forward(self, x):
x += self.bias
return x
yobx#
* inputs: #2[(T1s3x4,),(T1s5x4,)]
* shapes: dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
dynamo-ir#
* inputs: #2[(T1s3x4,),(T1s5x4,)]
* shapes: dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add_3
output: name='add_3' type=dtype('float32') shape=['batch', 4]
tracing#
* inputs: #2[(T1s3x4,),(T1s5x4,)]
* shapes: dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.get_attr.0
Add(x, bias) -> output
output: name='output' type=dtype('float32') shape=['batch', 4]
InplaceAdd2#
code: yobx.torch._model_eval_cases.InplaceAdd2
forward#
def forward(self, x):
x.add_(self.bias)
return x
yobx#
* inputs: #2[(T1s3x4,),(T1s5x4,)]
* shapes: dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
dynamo-ir#
* inputs: #2[(T1s3x4,),(T1s5x4,)]
* shapes: dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add_3
output: name='add_3' type=dtype('float32') shape=['batch', 4]
tracing#
FAILED
Unable to interpret method 'aten_meth_add_', args=(x, bias), kwargs={}, dispatcher=None
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-BIE] Message starts, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
input_names=['x']
output_names=[]
--CONSTRAINTS--
batch = {'s26'}
s26 = {'batch'}
--SHAPE--
_dynamic_examples=
dynamic_objects=
batch = 'batch'
s26 = 's26'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s26': 'batch'}
dynamic_shapes={'x': {0: Dim('batch', min=0)}}
_known_shapes={'bias': (1, 4), 'x': ('batch', 4)}
_known_types={'bias': 1, 'x': 1}
_known_devices={'x': -1}
_context=[]
_known_value_shape={}
_known_constants=['bias']
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
add_ -> {output}
bias -> {add_}
x -> {add_}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([3, 4])), ('val', torch.float32, torch.Size([s26, 4])))) --- 1:2:('batch', 4):
bias: ('run_node', ('', '')) --- 1:2:(1, 4):
add_: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(tracing=True, aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
InplaceAdd2()
def forward(self, x):
bias = self.bias
add_ = x.add_(bias); x = bias = None
return add_
# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
%x : [num_users=1] = placeholder[target=x]
%bias : [num_users=1] = get_attr[target=bias]
%add_ : [num_users=1] = call_method[target=add_](args = (%x, %bias), kwargs = {})
return add_
-- process.inputs_to_remove --
set()
-- process.progress --
node 2/4 target=add_
-- 1 INPUTS
[GraphBuilder-BIE.1.make_tensor_input] x[1:batchx4]
-- 1 INITIALIZERS
[GraphBuilder-BIE.1.make_initializer] bias[torch.float32:torch.float32:[1.0, 1.0, 1.0, 1.0]] - SOURCE: DynamoInterpret.get_attr.0
-- 0 OUTPUTS
[GraphBuilder-BIE] Message completed, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
InplaceAdd_Mul#
code: yobx.torch._model_eval_cases.InplaceAdd_Mul
forward#
def forward(self, x):
x.add_(self.bias)
return x * 2
yobx#
* inputs: #2[(T1s3x4,),(T1s5x4,)]
* shapes: dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.1(mul_Tensor)##init7_s1_1/Opset.make_node.1/Shape
Add(x, c_bias) -> add_
Mul(add_, init1_s_::RSh1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
dynamo-ir#
* inputs: #2[(T1s3x4,),(T1s5x4,)]
* shapes: dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
init: name='scalar_tensor_default' type=float32 shape=() -- array([2.], dtype=float32)
Add(x, bias) -> add_3
Mul(add_3, scalar_tensor_default) -> mul_4
output: name='mul_4' type=dtype('float32') shape=['batch', 4]
tracing#
FAILED
Unable to interpret method 'aten_meth_add_', args=(x, bias), kwargs={}, dispatcher=None
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-BFK] Message starts, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
input_names=['x']
output_names=[]
--CONSTRAINTS--
batch = {'s26'}
s26 = {'batch'}
--SHAPE--
_dynamic_examples=
dynamic_objects=
batch = 'batch'
s26 = 's26'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s26': 'batch'}
dynamic_shapes={'x': {0: Dim('batch', min=0)}}
_known_shapes={'bias': (1, 4), 'x': ('batch', 4)}
_known_types={'bias': 1, 'x': 1}
_known_devices={'x': -1}
_context=[]
_known_value_shape={}
_known_constants=['bias']
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
add_ -> {mul}
bias -> {add_}
x -> {add_}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([3, 4])), ('val', torch.float32, torch.Size([s26, 4])))) --- 1:2:('batch', 4):
bias: ('run_node', ('', '')) --- 1:2:(1, 4):
add_: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(tracing=True, aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
InplaceAdd_Mul()
def forward(self, x):
bias = self.bias
add_ = x.add_(bias); x = bias = None
mul = add_ * 2; add_ = None
return mul
# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
%x : [num_users=1] = placeholder[target=x]
%bias : [num_users=1] = get_attr[target=bias]
%add_ : [num_users=1] = call_method[target=add_](args = (%x, %bias), kwargs = {})
%mul : [num_users=1] = call_function[target=operator.mul](args = (%add_, 2), kwargs = {})
return mul
-- process.inputs_to_remove --
set()
-- process.progress --
node 2/5 target=add_
-- 1 INPUTS
[GraphBuilder-BFK.1.make_tensor_input] x[1:batchx4]
-- 1 INITIALIZERS
[GraphBuilder-BFK.1.make_initializer] bias[torch.float32:torch.float32:[1.0, 1.0, 1.0, 1.0]] - SOURCE: DynamoInterpret.get_attr.0
-- 0 OUTPUTS
[GraphBuilder-BFK] Message completed, there are 1 initializers, 0 nodes, 1 inputs, 1 outputs.
InplaceCloneAdd_#
code: yobx.torch._model_eval_cases.InplaceCloneAdd_
forward#
def forward(self, x):
x = x.clone()
x.add_(self.bias)
return x
yobx#
* inputs: #2[(T1s3x4,),(T1s5x4,)]
* shapes: dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
dynamo-ir#
* inputs: #2[(T1s3x4,),(T1s5x4,)]
* shapes: dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add_6
output: name='add_6' type=dtype('float32') shape=['batch', 4]
tracing#
FAILED
Unable to interpret method 'aten_meth_add_', args=(clone, bias), kwargs={}, dispatcher=None
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-RWY] Message starts, there are 1 initializers, 1 nodes, 1 inputs, 1 outputs.
input_names=['x']
output_names=[]
--CONSTRAINTS--
batch = {'s26'}
s26 = {'batch'}
--SHAPE--
_dynamic_examples=
dynamic_objects=
batch = 'batch'
s26 = 's26'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s26': 'batch'}
dynamic_shapes={'x': {0: Dim('batch', min=0)}}
_known_shapes={'bias': (1, 4), 'clone': ('batch', 4), 'x': ('batch', 4)}
_known_types={'bias': 1, 'clone': 1, 'x': 1}
_known_devices={'clone': -1, 'x': -1}
_context=[]
_known_value_shape={}
_known_constants=['bias']
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
add_ -> {output}
bias -> {add_}
clone -> {add_}
x -> {clone}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([3, 4])), ('val', torch.float32, torch.Size([s26, 4])))) --- 1:2:('batch', 4):
clone: ('run_node', ('', '')) --- 1:2:('batch', 4):
bias: ('run_node', ('', '')) --- 1:2:(1, 4):
add_: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(tracing=True, aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
InplaceCloneAdd_()
def forward(self, x):
clone = x.clone(); x = None
bias = self.bias
add_ = clone.add_(bias); clone = bias = None
return add_
# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
%x : [num_users=1] = placeholder[target=x]
%clone : [num_users=1] = call_method[target=clone](args = (%x,), kwargs = {})
%bias : [num_users=1] = get_attr[target=bias]
%add_ : [num_users=1] = call_method[target=add_](args = (%clone, %bias), kwargs = {})
return add_
-- process.inputs_to_remove --
set()
-- process.progress --
node 3/5 target=add_
-- 1 INPUTS
[GraphBuilder-RWY.1.make_tensor_input] x[1:batchx4]
-- 1 INITIALIZERS
[GraphBuilder-RWY.1.make_initializer] bias[torch.float32:torch.float32:[1.0, 1.0, 1.0, 1.0]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-RWY.4.make_node] .clone [@:@ ] Identity:['x']->['clone']
-- 0 OUTPUTS
[GraphBuilder-RWY] Message completed, there are 1 initializers, 1 nodes, 1 inputs, 1 outputs.
InplaceSetItemEllipsis_1#
code: yobx.torch._model_eval_cases.InplaceSetItemEllipsis_1
forward#
def forward(self, index, update):
copy = self.params.clone()
copy[..., index] = update
return copy
yobx#
FAILED
L['update'].size()[0] = 8192 is not equal to L['index'].size()[0] = 4
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
dynamo-ir#
E0319 17:51:04.291000 308934 torch/_guards.py:368] Error while creating guard: E0319 17:51:04.291000 308934 torch/_guards.py:368] Name: '' E0319 17:51:04.291000 308934 torch/_guards.py:368] Source: shape_env E0319 17:51:04.291000 308934 torch/_guards.py:368] Create Function: SHAPE_ENV E0319 17:51:04.291000 308934 torch/_guards.py:368] Guard Types: None E0319 17:51:04.291000 308934 torch/_guards.py:368] Code List: None E0319 17:51:04.291000 308934 torch/_guards.py:368] Object Weakref: None E0319 17:51:04.291000 308934 torch/_guards.py:368] Guarded Class Weakref: None E0319 17:51:04.291000 308934 torch/_guards.py:368] Traceback (most recent call last): E0319 17:51:04.291000 308934 torch/_guards.py:368] File "~/vv/this312/lib/python3.12/site-packages/torch/_guards.py", line 366, in create E0319 17:51:04.291000 308934 torch/_guards.py:368] return self.create_fn(builder, self) E0319 17:51:04.291000 308934 torch/_guards.py:368] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ E0319 17:51:04.291000 308934 torch/_guards.py:368] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/guards.py", line 3081, in SHAPE_ENV E0319 17:51:04.291000 308934 torch/_guards.py:368] python_code_parts, verbose_code_parts = _get_code_parts( E0319 17:51:04.291000 308934 torch/_guards.py:368] ^^^^^^^^^^^^^^^^ E0319 17:51:04.291000 308934 torch/_guards.py:368] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/guards.py", line 3054, in _get_code_parts E0319 17:51:04.291000 308934 torch/_guards.py:368] return output_graph.shape_env.produce_guards_verbose( E0319 17:51:04.291000 308934 torch/_guards.py:368] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ E0319 17:51:04.291000 308934 torch/_guards.py:368] File "~/vv/this312/lib/python3.12/site-packages/torch/fx/experimental/symbolic_shapes.py", line 5775, in produce_guards_verbose E0319 17:51:04.291000 308934 torch/_guards.py:368] raise ConstraintViolationError( E0319 17:51:04.291000 308934 torch/_guards.py:368] 
torch.fx.experimental.symbolic_shapes.ConstraintViolationError: L['flat_args'][1].size()[0] = 8192 is not equal to L['flat_args'][0].size()[0] = 4 E0319 17:51:04.295000 308934 torch/_guards.py:370] Created at: E0319 17:51:04.295000 308934 torch/_guards.py:370] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/convert_frame.py", line 818, in trace_frame E0319 17:51:04.295000 308934 torch/_guards.py:370] tracer = InstructionTranslator( E0319 17:51:04.295000 308934 torch/_guards.py:370] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/symbolic_convert.py", line 4699, in __init__ E0319 17:51:04.295000 308934 torch/_guards.py:370] output=OutputGraph( E0319 17:51:04.295000 308934 torch/_guards.py:370] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/output_graph.py", line 704, in __init__ E0319 17:51:04.295000 308934 torch/_guards.py:370] self.init_ambient_guards() E0319 17:51:04.295000 308934 torch/_guards.py:370] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/output_graph.py", line 966, in init_ambient_guards E0319 17:51:04.295000 308934 torch/_guards.py:370] self.guards.add(ShapeEnvSource().make_guard(GuardBuilder.SHAPE_ENV))
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'torch._dynamo.exc.UserError'>: L['update'].size()[0] = 8192 is not equal to L['index'].size()[0] = 4
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
(Refer to the full stack trace above for more information.)
tracing#
FAILED
Unexpected type <class 'torch.export.dynamic_shapes._DimHint'> in dynamic_shapes={0: Dim('batch', min=0), 1: DimHint(DYNAMIC)} at index 1, self._mapping_str={'batch': 13}
InplaceSetItemEllipsis_2#
code: yobx.torch._model_eval_cases.InplaceSetItemEllipsis_2
forward#
def forward(self, index, update):
copy = self.params.clone()
copy[..., index] = update
return copy
yobx#
FAILED
L['update'].size()[0] = 8192 is not equal to L['index'].size()[0] = 4
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
dynamo-ir#
E0319 17:51:05.039000 308934 torch/_guards.py:368] Error while creating guard: E0319 17:51:05.039000 308934 torch/_guards.py:368] Name: '' E0319 17:51:05.039000 308934 torch/_guards.py:368] Source: shape_env E0319 17:51:05.039000 308934 torch/_guards.py:368] Create Function: SHAPE_ENV E0319 17:51:05.039000 308934 torch/_guards.py:368] Guard Types: None E0319 17:51:05.039000 308934 torch/_guards.py:368] Code List: None E0319 17:51:05.039000 308934 torch/_guards.py:368] Object Weakref: None E0319 17:51:05.039000 308934 torch/_guards.py:368] Guarded Class Weakref: None E0319 17:51:05.039000 308934 torch/_guards.py:368] Traceback (most recent call last): E0319 17:51:05.039000 308934 torch/_guards.py:368] File "~/vv/this312/lib/python3.12/site-packages/torch/_guards.py", line 366, in create E0319 17:51:05.039000 308934 torch/_guards.py:368] return self.create_fn(builder, self) E0319 17:51:05.039000 308934 torch/_guards.py:368] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ E0319 17:51:05.039000 308934 torch/_guards.py:368] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/guards.py", line 3081, in SHAPE_ENV E0319 17:51:05.039000 308934 torch/_guards.py:368] python_code_parts, verbose_code_parts = _get_code_parts( E0319 17:51:05.039000 308934 torch/_guards.py:368] ^^^^^^^^^^^^^^^^ E0319 17:51:05.039000 308934 torch/_guards.py:368] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/guards.py", line 3054, in _get_code_parts E0319 17:51:05.039000 308934 torch/_guards.py:368] return output_graph.shape_env.produce_guards_verbose( E0319 17:51:05.039000 308934 torch/_guards.py:368] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ E0319 17:51:05.039000 308934 torch/_guards.py:368] File "~/vv/this312/lib/python3.12/site-packages/torch/fx/experimental/symbolic_shapes.py", line 5775, in produce_guards_verbose E0319 17:51:05.039000 308934 torch/_guards.py:368] raise ConstraintViolationError( E0319 17:51:05.039000 308934 torch/_guards.py:368] 
torch.fx.experimental.symbolic_shapes.ConstraintViolationError: L['flat_args'][1].size()[0] = 8192 is not equal to L['flat_args'][0].size()[0] = 4 E0319 17:51:05.040000 308934 torch/_guards.py:370] Created at: E0319 17:51:05.040000 308934 torch/_guards.py:370] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/convert_frame.py", line 818, in trace_frame E0319 17:51:05.040000 308934 torch/_guards.py:370] tracer = InstructionTranslator( E0319 17:51:05.040000 308934 torch/_guards.py:370] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/symbolic_convert.py", line 4699, in __init__ E0319 17:51:05.040000 308934 torch/_guards.py:370] output=OutputGraph( E0319 17:51:05.040000 308934 torch/_guards.py:370] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/output_graph.py", line 704, in __init__ E0319 17:51:05.040000 308934 torch/_guards.py:370] self.init_ambient_guards() E0319 17:51:05.040000 308934 torch/_guards.py:370] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/output_graph.py", line 966, in init_ambient_guards E0319 17:51:05.040000 308934 torch/_guards.py:370] self.guards.add(ShapeEnvSource().make_guard(GuardBuilder.SHAPE_ENV))
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'torch._dynamo.exc.UserError'>: L['update'].size()[0] = 8192 is not equal to L['index'].size()[0] = 4
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
(Refer to the full stack trace above for more information.)
tracing#
FAILED
Unexpected type <class 'torch.export.dynamic_shapes._DimHint'> in dynamic_shapes={0: Dim('batch', min=0), 1: DimHint(DYNAMIC)} at index 1, self._mapping_str={'batch': 13}
InplaceSetItemMask#
code: yobx.torch._model_eval_cases.InplaceSetItemMask
forward#
def forward(self, x):
mask = x.to(bool)
x[mask] = 2
return x
yobx#
inputs:
#2[(T1s2x3x3,),(T1s3x3x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3, 3]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([2.], dtype=float32)-- DynamoInterpret.placeholder.0
Cast(x, to=9) -> to
Where(to, c_lifted_tensor_0, x) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3, 3]
dynamo-ir#
inputs:
#2[(T1s2x3x3,),(T1s3x3x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3, 3]
init: name='clone' type=float32 shape=() -- array([2.], dtype=float32)
Cast(x, to=9) -> _to_copy
Where(_to_copy, clone, x) -> index_put
output: name='index_put' type=dtype('float32') shape=['batch', 3, 3]
tracing#
FAILED
The conversion of operator 'aten_setitem' into a local function failed.
--ERROR--
Unexpected type <class 'int'> for name.
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-RZK] Message starts, there are 0 initializers, 1 nodes, 1 inputs, 1 outputs.
input_names=['x']
output_names=[]
--CONSTRAINTS--
batch = {'s26'}
s26 = {'batch'}
--SHAPE--
_dynamic_examples=
dynamic_objects=
batch = 'batch'
s26 = 's26'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s26': 'batch'}
dynamic_shapes={'x': {0: Dim('batch', min=0)}}
_known_shapes={'to': ('batch', 3, 3), 'x': ('batch', 3, 3)}
_known_types={'to': 9, 'x': 1}
_known_devices={'to': -1, 'x': -1}
_context=[]
_known_value_shape={}
_known_constants=[]
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
setitem -> {output}
to -> {setitem}
x -> {setitem, to}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([2, 3, 3])), ('val', torch.float32, torch.Size([s26, 3, 3])))) --- 1:3:('batch', 3, 3):
to: ('run_node', ('', '')) --- 9:3:('batch', 3, 3):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(tracing=True, aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
InplaceSetItemMask()
def forward(self, x):
to = x.to(torch.bool)
x[to] = 2; setitem = x; x = to = None
return setitem
# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
%x : [num_users=2] = placeholder[target=x]
%to : [num_users=1] = call_method[target=to](args = (%x, torch.bool), kwargs = {})
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%x, %to, 2), kwargs = {})
return setitem
-- process.inputs_to_remove --
set()
-- process.progress --
node 2/4 target=<built-in function setitem>
-- 1 INPUTS
[GraphBuilder-RZK.1.make_tensor_input] x[1:batchx3x3]
-- 0 INITIALIZERS
[GraphBuilder-RZK.4.make_node] .to [@:@ ] Cast>9:['x']->['to']
-- 0 OUTPUTS
[GraphBuilder-RZK] Message completed, there are 0 initializers, 1 nodes, 1 inputs, 1 outputs.
InplaceSetItemSquare#
code: yobx.torch._model_eval_cases.InplaceSetItemSquare
forward#
def forward(self, x):
x[:2, :3] = 1
return x
yobx#
inputs:
#2[(T1s5x5,),(T1s7x5,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##ReshapeIsSqueezePattern.m1
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- Opset.make_node.1/Shape
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s3_0_1_2::RSh-1x1' type=int64 shape=(3, 1) -- array([0, 1, 2])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s3_0_1_2)##init7_s3_0_1_2/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='fill::T10' type=float32 shape=(3, 2) -- GraphBuilder.constant_folding.from/fold(fill)##fill/
Shape(x, end=1, start=0) -> x::Shape:1
Squeeze(x::Shape:1) -> x::Shape:1::Sq
Range(init7_s_0, x::Shape:1::Sq, init7_s_1) -> _onx_range_init7_s_0
Slice(_onx_range_init7_s_0, init7_s1_0, init7_s1_2, init7_s1_0, init7_s1_1) -> _onx_slice_range_init7_s_0
Unsqueeze(_onx_slice_range_init7_s_0, init7_s1_1) -> _onx_slice_range_init7_s_0::RSh-1x1
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> slice_3::T10
ScatterND(slice_3::T10, init7_s3_0_1_2::RSh-1x1, fill::T10) -> _onx_scatternd_slice_3::T10
Transpose(_onx_scatternd_slice_3::T10, perm=[1,0]) -> slice_scatter
ScatterND(x, _onx_slice_range_init7_s_0::RSh-1x1, slice_scatter) -> output_0
Identity(output_0) -> output_1
output: name='output_1' type=dtype('float32') shape=['batch', 5]
dynamo-ir#
inputs:
#2[(T1s5x5,),(T1s7x5,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='val_4' type=int64 shape=(1,) -- array([0])
init: name='val_8' type=int64 shape=(1,) -- array([2])
init: name='val_20' type=int64 shape=(1,) -- array([3])
init: name='val_24' type=int64 shape=(1,) -- array([1])
init: name='value_0' type=float32 shape=() -- array([1.], dtype=float32)
init: name='val_1' type=int64 shape=() -- array([0])
init: name='val_21' type=int64 shape=() -- array([1])
init: name='val_45' type=int64 shape=(1,) -- array([-1])
Shape(x, start=0) -> val_51
Gather(val_51, val_1, axis=0) -> val_52
Range(val_1, val_52, val_21) -> val_53
Slice(val_53, val_4, val_8, val_4, val_24) -> val_57
Unsqueeze(val_57, val_45) -> val_58
Slice(x, val_4, val_8, val_4, val_24) -> slice_1
Slice(slice_1, val_4, val_20, val_24, val_24) -> slice_2
Shape(slice_2) -> shape
Expand(value_0, shape) -> fill
Transpose(fill, perm=[1,0]) -> val_47
Shape(slice_1, start=0) -> val_38
Gather(val_38, val_21, axis=0) -> val_39
Range(val_1, val_39, val_21) -> val_40
Slice(val_40, val_4, val_20, val_4, val_24) -> val_44
Unsqueeze(val_44, val_45) -> val_46
Transpose(slice_1, perm=[1,0]) -> val_48
ScatterND(val_48, val_46, val_47, reduction=b'none') -> val_49
Transpose(val_49, perm=[1,0]) -> slice_scatter
ScatterND(x, val_58, slice_scatter, reduction=b'none') -> slice_scatter_1
output: name='slice_scatter_1' type=dtype('float32') shape=['batch', 5]
tracing#
FAILED
The conversion of operator 'aten_setitem' into a local function failed.
--ERROR--
Unexpected type <class 'int'> for name.
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-CFG] Message starts, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
input_names=['x']
output_names=[]
--CONSTRAINTS--
batch = {'s26'}
s26 = {'batch'}
--SHAPE--
_dynamic_examples=
dynamic_objects=
batch = 'batch'
s26 = 's26'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s26': 'batch'}
dynamic_shapes={'x': {0: Dim('batch', min=0)}}
_known_shapes={'x': ('batch', 5)}
_known_types={'x': 1}
_known_devices={'x': -1}
_context=[]
_known_value_shape={}
_known_constants=[]
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
setitem -> {output}
x -> {setitem}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([5, 5])), ('val', torch.float32, torch.Size([s26, 5])))) --- 1:2:('batch', 5):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(tracing=True, aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
InplaceSetItemSquare()
def forward(self, x):
x[(slice(None, 2, None), slice(None, 3, None))] = 1; setitem = x; x = None
return setitem
# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
%x : [num_users=1] = placeholder[target=x]
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%x, (slice(None, 2, None), slice(None, 3, None)), 1), kwargs = {})
return setitem
-- process.inputs_to_remove --
set()
-- process.progress --
node 1/3 target=<built-in function setitem>
-- 1 INPUTS
[GraphBuilder-CFG.1.make_tensor_input] x[1:batchx5]
-- 0 INITIALIZERS
-- 0 OUTPUTS
[GraphBuilder-CFG] Message completed, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
InplaceSetItemSquareAdd#
code: yobx.torch._model_eval_cases.InplaceSetItemSquareAdd
forward#
def forward(self, x):
x[:2, :3] = 1
return x + 2
yobx#
inputs:
#2[(T1s5x5,),(T1s7x5,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##ReshapeIsSqueezePattern.m1
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- Opset.make_node.1/Shape
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s3_0_1_2::RSh-1x1' type=int64 shape=(3, 1) -- array([0, 1, 2])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s3_0_1_2)##init7_s3_0_1_2/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='fill::T10' type=float32 shape=(3, 2) -- GraphBuilder.constant_folding.from/fold(fill)##fill/
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.0##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
Shape(x, end=1, start=0) -> x::Shape:1
Squeeze(x::Shape:1) -> x::Shape:1::Sq
Range(init7_s_0, x::Shape:1::Sq, init7_s_1) -> _onx_range_init7_s_0
Slice(_onx_range_init7_s_0, init7_s1_0, init7_s1_2, init7_s1_0, init7_s1_1) -> _onx_slice_range_init7_s_0
Unsqueeze(_onx_slice_range_init7_s_0, init7_s1_1) -> _onx_slice_range_init7_s_0::RSh-1x1
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> slice_3::T10
ScatterND(slice_3::T10, init7_s3_0_1_2::RSh-1x1, fill::T10) -> _onx_scatternd_slice_3::T10
Transpose(_onx_scatternd_slice_3::T10, perm=[1,0]) -> slice_scatter
ScatterND(x, _onx_slice_range_init7_s_0::RSh-1x1, slice_scatter) -> output_0
Add(output_0, init1_s_::RSh1) -> output_1
output: name='output_1' type=dtype('float32') shape=['batch', 5]
dynamo-ir#
inputs:
#2[(T1s5x5,),(T1s7x5,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='val_4' type=int64 shape=(1,) -- array([0])
init: name='val_8' type=int64 shape=(1,) -- array([2])
init: name='val_20' type=int64 shape=(1,) -- array([3])
init: name='val_24' type=int64 shape=(1,) -- array([1])
init: name='value_0' type=float32 shape=() -- array([1.], dtype=float32)
init: name='scalar_tensor_default' type=float32 shape=() -- array([2.], dtype=float32)
init: name='val_1' type=int64 shape=() -- array([0])
init: name='val_21' type=int64 shape=() -- array([1])
init: name='val_45' type=int64 shape=(1,) -- array([-1])
Shape(x, start=0) -> val_51
Gather(val_51, val_1, axis=0) -> val_52
Range(val_1, val_52, val_21) -> val_53
Slice(val_53, val_4, val_8, val_4, val_24) -> val_57
Unsqueeze(val_57, val_45) -> val_58
Slice(x, val_4, val_8, val_4, val_24) -> slice_1
Slice(slice_1, val_4, val_20, val_24, val_24) -> slice_2
Shape(slice_2) -> shape
Expand(value_0, shape) -> fill
Transpose(fill, perm=[1,0]) -> val_47
Shape(slice_1, start=0) -> val_38
Gather(val_38, val_21, axis=0) -> val_39
Range(val_1, val_39, val_21) -> val_40
Slice(val_40, val_4, val_20, val_4, val_24) -> val_44
Unsqueeze(val_44, val_45) -> val_46
Transpose(slice_1, perm=[1,0]) -> val_48
ScatterND(val_48, val_46, val_47, reduction=b'none') -> val_49
Transpose(val_49, perm=[1,0]) -> slice_scatter
ScatterND(x, val_58, slice_scatter, reduction=b'none') -> slice_scatter_1
Add(slice_scatter_1, scalar_tensor_default) -> add_12
output: name='add_12' type=dtype('float32') shape=['batch', 5]
tracing#
FAILED
The conversion of operator 'aten_setitem' into a local function failed.
--ERROR--
Unexpected type <class 'int'> for name.
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-BMU] Message starts, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
input_names=['x']
output_names=[]
--CONSTRAINTS--
batch = {'s26'}
s26 = {'batch'}
--SHAPE--
_dynamic_examples=
dynamic_objects=
batch = 'batch'
s26 = 's26'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s26': 'batch'}
dynamic_shapes={'x': {0: Dim('batch', min=0)}}
_known_shapes={'x': ('batch', 5)}
_known_types={'x': 1}
_known_devices={'x': -1}
_context=[]
_known_value_shape={}
_known_constants=[]
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
setitem -> {add}
x -> {setitem}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([5, 5])), ('val', torch.float32, torch.Size([s26, 5])))) --- 1:2:('batch', 5):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(tracing=True, aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
InplaceSetItemSquareAdd()
def forward(self, x):
x[(slice(None, 2, None), slice(None, 3, None))] = 1; setitem = x; x = None
add = setitem + 2; setitem = None
return add
# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
%x : [num_users=1] = placeholder[target=x]
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%x, (slice(None, 2, None), slice(None, 3, None)), 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%setitem, 2), kwargs = {})
return add
-- process.inputs_to_remove --
set()
-- process.progress --
node 1/4 target=<built-in function setitem>
-- 1 INPUTS
[GraphBuilder-BMU.1.make_tensor_input] x[1:batchx5]
-- 0 INITIALIZERS
-- 0 OUTPUTS
[GraphBuilder-BMU] Message completed, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
InplaceSetItemSquareAdd2#
code: yobx.torch._model_eval_cases.InplaceSetItemSquareAdd2
forward#
def forward(self, x):
x[:2, :3] = 1
return x + 2, x + 3
yobx#
inputs:
#2[(T1s5x5,),(T1s7x5,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##ReshapeIsSqueezePattern.m1
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- Opset.make_node.1/Shape
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s3_0_1_2::RSh-1x1' type=int64 shape=(3, 1) -- array([0, 1, 2])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s3_0_1_2)##init7_s3_0_1_2/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='fill::T10' type=float32 shape=(3, 2) -- GraphBuilder.constant_folding.from/fold(fill)##fill/
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.0##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_2::RSh1' type=float32 shape=(1,) -- array([3.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_2,init7_s1_1)##init1_s_2/shape_type_compute._cast_inputs.0##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
Shape(x, end=1, start=0) -> x::Shape:1
Squeeze(x::Shape:1) -> x::Shape:1::Sq
Range(init7_s_0, x::Shape:1::Sq, init7_s_1) -> _onx_range_init7_s_0
Slice(_onx_range_init7_s_0, init7_s1_0, init7_s1_2, init7_s1_0, init7_s1_1) -> _onx_slice_range_init7_s_0
Unsqueeze(_onx_slice_range_init7_s_0, init7_s1_1) -> _onx_slice_range_init7_s_0::RSh-1x1
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> slice_3::T10
ScatterND(slice_3::T10, init7_s3_0_1_2::RSh-1x1, fill::T10) -> _onx_scatternd_slice_3::T10
Transpose(_onx_scatternd_slice_3::T10, perm=[1,0]) -> slice_scatter
ScatterND(x, _onx_slice_range_init7_s_0::RSh-1x1, slice_scatter) -> output_0
Add(output_0, init1_s_::RSh1) -> output_1
Add(output_0, init1_s_2::RSh1) -> output_2
output: name='output_1' type=dtype('float32') shape=['batch', 5]
output: name='output_2' type=dtype('float32') shape=['batch', 5]
dynamo-ir#
inputs:
#2[(T1s5x5,),(T1s7x5,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='val_4' type=int64 shape=(1,) -- array([0])
init: name='val_8' type=int64 shape=(1,) -- array([2])
init: name='val_20' type=int64 shape=(1,) -- array([3])
init: name='val_24' type=int64 shape=(1,) -- array([1])
init: name='value_0' type=float32 shape=() -- array([1.], dtype=float32)
init: name='scalar_tensor_default' type=float32 shape=() -- array([2.], dtype=float32)
init: name='scalar_tensor_default_1' type=float32 shape=() -- array([3.], dtype=float32)
init: name='val_1' type=int64 shape=() -- array([0])
init: name='val_21' type=int64 shape=() -- array([1])
init: name='val_45' type=int64 shape=(1,) -- array([-1])
Shape(x, start=0) -> val_51
Gather(val_51, val_1, axis=0) -> val_52
Range(val_1, val_52, val_21) -> val_53
Slice(val_53, val_4, val_8, val_4, val_24) -> val_57
Unsqueeze(val_57, val_45) -> val_58
Slice(x, val_4, val_8, val_4, val_24) -> slice_1
Slice(slice_1, val_4, val_20, val_24, val_24) -> slice_2
Shape(slice_2) -> shape
Expand(value_0, shape) -> fill
Transpose(fill, perm=[1,0]) -> val_47
Shape(slice_1, start=0) -> val_38
Gather(val_38, val_21, axis=0) -> val_39
Range(val_1, val_39, val_21) -> val_40
Slice(val_40, val_4, val_20, val_4, val_24) -> val_44
Unsqueeze(val_44, val_45) -> val_46
Transpose(slice_1, perm=[1,0]) -> val_48
ScatterND(val_48, val_46, val_47, reduction=b'none') -> val_49
Transpose(val_49, perm=[1,0]) -> slice_scatter
ScatterND(x, val_58, slice_scatter, reduction=b'none') -> slice_scatter_1
Add(slice_scatter_1, scalar_tensor_default) -> add_12
Add(slice_scatter_1, scalar_tensor_default_1) -> add_16
output: name='add_12' type=dtype('float32') shape=['batch', 5]
output: name='add_16' type=dtype('float32') shape=['batch', 5]
tracing#
FAILED
The conversion of operator 'aten_setitem' into a local function failed.
--ERROR--
Unexpected type <class 'int'> for name.
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-FVS] Message starts, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
input_names=['x']
output_names=[]
--CONSTRAINTS--
batch = {'s26'}
s26 = {'batch'}
--SHAPE--
_dynamic_examples=
dynamic_objects=
batch = 'batch'
s26 = 's26'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s26': 'batch'}
dynamic_shapes={'x': {0: Dim('batch', min=0)}}
_known_shapes={'x': ('batch', 5)}
_known_types={'x': 1}
_known_devices={'x': -1}
_context=[]
_known_value_shape={}
_known_constants=[]
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
setitem -> {add_1, add}
x -> {setitem}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([5, 5])), ('val', torch.float32, torch.Size([s26, 5])))) --- 1:2:('batch', 5):
setitem: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(tracing=True, aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
InplaceSetItemSquareAdd2()
def forward(self, x):
x[(slice(None, 2, None), slice(None, 3, None))] = 1; setitem = x; x = None
add = setitem + 2
add_1 = setitem + 3; setitem = None
return (add, add_1)
# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
%x : [num_users=1] = placeholder[target=x]
%setitem : [num_users=2] = call_function[target=operator.setitem](args = (%x, (slice(None, 2, None), slice(None, 3, None)), 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%setitem, 2), kwargs = {})
%add_1 : [num_users=1] = call_function[target=operator.add](args = (%setitem, 3), kwargs = {})
return (add, add_1)
-- process.inputs_to_remove --
set()
-- process.progress --
node 1/5 target=<built-in function setitem>
-- 1 INPUTS
[GraphBuilder-FVS.1.make_tensor_input] x[1:batchx5]
-- 0 INITIALIZERS
-- 0 OUTPUTS
[GraphBuilder-FVS] Message completed, there are 0 initializers, 0 nodes, 1 inputs, 1 outputs.
SignatureFloat1#
code: yobx.torch._model_eval_cases.SignatureFloat1
forward#
def forward(self, x, alpha: float = 2.0):
return torch.sigmoid(self.linear(x)) - self.buff * alpha
yobx#
inputs:
#2[(T1s4x3,float),(T1s8x3,float)]shapes:
({0:Dim(batch)},None)
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='mul' type=float32 shape=(1,) -- array([0.75], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_mul_b_buff)##_onx_mul_b_buff/GraphBuilder.constant_folding.from/fold(b_buff,init1_s_::RSh1)##b_buff/DynamoInterpret.placeholder.0##init1_s_::RSh1/GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.1(mul_Tensor)##init7_s1_1/Opset.make_node.1/Shape
init: name='GemmTransposePattern--p_linear_weight::T10' type=float32 shape=(1, 3) -- array([ 0.57191503, 0.24015383, -0.00598541], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s2_1_3,p_linear_weight::T10)##p_linear_weight::T10/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.09365652], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--p_linear_weight::T10, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
FAILED
Input mismatch, inputs[0]=(T1r2,float) but names=['x'], model=SignatureFloat1, export='yobx'
dynamo-ir#
inputs:
#2[(T1s4x3,float),(T1s8x3,float)]shapes:
({0:Dim(batch)},None)
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['s77', 3]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.18983322, 0.5742942 , -0.47387984], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.36055017], dtype=float32)
init: name='mul_2' type=float32 shape=(1,) -- array([0.75], dtype=float32)
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, mul_2) -> sub_2
output: name='sub_2' type=dtype('float32') shape=['s77', 1]
FAILED
Input mismatch, inputs[0]=(T1r2,float) but names=['x'], model=SignatureFloat1, export='dynamo-ir'
tracing#
FAILED
Unable to interpret method 'aten_meth_mul', args=(buff, alpha), kwargs={}, dispatcher=None
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-NSY] Message starts, there are 3 initializers, 7 nodes, 2 inputs, 2 outputs.
input_names=['x', 'alpha']
output_names=[]
--CONSTRAINTS--
batch = {'s26'}
s26 = {'batch'}
--SHAPE--
_dynamic_examples=
dynamic_objects=
batch = WrapSym(batch)
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=[0]
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s26': 'batch'}
dynamic_shapes=({0: Dim('batch', min=1, max=1024)}, None)
_known_shapes={'_sub_ime__linear__onx_matmul_input_1': ('batch', 1),
'_sub_ime__linear_input_1': ('batch', 3),
'_sub_ime__linear_linear': ('batch', 1),
'_sub_ime__linear_output': ('batch', 1),
'_sub_ime__linear_weight::T10': (3, 1),
'alpha': (),
'buff': (1,),
'linear': ('batch', 1),
'linear.bias': (1,),
'linear.weight': (1, 3),
'sigmoid': ('batch', 1),
'x': ('batch', 3)}
_known_types={'_sub_ime__linear__onx_matmul_input_1': 1,
'_sub_ime__linear_input_1': 1,
'_sub_ime__linear_linear': 1,
'_sub_ime__linear_output': 1,
'_sub_ime__linear_weight::T10': 1,
'alpha': 1,
'buff': 1,
'linear': 1,
'linear.bias': 1,
'linear.weight': 1,
'sigmoid': 1,
'x': 1}
_known_devices={'_sub_ime__linear_input_1': -1, 'alpha': -1, 'x': -1}
_context=[]
_known_value_shape={}
_known_constants=['_sub_ime__linear_weight::T10', 'buff', 'linear.bias', 'linear.weight']
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
alpha -> {mul}
buff -> {mul}
linear -> {sigmoid}
mul -> {sub}
sigmoid -> {sub}
x -> {linear}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), ('val', torch.float32, torch.Size([s26, 3])))) --- 1:2:('batch', 3):
alpha: ('run_node', (('example_value', torch.float32, torch.Size([])), ('val', torch.float32, torch.Size([])))) --- 1:0:():
linear: ('run_node', ('', '')) --- 1:2:('batch', 1):
sigmoid: ('run_node', ('', '')) --- 1:2:('batch', 1):
buff: ('run_node', ('', '')) --- 1:1:(1,):
mul: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(tracing=True, aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
SignatureFloat1(
(linear): Linear(in_features=3, out_features=1, bias=True)
)
def forward(self, x, alpha : float = 2.0):
linear = self.linear(x); x = None
sigmoid = torch.sigmoid(linear); linear = None
buff = self.buff
mul = buff.mul(alpha); buff = alpha = None
sub = sigmoid - mul; sigmoid = mul = None
return sub
# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
%x : [num_users=1] = placeholder[target=x]
%alpha : float [num_users=1] = placeholder[target=alpha](default=2.0)
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%mul : [num_users=1] = call_method[target=mul](args = (%buff, %alpha), kwargs = {})
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %mul), kwargs = {})
return sub
-- process.inputs_to_remove --
set()
-- process.progress --
node 5/8 target=mul
-- 2 INPUTS
[GraphBuilder-NSY.1.make_tensor_input] x[1:batchx3]
[GraphBuilder-NSY.1.make_tensor_input] alpha[1:]
-- 3 INITIALIZERS
[GraphBuilder-NSY.1.make_initializer] linear.weight[torch.float32:torch.float32:[0.13624539971351624, -0.5618370175361633, 0.5489438772201538]] - SOURCE: GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
[GraphBuilder-NSY.1.make_initializer] linear.bias[torch.float32:torch.float32:[0.19259147346019745]] - SOURCE: GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
[GraphBuilder-NSY.1.make_initializer] buff[torch.float32:torch.float32:[0.5]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-NSY.4.make_node] .make_nodes [@:@ ] Identity:['x']->['_sub_ime__linear_input_1']
[GraphBuilder-NSY.4.make_node] linear [#:# ] Transpose:['linear.weight']->['_sub_ime__linear_weight::T10']
[GraphBuilder-NSY.4.make_node] Opset [@#:# ] MatMul:['_sub_ime__linear_input_1', '_sub_ime__linear_weight::T10']->['_sub_ime__linear__onx_matmul_input_1']
[GraphBuilder-NSY.4.make_node] Opset2 [##:# ] Add:['_sub_ime__linear__onx_matmul_input_1', 'linear.bias']->['_sub_ime__linear_linear']
[GraphBuilder-NSY.4.make_node] .output [#:# ] Identity:['_sub_ime__linear_linear']->['_sub_ime__linear_output']
[GraphBuilder-NSY.4.make_node] .make_nodes2 [#:# ] Identity:['_sub_ime__linear_output']->['linear']
[GraphBuilder-NSY.4.make_node] sigmoid [#:# ] Sigmoid:['linear']->['sigmoid']
-- 0 OUTPUTS
[GraphBuilder-NSY] Message completed, there are 3 initializers, 7 nodes, 2 inputs, 2 outputs.
SignatureInt1#
code: yobx.torch._model_eval_cases.SignatureInt1
forward#
def forward(self, x, i: int = 2):
return torch.sigmoid(self.linear(x)) - self.buff + x[:, i : i + 1]
yobx#
inputs:
#2[(T1s4x3,int),(T1s8x3,int)]shapes:
({0:Dim(batch)},None)
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--p_linear_weight::T10' type=float32 shape=(1, 3) -- array([-0.32786742, 0.33581495, 0.07242316], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s2_1_3,p_linear_weight::T10)##p_linear_weight::T10/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.45419547], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--p_linear_weight::T10, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
Slice(x, init7_s1_1, init7_s1_2, init7_s1_1) -> slice_1
Add(sub, slice_1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
FAILED
Input mismatch, inputs[0]=(T1r2,int) but names=['x'], model=SignatureInt1, export='yobx'
dynamo-ir#
inputs:
#2[(T1s4x3,int),(T1s8x3,int)]shapes:
({0:Dim(batch)},None)
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['s77', 3]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.41108242, 0.18681763, 0.09263336], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.09269895], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='val_3' type=int64 shape=(1,) -- array([1])
init: name='val_7' type=int64 shape=(1,) -- array([2])
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, buff) -> sub_2
Slice(x, val_3, val_7, val_3, val_3) -> slice_1
Add(sub_2, slice_1) -> add_12
output: name='add_12' type=dtype('float32') shape=['s77', 1]
FAILED
Input mismatch, inputs[0]=(T1r2,int) but names=['x'], model=SignatureInt1, export='dynamo-ir'
tracing#
FAILED
[ONNXRuntimeError] : 6 : RUNTIME_EXCEPTION : Non-zero status code returned while running Concat node. Name:'_getitem_slicenSD' Status Message: /onnxruntime_src/onnxruntime/core/providers/cpu/tensor/concat.cc:139 onnxruntime::common::Status onnxruntime::ConcatBase::PrepareForCompute(onnxruntime::OpKernelContext*, const InlinedTensorsVector&, onnxruntime::Prepare&) const input_rank == reference_rank was false. Ranks of input data are different, cannot concatenate them. expected rank: 1 got: 2
SignatureInt2#
code: yobx.torch._model_eval_cases.SignatureInt2
forward#
def forward(self, x, i: int = 2):
return torch.sigmoid(self.linear(x)) - self.buff + x[:, i]
yobx#
inputs:
#1[(T1s4x3,int)]shapes:
dict(x:{0:Dim(batch)},i:None)
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--p_linear_weight::T10' type=float32 shape=(1, 3) -- array([0.4729926, 0.5120745, 0.2920497], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s2_1_3,p_linear_weight::T10)##p_linear_weight::T10/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.38347214], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gather(x, init7_s_1, axis=1) -> select
Gemm(x, GemmTransposePattern--p_linear_weight::T10, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
Add(sub, select) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'batch']
FAILED
Input mismatch, inputs[0]=(T1r2,int) but names=['x'], model=SignatureInt2, export='yobx'
dynamo-ir#
inputs:
#1[(T1s4x3,int)]shapes:
dict(x:{0:Dim(batch)},i:None)
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['s77', 3]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.17202848, 0.5655524 , -0.22823821], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.12821105], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='val_12' type=int64 shape=() -- array([1])
Gather(x, val_12, axis=1) -> select
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, buff) -> sub_2
Add(sub_2, select) -> add_14
output: name='add_14' type=dtype('float32') shape=['s77', 's77']
FAILED
Input mismatch, inputs[0]=(T1r2,int) but names=['x'], model=SignatureInt2, export='dynamo-ir'
tracing#
FAILED
One index is given as an integer i but this requires to append a node 'Squeeze' after this one and this is not yet implemented. You can replace the integer by `i:i+1`
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-BDO] Message starts, there are 5 initializers, 10 nodes, 2 inputs, 2 outputs.
input_names=['x', 'i']
output_names=[]
--CONSTRAINTS--
batch = {'s26'}
s26 = {'batch'}
--SHAPE--
_dynamic_examples=
dynamic_objects=
batch = 'batch'
s26 = 's26'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s26': 'batch'}
dynamic_shapes={'i': None, 'x': {0: Dim('batch', min=0)}}
_known_shapes={'_sub_ime__linear__onx_matmul_input_1': ('batch', 1),
'_sub_ime__linear_input_1': ('batch', 3),
'_sub_ime__linear_linear': ('batch', 1),
'_sub_ime__linear_output': ('batch', 1),
'_sub_ime__linear_weight::T10': (3, 1),
'buff': (1,),
'getitem_axis': (2,),
'getitem_axis_0': (1,),
'getitem_end': (1,),
'getitem_shape': (2,),
'i': (),
'linear': ('batch', 1),
'linear.bias': (1,),
'linear.weight': (1, 3),
'sigmoid': ('batch', 1),
'sub': ('batch', 1),
'x': ('batch', 3)}
_known_types={'_sub_ime__linear__onx_matmul_input_1': 1,
'_sub_ime__linear_input_1': 1,
'_sub_ime__linear_linear': 1,
'_sub_ime__linear_output': 1,
'_sub_ime__linear_weight::T10': 1,
'buff': 1,
'getitem_axis': 7,
'getitem_axis_0': 7,
'getitem_end': 7,
'getitem_shape': 7,
'i': 7,
'linear': 1,
'linear.bias': 1,
'linear.weight': 1,
'sigmoid': 1,
'sub': 1,
'x': 1}
_known_devices={'_sub_ime__linear_input_1': -1,
'getitem_end': -1,
'getitem_shape': -1,
'i': -1,
'x': -1}
_context=[]
_known_value_shape={'getitem_shape': ('batch', 3)}
_known_constants=['_sub_ime__linear_weight::T10',
'buff',
'getitem_axis',
'getitem_axis_0',
'linear.bias',
'linear.weight']
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
buff -> {sub}
getitem -> {add}
i -> {getitem}
linear -> {sigmoid}
sigmoid -> {sub}
sub -> {add}
x -> {getitem, linear}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), ('val', torch.float32, torch.Size([s26, 3])))) --- 1:2:('batch', 3):
i: ('run_node', (('example_value', torch.int64, torch.Size([])), ('val', torch.int64, torch.Size([])))) --- 7:0:():
linear: ('run_node', ('', '')) --- 1:2:('batch', 1):
sigmoid: ('run_node', ('', '')) --- 1:2:('batch', 1):
buff: ('run_node', ('', '')) --- 1:1:(1,):
sub: ('run_node', ('', '')) --- 1:2:('batch', 1):
getitem: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(tracing=True, aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
SignatureInt2(
(linear): Linear(in_features=3, out_features=1, bias=True)
)
def forward(self, x, i : int = 2):
linear = self.linear(x)
sigmoid = torch.sigmoid(linear); linear = None
buff = self.buff
sub = sigmoid - buff; sigmoid = buff = None
getitem = x[(slice(None, None, None), i)]; x = i = None
add = sub + getitem; sub = getitem = None
return add
# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
%x : [num_users=2] = placeholder[target=x]
%i : int [num_users=1] = placeholder[target=i](default=2)
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %buff), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%x, (slice(None, None, None), %i)), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sub, %getitem), kwargs = {})
return add
-- process.inputs_to_remove --
set()
-- process.progress --
node 6/9 target=<built-in function getitem>
-- 2 INPUTS
[GraphBuilder-BDO.1.make_tensor_input] x[1:batchx3]
[GraphBuilder-BDO.1.make_tensor_input] i[7:]
-- 5 INITIALIZERS
[GraphBuilder-BDO.1.make_initializer] linear.weight[torch.float32:torch.float32:[-0.4997558891773224, -0.3485206961631775, 0.22506844997406006]] - SOURCE: GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
[GraphBuilder-BDO.1.make_initializer] linear.bias[torch.float32:torch.float32:[0.15426619350910187]] - SOURCE: GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
[GraphBuilder-BDO.1.make_initializer] buff[torch.float32:torch.float32:[0.5]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-BDO.1.make_initializer] getitem_axis[int64:int64:[0, 1]] - SOURCE: DynamoInterpreter._getitem_slice.axis.1
[GraphBuilder-BDO.1.make_initializer] getitem_axis_0[int64:int64:[0]] - SOURCE: DynamoInterpreter._getitem_slice.axis.2
[GraphBuilder-BDO.4.make_node] .make_nodes [@:@ ] Identity:['x']->['_sub_ime__linear_input_1']
[GraphBuilder-BDO.4.make_node] linear [#:# ] Transpose:['linear.weight']->['_sub_ime__linear_weight::T10']
[GraphBuilder-BDO.4.make_node] Opset [@#:# ] MatMul:['_sub_ime__linear_input_1', '_sub_ime__linear_weight::T10']->['_sub_ime__linear__onx_matmul_input_1']
[GraphBuilder-BDO.4.make_node] Opset2 [##:# ] Add:['_sub_ime__linear__onx_matmul_input_1', 'linear.bias']->['_sub_ime__linear_linear']
[GraphBuilder-BDO.4.make_node] .output [#:# ] Identity:['_sub_ime__linear_linear']->['_sub_ime__linear_output']
[GraphBuilder-BDO.4.make_node] .make_nodes2 [#:# ] Identity:['_sub_ime__linear_output']->['linear']
[GraphBuilder-BDO.4.make_node] sigmoid [#:# ] Sigmoid:['linear']->['sigmoid']
[GraphBuilder-BDO.4.make_node] sub [##:# ] Sub:['sigmoid', 'buff']->['sub']
[GraphBuilder-BDO.4.make_node] _getitem_slicenA [@:@ ] Shape:['x']->['getitem_shape']
[GraphBuilder-BDO.4.make_node] _getitem_slicenB [@#:@ ] GatherElements:['getitem_shape', 'getitem_axis_0']->['getitem_end']
-- 0 OUTPUTS
[GraphBuilder-BDO] Message completed, there are 5 initializers, 10 nodes, 2 inputs, 2 outputs.
SignatureListFixedLength#
code: yobx.torch._model_eval_cases.SignatureListFixedLength
forward#
def forward(self, x, lx: list):
return torch.sigmoid(self.linear(x)) - self.buff + lx[0] * lx[1].sum(axis=1, keepdim=True)
yobx#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--p_linear_weight::T10' type=float32 shape=(1, 3) -- array([-0.36075652, 0.37531978, -0.4514364 ], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s2_1_3,p_linear_weight::T10)##p_linear_weight::T10/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.4725915], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--p_linear_weight::T10, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
dynamo-ir#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.43111077, -0.16575317, 0.4251272 ], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.1030468], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='val_3' type=int64 shape=(1,) -- array([1])
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, buff) -> sub_2
ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul_4
Add(sub_2, mul_4) -> add_15
output: name='add_15' type=dtype('float32') shape=['batch', 1]
tracing#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='_traced_m2.linear.bias' type=float32 shape=(1,) -- array([0.20705722], dtype=float32)-- GraphBuilder.make_nodes/from_traced_m2.linear.bias##DynamoInterpret.get_attr.1/P(_traced_m2.linear.bias)
init: name='_traced_m2_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.get_attr.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_sub_ime___traced_m2_linear_weight::T10' type=float32 shape=(1, 3) -- array([-0.13542762, 0.3353378 , -0.253209 ], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_sub_ime___traced_m2_linear_weight::T10,init7_s2_1_3)##_sub_ime___traced_m2_linear_weight::T10/GraphBuilder.constant_folding.from/fold(_traced_m2.linear.weight)##_traced_m2.linear.weight/GraphBuilder.make_nodes/from_traced_m2.linear.weight##DynamoInterpret.get_attr.1/P(_traced_m2.linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
Gemm(x, GemmTransposePattern--_sub_ime___traced_m2_linear_weight::T10, _traced_m2.linear.bias, transB=1) -> _sub_ime___traced_m2_linear_linear
Sigmoid(_sub_ime___traced_m2_linear_linear) -> sigmoid
Sub(sigmoid, _traced_m2_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output
output: name='output' type=dtype('float32') shape=['batch', 1]
SignatureListFixedWithNone#
code: yobx.torch._model_eval_cases.SignatureListFixedWithNone
forward#
def forward(self, lx):
x = lx[0]
if lx[1] is not None:
x += lx[1]
if lx[2] is not None:
x += lx[2]
return x
yobx#
FAILED
Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements
For more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
dynamo-ir#
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'torch._dynamo.exc.UserError'>: Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements
For more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
(Refer to the full stack trace above for more information.)
tracing#
FAILED
Length mismatch between x (len=3) and dynamic_shapes (len=2); dynamic_shapes must have one entry per element of x, or be None to use no dynamic dimensions, dynamic_shapes=[{0: Dim('batch', min=0)}, {0: Dim('batch', min=0)}]
SignatureListVariableLength#
code: yobx.torch._model_eval_cases.SignatureListVariableLength
forward#
def forward(self, x, lx: list):
t = torch.cat(lx, dim=1).sum(axis=1, keepdim=True)
return torch.sigmoid(self.linear(x)) - self.buff + t
yobx#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#3[T1s8x1,T1s8x2,T1s8x3])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--p_linear_weight::T10' type=float32 shape=(1, 3) -- array([-0.1132583 , -0.26032928, 0.48393244], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s2_1_3,p_linear_weight::T10)##p_linear_weight::T10/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.56866467], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Concat(lx_0, lx_1, axis=1) -> cat
ReduceSum(cat, init7_s1_1, keepdims=1) -> sum_1
Gemm(x, GemmTransposePattern--p_linear_weight::T10, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
Add(sub, sum_1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
FAILED
diff.1
dynamo-ir#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#3[T1s8x1,T1s8x2,T1s8x3])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.1759015, -0.4813443, 0.346603 ], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.17942502], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='val_3' type=int64 shape=(1,) -- array([1])
Concat(lx_0, lx_1, axis=1) -> cat
ReduceSum(cat, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, buff) -> sub_4
Add(sub_4, sum_1) -> add_15
output: name='add_15' type=dtype('float32') shape=['batch', 1]
FAILED
diff.1
tracing#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#3[T1s8x1,T1s8x2,T1s8x3])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='_traced_m2.linear.bias' type=float32 shape=(1,) -- array([-0.266548], dtype=float32)-- GraphBuilder.make_nodes/from_traced_m2.linear.bias##DynamoInterpret.get_attr.1/P(_traced_m2.linear.bias)
init: name='_traced_m2_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.get_attr.0
init: name='GemmTransposePattern--_sub_ime___traced_m2_linear_weight::T10' type=float32 shape=(1, 3) -- array([ 0.22617818, -0.5093121 , 0.17302878], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_sub_ime___traced_m2_linear_weight::T10,init7_s2_1_3)##_sub_ime___traced_m2_linear_weight::T10/GraphBuilder.constant_folding.from/fold(_traced_m2.linear.weight)##_traced_m2.linear.weight/GraphBuilder.make_nodes/from_traced_m2.linear.weight##DynamoInterpret.get_attr.1/P(_traced_m2.linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
Concat(lx_0, lx_1, axis=1) -> cat
ReduceSum(cat, init7_s1_1, keepdims=1) -> sum_1
Gemm(x, GemmTransposePattern--_sub_ime___traced_m2_linear_weight::T10, _traced_m2.linear.bias, transB=1) -> _sub_ime___traced_m2_linear_linear
Sigmoid(_sub_ime___traced_m2_linear_linear) -> sigmoid
Sub(sigmoid, _traced_m2_buff) -> sub
Add(sub, sum_1) -> output
output: name='output' type=dtype('float32') shape=['batch', 1]
FAILED
diff.1
SignatureShapeAsIndex#
code: yobx.torch._model_eval_cases.SignatureShapeAsIndex
forward#
def forward(self, x, y):
t = torch.sigmoid(self.linear(x)) + x
return t[:, : y.shape[1]]
yobx#
inputs:
#1[(T1s4x3,T1s4x2)]
shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(batch),1:Dim(length)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 'length']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--p_linear_weight::T10' type=float32 shape=(1, 3) -- array([-0.06057037, 0.47917342, 0.3887687 ], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s2_1_3,p_linear_weight::T10)##p_linear_weight::T10/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.38659292], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--p_linear_weight::T10, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Add(sigmoid, x) -> add
Shape(y, end=2, start=1) -> y::Shape1:2
Slice(add, init7_s1_0, y::Shape1:2, init7_s1_1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'length']
dynamo-ir#
inputs:
#1[(T1s4x3,T1s4x2)]
shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(batch),1:Dim(length)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 'length']
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.10500433, 0.06449728, -0.46159565], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.37541813], dtype=float32)
init: name='val_8' type=int64 shape=(1,) -- array([1])
init: name='val_1' type=int64 shape=(1,) -- array([0])
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Add(sigmoid, x) -> add_6
Shape(y, end=2, start=1) -> val_0
Slice(add_6, val_1, val_0, val_8, val_8) -> slice_1
output: name='slice_1' type=dtype('float32') shape=['batch', 'length']
tracing#
inputs:
#1[(T1s4x3,T1s4x2)]
shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(batch),1:Dim(length)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 'length']
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.37451068], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='getitem_1_axis' type=int64 shape=(2,) -- array([0, 1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_1_start' type=int64 shape=(2,) -- array([0, 0]) -- DynamoInterpreter._getitem_slice.2
init: name='getitem_1_step' type=int64 shape=(2,) -- array([1, 1]) -- DynamoInterpreter._getitem_slice.3
init: name='GemmTransposePattern--_sub_ime__linear_weight::T10' type=float32 shape=(1, 3) -- array([-0.2344427 , 0.3499136 , -0.31948385], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_sub_ime__linear_weight::T10,init7_s2_1_3)##_sub_ime__linear_weight::T10/GraphBuilder.constant_folding.from/fold(linear.weight)##linear.weight/GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
Gemm(x, GemmTransposePattern--_sub_ime__linear_weight::T10, linear.bias, transB=1) -> _sub_ime__linear_linear
Sigmoid(_sub_ime__linear_linear) -> sigmoid
Add(sigmoid, x) -> add
Shape(add) -> getitem_1_shape
GatherElements(getitem_1_shape, init7_s1_0) -> getitem_1_end
Shape(y) -> getattr_1
Gather(getattr_1, init7_s1_1) -> _onx_gather_getattr_1
Concat(getitem_1_end, _onx_gather_getattr_1, axis=0) -> _onx_concat_getitem_1_end
Slice(add, getitem_1_start, _onx_concat_getitem_1_end, getitem_1_axis, getitem_1_step) -> output
output: name='output' type=dtype('float32') shape=['NEWDIM_slice', 'NEWDIM_slice1']
TypeBFloat16#
code: yobx.torch._model_eval_cases.TypeBFloat16
forward#
def forward(self, x):
xb = x.to(torch.bfloat16)
return (xb + xb).to(torch.float32)
yobx#
inputs:
#1[(T1s4x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
Add(x, x) -> add-x
Cast(add-x, to=16) -> add
Cast(add, to=1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
dynamo-ir#
inputs:
#1[(T1s4x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
Cast(x, to=16) -> _to_copy
Add(_to_copy, _to_copy) -> add_3
Cast(add_3, to=1) -> _to_copy_1
output: name='_to_copy_1' type=dtype('float32') shape=['batch', 4]
FAILED
[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(14) node with name 'node_add_3'
tracing#
inputs:
#1[(T1s4x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
Add(x, x) -> add-x
Cast(add-x, to=16) -> add
Cast(add, to=1) -> output
output: name='output' type=dtype('float32') shape=['batch', 4]
Vmap#
code: yobx.torch._model_eval_cases.Vmap
forward#
def forward(self, x, y):
f = lambda x, y: x * y + 1 # noqa: E731
return torch.vmap(f)(x, y)
yobx#
FAILED
Either your tensor may have escaped from inside a function being vmapped and this is a user error (see https://pytorch.org/functorch/stable/ux_limitations.html), or there is an internal functorch error in `gen_vmap_plumbing` Please file an issue if it looks like the latter
While executing %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_add_batch_dim, %_add_batch_dim_1), kwargs = {})
Original traceback:
File "~/github/yet-another-onnx-builder/yobx/torch/_model_eval_cases.py", line 956, in forward
return torch.vmap(f)(x, y)
Use tlparse to see full graph. (https://github.com/pytorch/tlparse?tab=readme-ov-file#tlparse-parse-structured-pt2-logs)
dynamo-ir#
inputs:
#1[(T1s3,T1s3)]
shapes:
dict(x:{0:DYNAMIC},y:{0:DYNAMIC})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['s17']
input: name='y' type=dtype('float32') shape=['s17']
init: name='_to_copy' type=float32 shape=() -- array([1.], dtype=float32)
Mul(x, y) -> mul
Add(mul, _to_copy) -> add_2
output: name='add_2' type=dtype('float32') shape=['s17']
tracing#
FAILED
Unexpected type <class 'torch.export.dynamic_shapes._DimHint'> in dynamic_shapes={0: DimHint(DYNAMIC)} at index 0, self._mapping_str={}
VmapPython#
code: yobx.torch._model_eval_cases.VmapPython
forward#
def forward(self, x, y):
f = lambda x, y: x * y + 1 # noqa: E731
return patched_vmap(f)(x, y)
yobx#
inputs:
#1[(T1s3,T1s3)]
shapes:
dict(x:{0:DYNAMIC},y:{0:DYNAMIC})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch']
input: name='y' type=dtype('float32') shape=['batch_1']
init: name='init1_s_2_cst2init' type=float32 shape=() -- array([1.], dtype=float32)-- GraphBuilderPatternOptimization.make_initializer.1/Small
Scan(x, y, body=G1, num_scan_inputs=2, scan_input_directions=[0,0], scan_output_axes=[0], scan_output_directions=[0]) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch_1']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- scan_0_movedim,scan_1_movedim_1 -> output_0
input: name='scan_0_movedim' type='NOTENSOR' shape=None
input: name='scan_1_movedim_1' type='NOTENSOR' shape=None
Mul(scan_0_movedim, scan_1_movedim_1) -> mul2
Add(mul2, init1_s_2_cst2init) -> output_0
output: name='output_0' type='NOTENSOR' shape=None
dynamo-ir#
inputs:
#1[(T1s3,T1s3)]
shapes:
dict(x:{0:DYNAMIC},y:{0:DYNAMIC})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['s17']
input: name='y' type=dtype('float32') shape=['s17']
init: name='scalar_tensor_default' type=float32 shape=() -- array([1.], dtype=float32)
Scan(x, y, body=G1, num_scan_inputs=2, scan_input_directions=[0,0], scan_output_directions=[0]) -> getitem
output: name='getitem' type=dtype('float32') shape=['s17']
----- subgraph ---- Scan - node_scan__0 - att.body=G1 -- level=1 -- permute_scan_combine_graph_0__subgraph_in,permute_1_scan_combine_graph_0__subgraph_in -> add_scan_combine_graph_0
input: name='permute_scan_combine_graph_0__subgraph_in' type=dtype('float32') shape=None
input: name='permute_1_scan_combine_graph_0__subgraph_in' type=dtype('float32') shape=None
Mul(permute_scan_combine_graph_0__subgraph_in, permute_1_scan_combine_graph_0__subgraph_in) -> mul
Add(mul, scalar_tensor_default) -> add_scan_combine_graph_0
output: name='add_scan_combine_graph_0' type=dtype('float32') shape=None
tracing#
FAILED
Unexpected type <class 'torch.export.dynamic_shapes._DimHint'> in dynamic_shapes={0: DimHint(DYNAMIC)} at index 0, self._mapping_str={}
Summary#
| case | ir | tracing | yobx |
|---|---|---|---|
FAIL |
FAIL |
||
FAIL |
|||
FAIL |
FAIL |
FAIL |
|
FAIL |
FAIL |
FAIL |
|
FAIL |
|||
FAIL |
|||
FAIL |
|||
FAIL |
|||
FAIL |
FAIL |
FAIL |
|
FAIL |
|||
FAIL |
FAIL |
FAIL |
|
FAIL |
|||
FAIL |
FAIL |
||
FAIL |
FAIL |
||
FAIL |
|||
FAIL |
|||
FAIL |
|||
FAIL |
|||
FAIL |
FAIL |
FAIL |
|
FAIL |
FAIL |
||
FAIL |
FAIL |
||
FAIL |
|||
FAIL |
|||
FAIL |
|||
FAIL |
|||
FAIL |
|||
FAIL |
|||
FAIL |
FAIL |
FAIL |
|
FAIL |
FAIL |
FAIL |
|
FAIL |
|||
FAIL |
|||
FAIL |
|||
FAIL |
|||
FAIL |
FAIL |
FAIL |
|
FAIL |
FAIL |
FAIL |
|
FAIL |
FAIL |
FAIL |
|
FAIL |
FAIL |
FAIL |
|
FAIL |
FAIL |
FAIL |
|
FAIL |
|||
FAIL |
FAIL |
||
FAIL |