Overview of Exportability Comparison#
The following script shows the exported program produced for many short cases, each retrieving an ONNX model equivalent to the original model. A table at the bottom of the page summarizes the results.
<<<
"""Generate an RST report comparing several ONNX exporters.

For every model case discovered by ``discover()``, this script prints (to
stdout, as reStructuredText):

* an index of links to each case section,
* per case: the source of ``forward`` and, for each exporter backend,
  the inputs/dynamic shapes used and either the pretty-printed ONNX graph
  or the failure message,
* a final summary table (case x exporter, empty cell = success,
  ``FAIL`` = failure) rendered with pandas.
"""

import inspect
import textwrap

import pandas

from yobx.helpers import string_type
from yobx.helpers.onnx_helper import pretty_onnx
from yobx.torch.testing.model_eval_cases import discover, run_exporter
from yobx.ext_test_case import unit_test_going

cases = discover()

# Index of links to every case section.
print()
print(":ref:`Summary <ledx-summary-exported-program>`")
print()
sorted_cases = sorted(cases.items())
if unit_test_going():
    # Keep unit tests fast: only render the first three cases.
    sorted_cases = sorted_cases[:3]
for name, _cls_model in sorted_cases:
    print(f"* :ref:`{name} <ledx-model-case-export-{name}>`")
print()
print()

obs = []  # one row per (case, exporter) for the summary table
# NOTE: iterate the same (possibly truncated) list as the index above;
# re-sorting cases.items() here would ignore the unit_test_going() cut.
for name, cls_model in sorted_cases:
    # Section header for the case.
    print()
    print(f".. _ledx-model-case-export-{name}:")
    print()
    print(name)
    print("=" * len(name))
    print()
    print(f"code: :class:`yobx.torch.testing._model_eval_cases.{name}`")
    print()
    print("forward")
    print("+++++++")
    print()
    print(".. code-block:: python")
    print()
    src = inspect.getsource(cls_model.forward)
    if src:
        print(textwrap.indent(textwrap.dedent(src), " "))
    else:
        print(" # code is missing")
    print()
    print()

    # Run every exporter backend on the case and report the outcome.
    for exporter in ("yobx", "dynamo-ir", "tracing", "new-tracing"):
        expname = exporter.replace("export-", "")
        print()
        print(expname)
        print("+" * len(expname))
        print()
        # quiet=True: failures are returned in res["error"] instead of raising.
        res = run_exporter(exporter, cls_model, True, quiet=True)
        case_ref = f":ref:`{name} <ledx-model-case-export-{name}>`"
        if "inputs" in res:
            print(f"* **inputs:** ``{string_type(res['inputs'], with_shape=True)}``")
        if "dynamic_shapes" in res:
            print(f"* **shapes:** ``{string_type(res['dynamic_shapes'])}``")
        print()
        print()
        if "onx" in res:
            print(".. code-block:: text")
            print()
            print(textwrap.indent(pretty_onnx(res["onx"]), " "))
            print()
            print()
        if "error" in res:
            print("**FAILED**")
            print()
            print(".. code-block:: text")
            print()
            err = str(res["error"])
            if err:
                print(textwrap.indent(err, " "))
            else:
                print(" # no error found for the failure")
            print()
            print()
            obs.append(dict(case=case_ref, error="FAIL", exporter=exporter))
        else:
            obs.append(dict(case=case_ref, error="", exporter=exporter))

# Summary table: rows = cases, columns = exporters, cells = "" or "FAIL".
print()
print(".. _ledx-summary-exported-program:")
print()
print("Summary")
print("+++++++")
print()
df = pandas.DataFrame(obs)
piv = df.pivot(index="case", columns="exporter", values="error")
print(piv.to_markdown(tablefmt="rst"))
print()
>>>
AtenAsStrided#
code: yobx.torch.testing._model_eval_cases.AtenAsStrided
forward#
def forward(self, x):
y = torch.as_strided(x, (2, 2, 8, 4), (128, 8, 16, 1))
return y
yobx#
inputs:
#1[(T1s2x2x8x8,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 2, 8, 8]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape
init: name='init7_s128_' type=int64 shape=(128,) -- Opset.make_node.0
init: name='init7_s4_2_2_8_4' type=int64 shape=(4,) -- array([2, 2, 8, 4])-- Opset.make_node.1/Shape
Reshape(x, init7_s1_-1) -> x::RSh-1
Gather(x::RSh-1, init7_s128_) -> _onx_gather_x::RSh-1
Reshape(_onx_gather_x::RSh-1, init7_s4_2_2_8_4) -> output_0
output: name='output_0' type=dtype('float32') shape=[2, 2, 8, 4]
dynamo-ir#
inputs:
#1[(T1s2x2x8x8,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 2, 8, 8]
init: name='rank_tensor' type=int64 shape=(1,) -- array([4])
init: name='val_0' type=int64 shape=(4,) -- array([2, 2, 8, 4])
init: name='val_1' type=int64 shape=(4,) -- array([128, 8, 16, 1])
init: name='neg_1' type=int64 shape=(1,) -- array([-1])
init: name='indices' type=int64 shape=() -- array([0])
init: name='rank_0' type=int64 shape=() -- array([4])
init: name='int64_1_cast' type=int64 shape=() -- array([1])
init: name='tmp_14' type=float32 shape=(1,) -- array([1.], dtype=float32)
Reshape(x, neg_1) -> self_flatten
SequenceEmpty() -> one_seq
Loop(rank_0, , indices, one_seq, body=G1) -> indices_16, one_seq_17
CastLike(indices, indices_16) -> storage_offset_cast
Add(indices_16, storage_offset_cast) -> indices_19
Gather(self_flatten, indices_19) -> as_strided
output: name='as_strided' type=dtype('float32') shape=[2, 2, 8, 4]
----- subgraph ---- Loop - n6_2 - att.body=G1 -- level=1 -- i,cond_in,indices_1,one_seq_2 -> cond_out,indices_13,one_seq_15
input: name='i' type=dtype('int64') shape=None
input: name='cond_in' type=dtype('bool') shape=None
input: name='indices_1' type='NOTENSOR' shape=None
input: name='one_seq_2' type='NOTENSOR' shape=None
Equal(i, indices) -> cond
Sub(rank_0, i) -> tmp
Sub(tmp, int64_1_cast) -> j
Reshape(j, neg_1) -> j_tensor
Gather(val_0, j_tensor, axis=0) -> size_dim_j
Range(indices, size_dim_j, int64_1_cast) -> tmp_6
Slice(val_0, j_tensor, rank_tensor) -> size_after_j
Expand(indices_1, size_after_j) -> indices_4
Gather(val_1, j_tensor, axis=0) -> stride_dim_j
Mul(tmp_6, stride_dim_j) -> add_value
If(cond, then_branch=G2, else_branch=G3) -> shape_11
Reshape(add_value, shape_11) -> add_value_12
Add(indices_4, add_value_12) -> indices_13
SequenceInsert(one_seq_2, tmp_14) -> one_seq_15
Identity(cond_in) -> cond_out
output: name='cond_out' type=dtype('bool') shape=None
output: name='indices_13' type='NOTENSOR' shape=None
output: name='one_seq_15' type='NOTENSOR' shape=None
----- subgraph ---- If - n20 - att.then_branch=G2 -- level=2 -- -> shape
Identity(size_dim_j) -> shape
output: name='shape' type=dtype('int64') shape=[1]
----- subgraph ---- If - n20 - att.else_branch=G3 -- level=2 -- -> shape_10
Cast(size_dim_j, to=1) -> tmp_8
ConcatFromSequence(one_seq_2, axis=0) -> ones
Concat(tmp_8, ones, axis=0) -> shape_9
Cast(shape_9, to=7) -> shape_10
output: name='shape_10' type=dtype('int64') shape=None
----- subgraph ---- If - n20 - att.then_branch=G2 -- level=1 -- -> shape
Identity(size_dim_j) -> shape
output: name='shape' type=dtype('int64') shape=[1]
----- subgraph ---- If - n20 - att.else_branch=G3 -- level=1 -- -> shape_10
Cast(size_dim_j, to=1) -> tmp_8
ConcatFromSequence(one_seq_2, axis=0) -> ones
Concat(tmp_8, ones, axis=0) -> shape_9
Cast(shape_9, to=7) -> shape_10
output: name='shape_10' type=dtype('int64') shape=None
tracing#
inputs:
#1[(T1s2x2x8x8,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 2, 8, 8]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape
init: name='init7_s128_' type=int64 shape=(128,) -- Opset.make_node.0
init: name='init7_s4_2_2_8_4' type=int64 shape=(4,) -- array([2, 2, 8, 4])-- Opset.make_node.1/Shape
Reshape(x, init7_s1_-1) -> x::RSh-1
Gather(x::RSh-1, init7_s128_) -> _onx_gather_x::RSh-1
Reshape(_onx_gather_x::RSh-1, init7_s4_2_2_8_4) -> output
output: name='output' type=dtype('float32') shape=[2, 2, 8, 4]
new-tracing#
inputs:
#1[(T1s2x2x8x8,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 2, 8, 8]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape
init: name='init7_s128_' type=int64 shape=(128,) -- Opset.make_node.0
init: name='init7_s4_2_2_8_4' type=int64 shape=(4,) -- array([2, 2, 8, 4])-- Opset.make_node.1/Shape
Reshape(x, init7_s1_-1) -> x::RSh-1
Gather(x::RSh-1, init7_s128_) -> _onx_gather_x::RSh-1
Reshape(_onx_gather_x::RSh-1, init7_s4_2_2_8_4) -> output
output: name='output' type=dtype('float32') shape=[2, 2, 8, 4]
AtenInterpolate#
code: yobx.torch.testing._model_eval_cases.AtenInterpolate
forward#
def forward(self, x):
y = torch.nn.functional.interpolate(
x, scale_factor=2.0, mode="bilinear", recompute_scale_factor=False
)
return y
yobx#
inputs:
#1[(T1s2x2x3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 2, 3, 4]
init: name='init7_s2_6_8' type=int64 shape=(2,) -- array([6, 8]) -- _aten_upsample_output_size.rsize
Shape(x, end=2, start=0) -> x::Shape:2
Concat(x::Shape:2, init7_s2_6_8, axis=0) -> _onx_concat_x::Shape:2
Resize(x, , , _onx_concat_x::Shape:2, coordinate_transformation_mode=b'pytorch_half_pixel', mode=b'linear', nearest_mode=b'floor') -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 2, 6, 8]
dynamo-ir#
inputs:
#1[(T1s2x2x3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 2, 3, 4]
init: name='val_0' type=float32 shape=(4,) -- array([1., 1., 2., 2.], dtype=float32)
Resize(x, , val_0, keep_aspect_ratio_policy=b'stretch', antialias=0, extrapolation_value=0.00, exclude_outside=0, nearest_mode=b'floor', coordinate_transformation_mode=b'pytorch_half_pixel', cubic_coeff_a=-0.75, mode=b'linear') -> upsample_bilinear2d
output: name='upsample_bilinear2d' type=dtype('float32') shape=['batch', 2, 6, 8]
tracing#
inputs:
#1[(T1s2x2x3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 2, 3, 4]
init: name='init7_s2_6_8' type=int64 shape=(2,) -- array([6, 8]) -- _aten_upsample_output_size.rsize
Shape(x, end=2, start=0) -> x::Shape:2
Concat(x::Shape:2, init7_s2_6_8, axis=0) -> _onx_concat_x::Shape:2
Resize(x, , , _onx_concat_x::Shape:2, coordinate_transformation_mode=b'pytorch_half_pixel', mode=b'linear', nearest_mode=b'floor') -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1', 'd_output_2', 'd_output_3']
new-tracing#
inputs:
#1[(T1s2x2x3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 2, 3, 4]
init: name='init7_s2_6_8' type=int64 shape=(2,) -- array([6, 8]) -- _aten_upsample_output_size.rsize
Shape(x, end=2, start=0) -> x::Shape:2
Concat(x::Shape:2, init7_s2_6_8, axis=0) -> _onx_concat_x::Shape:2
Resize(x, , , _onx_concat_x::Shape:2, coordinate_transformation_mode=b'pytorch_half_pixel', mode=b'linear', nearest_mode=b'floor') -> output
output: name='output' type=dtype('float32') shape=['batch', 2, 6, 8]
AtenNonZero#
code: yobx.torch.testing._model_eval_cases.AtenNonZero
forward#
def forward(self, x):
y = torch.nonzero(x)
return y
yobx#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
NonZero(x) -> _onx_nonzero_x
Transpose(_onx_nonzero_x, perm=[1,0]) -> output_0
output: name='output_0' type=dtype('int64') shape=['NEWDIM_nonzero', 2]
dynamo-ir#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
NonZero(x) -> val_0
Transpose(val_0, perm=[1,0]) -> nonzero
output: name='nonzero' type=dtype('int64') shape=['u0', 2]
tracing#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
NonZero(x) -> _onx_nonzero_x
Transpose(_onx_nonzero_x, perm=[1,0]) -> output
output: name='output' type=dtype('int64') shape=['NEWDIM_nonzero', 2]
new-tracing#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
NonZero(x) -> _onx_nonzero_x
Transpose(_onx_nonzero_x, perm=[1,0]) -> output
output: name='output' type=dtype('int64') shape=['NEWDIM_nonzero', 2]
AtenNonZeroTuple#
code: yobx.torch.testing._model_eval_cases.AtenNonZeroTuple
forward#
def forward(self, x):
y = torch.nonzero(x, as_tuple=True)
return y[0], y[1]
yobx#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- ReshapeIsSqueezePattern.m1##ReshapeIsSqueezePattern.m1
NonZero(x) -> _onx_nonzero_x
Split(_onx_nonzero_x, num_outputs=2) -> _onx_split_nonzero_x_0, _onx_split_nonzero_x_1
Squeeze(_onx_split_nonzero_x_0, init7_s1_0) -> output_0
Squeeze(_onx_split_nonzero_x_1, init7_s1_0) -> output_1
output: name='output_0' type=dtype('int64') shape=['u0']
output: name='output_1' type=dtype('int64') shape=['u0']
dynamo-ir#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='val_7' type=int64 shape=(1,) -- array([1])
NonZero(x) -> val_0
Transpose(val_0, perm=[1,0]) -> nonzero
Split(nonzero, num_outputs=2, axis=-1) -> val_6, val_11
Squeeze(val_6, val_7) -> getitem
Squeeze(val_11, val_7) -> getitem_1
output: name='getitem' type=dtype('int64') shape=['u0']
output: name='getitem_1' type=dtype('int64') shape=['u0']
tracing#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- ReshapeIsSqueezePattern.m1##ReshapeIsSqueezePattern.m1
NonZero(x) -> _onx_nonzero_x
Split(_onx_nonzero_x, num_outputs=2) -> _onx_split_nonzero_x_0, _onx_split_nonzero_x_1
Squeeze(_onx_split_nonzero_x_0, init7_s1_0) -> output_0
Squeeze(_onx_split_nonzero_x_1, init7_s1_0) -> output_1
output: name='output_0' type=dtype('int64') shape=['NEWDIM_nonzero']
output: name='output_1' type=dtype('int64') shape=['NEWDIM_nonzero']
new-tracing#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- aten_unbind_int.dim_np
NonZero(x) -> _onx_nonzero_x
Transpose(_onx_nonzero_x, perm=[1,0]) -> nonzero_default
Split(nonzero_default, axis=1, num_outputs=2) -> unbind_int_u0, unbind_int_u1
Squeeze(unbind_int_u0, init7_s1_1) -> output_0
Squeeze(unbind_int_u1, init7_s1_1) -> output_1
output: name='output_0' type=dtype('int64') shape=['NEWDIM_nonzero']
output: name='output_1' type=dtype('int64') shape=['NEWDIM_nonzero']
AtenRollPos#
code: yobx.torch.testing._model_eval_cases.AtenRollPos
forward#
def forward(self, x):
return torch.roll(x, 1, -1)
yobx#
inputs:
#1[(T1s2x3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_-1, init7_s1_4, init7_s1_-1) -> _onx_slice_x
Slice(x, init7_s1_0, init7_s1_-1, init7_s1_-1) -> _onx_slice_x2
Concat(_onx_slice_x, _onx_slice_x2, axis=-1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3, 4]
dynamo-ir#
FAILED
An error occurred when running the '<onnx_ir.passes.PassManager object at 0x7e2873886ab0>' pass after the following passes: ['<onnx_ir.passes.common.inliner.InlinePass object at 0x7e28738862a0>']
tracing#
inputs:
#1[(T1s2x3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_-1, init7_s1_4, init7_s1_-1) -> _onx_slice_x
Slice(x, init7_s1_0, init7_s1_-1, init7_s1_-1) -> _onx_slice_x2
Concat(_onx_slice_x, _onx_slice_x2, axis=-1) -> output
output: name='output' type=dtype('float32') shape=['batch', 3, 4]
new-tracing#
FAILED
Could not guard on data-dependent expression Eq(12*u0, 0) (unhinted: Eq(12*u0, 0)). (Size-like symbols: none)
consider using data-dependent friendly APIs such as guard_or_false, guard_or_true and statically_known_true.
Caused by: (_refs/__init__.py:4096 in roll)
For more information, run with TORCH_LOGS="dynamic"
For extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"
If you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1
For more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing
For C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1
AtenRollRelu#
code: yobx.torch.testing._model_eval_cases.AtenRollRelu
forward#
def forward(self, x):
return torch.relu(torch.roll(x, -1, -1))
yobx#
inputs:
#1[(T1s2x3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_1, init7_s1_4, init7_s1_-1) -> _onx_slice_x
Slice(x, init7_s1_0, init7_s1_1, init7_s1_-1) -> _onx_slice_x2
Concat(_onx_slice_x, _onx_slice_x2, axis=-1) -> _onx_concat_slice_x
Relu(_onx_concat_slice_x) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3, 4]
dynamo-ir#
inputs:
#1[(T1s2x3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='val_0' type=int64 shape=(1,) -- array([-1])
init: name='val_2' type=int64 shape=(1,) -- array([1])
init: name='val_3' type=int64 shape=(1,) -- array([0])
Size(x) -> val_5
Reshape(val_5, val_0, allowzero=0) -> val_6
Slice(x, val_2, val_6, val_0) -> val_7
Slice(x, val_3, val_2, val_0) -> val_4
Concat(val_7, val_4, axis=-1) -> roll
Relu(roll) -> relu
output: name='relu' type=dtype('float32') shape=['batch', 3, 4]
tracing#
inputs:
#1[(T1s2x3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3, 4]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape
init: name='init7_s1_-1' type=int64 shape=(1,) -- array([-1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_1, init7_s1_4, init7_s1_-1) -> _onx_slice_x
Slice(x, init7_s1_0, init7_s1_1, init7_s1_-1) -> _onx_slice_x2
Concat(_onx_slice_x, _onx_slice_x2, axis=-1) -> _onx_concat_slice_x
Relu(_onx_concat_slice_x) -> output
output: name='output' type=dtype('float32') shape=['batch', 3, 4]
new-tracing#
FAILED
Could not guard on data-dependent expression Eq(12*u0, 0) (unhinted: Eq(12*u0, 0)). (Size-like symbols: none)
consider using data-dependent friendly APIs such as guard_or_false, guard_or_true and statically_known_true.
Caused by: (_refs/__init__.py:4096 in roll)
For more information, run with TORCH_LOGS="dynamic"
For extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"
If you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1
For more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing
For C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1
BuildInIsInstance#
code: yobx.torch.testing._model_eval_cases.BuildInIsInstance
forward#
def forward(self, x, lx: list | torch.Tensor):
if isinstance(lx, list):
t = lx[0] * lx[1].sum(axis=1, keepdim=True)
return torch.sigmoid(self.linear(x)) - self.buff + t
return torch.sigmoid(self.linear(x)) - self.buff + lx
yobx#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--p_linear_weight::T10' type=float32 shape=(1, 3) -- array([ 0.21657902, -0.08821903, -0.48450878], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s2_1_3,p_linear_weight::T10)##p_linear_weight::T10/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.43308964], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--p_linear_weight::T10, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
dynamo-ir#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.2733309 , -0.40132207, 0.22971864], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.5482479], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='val_3' type=int64 shape=(1,) -- array([1])
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, buff) -> sub_4
ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul_1
Add(sub_4, mul_1) -> add_15
output: name='add_15' type=dtype('float32') shape=['batch', 1]
tracing#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='_traced_m2.linear.bias' type=float32 shape=(1,) -- array([0.08145382], dtype=float32)-- GraphBuilder.make_nodes/from_traced_m2.linear.bias##DynamoInterpret.get_attr.1/P(_traced_m2.linear.bias)
init: name='_traced_m2_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.get_attr.0
init: name='GemmTransposePattern--_sub_ime___traced_m2_linear_weight::T10' type=float32 shape=(1, 3) -- array([-0.54795367, 0.19638336, -0.24336351], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_sub_ime___traced_m2_linear_weight::T10,init7_s2_1_3)##_sub_ime___traced_m2_linear_weight::T10/GraphBuilder.constant_folding.from/fold(_traced_m2.linear.weight)##_traced_m2.linear.weight/GraphBuilder.make_nodes/from_traced_m2.linear.weight##DynamoInterpret.get_attr.1/P(_traced_m2.linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
Gemm(x, GemmTransposePattern--_sub_ime___traced_m2_linear_weight::T10, _traced_m2.linear.bias, transB=1) -> _sub_ime___traced_m2_linear_linear
Sigmoid(_sub_ime___traced_m2_linear_linear) -> sigmoid
Sub(sigmoid, _traced_m2_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output
output: name='output' type=dtype('float32') shape=['batch', 1]
new-tracing#
FAILED
'Dim' object has no attribute 'items'
BuildInLen#
code: yobx.torch.testing._model_eval_cases.BuildInLen
forward#
def forward(self, x, lx: list):
t = lx[0] * lx[1].sum(axis=1, keepdim=True)
if len(lx) > 2:
t = t + lx[2].sum(axis=1, keepdim=True)
return torch.sigmoid(self.linear(x)) - self.buff + t
yobx#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#3[T1s8x1,T1s8x2,T1s8x3])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--p_linear_weight::T10' type=float32 shape=(1, 3) -- array([-0.39686862, -0.12332802, -0.19533376], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s2_1_3,p_linear_weight::T10)##p_linear_weight::T10/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.32887343], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--p_linear_weight::T10, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
FAILED
diff.1
dynamo-ir#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#3[T1s8x1,T1s8x2,T1s8x3])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.17662609, 0.47379133, -0.45595127], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.17736335], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='val_3' type=int64 shape=(1,) -- array([1])
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, buff) -> sub_4
ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul_1
Add(sub_4, mul_1) -> add_15
output: name='add_15' type=dtype('float32') shape=['batch', 1]
FAILED
diff.1
tracing#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#3[T1s8x1,T1s8x2,T1s8x3])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='_traced_m2.linear.bias' type=float32 shape=(1,) -- array([0.283617], dtype=float32)-- GraphBuilder.make_nodes/from_traced_m2.linear.bias##DynamoInterpret.get_attr.1/P(_traced_m2.linear.bias)
init: name='_traced_m2_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.get_attr.0
init: name='GemmTransposePattern--_sub_ime___traced_m2_linear_weight::T10' type=float32 shape=(1, 3) -- array([-0.40892956, 0.3099942 , -0.4227712 ], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_sub_ime___traced_m2_linear_weight::T10,init7_s2_1_3)##_sub_ime___traced_m2_linear_weight::T10/GraphBuilder.constant_folding.from/fold(_traced_m2.linear.weight)##_traced_m2.linear.weight/GraphBuilder.make_nodes/from_traced_m2.linear.weight##DynamoInterpret.get_attr.1/P(_traced_m2.linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
Gemm(x, GemmTransposePattern--_sub_ime___traced_m2_linear_weight::T10, _traced_m2.linear.bias, transB=1) -> _sub_ime___traced_m2_linear_linear
Sigmoid(_sub_ime___traced_m2_linear_linear) -> sigmoid
Sub(sigmoid, _traced_m2_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output
output: name='output' type=dtype('float32') shape=['batch', 1]
FAILED
diff.1
new-tracing#
FAILED
'Dim' object has no attribute 'items'
ComplexPolar#
code: yobx.torch.testing._model_eval_cases.ComplexPolar
forward#
def forward(self, x, angle):
return torch.polar(x, angle)
yobx#
inputs:
#1[(T1s4x4,T1s4x4)]shapes:
dict(x:{0:Dim(batch)},angle:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
input: name='angle' type=dtype('float32') shape=['batch', 4]
init: name='init14_s1_' type=complex64 shape=(1,) -- array([0.+1.j], dtype=complex64)-- Opset.make_node.1/Small
Cast(x, to=14) -> x::C14
Cos(angle) -> _onx_cos_angle
Cast(_onx_cos_angle, to=14) -> _onx_cos_angle::C14
Sin(angle) -> _onx_sin_angle
Cast(_onx_sin_angle, to=14) -> _onx_sin_angle::C14
Mul(_onx_sin_angle::C14, init14_s1_) -> _onx_mul_sin_angle::C14
Add(_onx_cos_angle::C14, _onx_mul_sin_angle::C14) -> _onx_add_cos_angle::C14
Mul(x::C14, _onx_add_cos_angle::C14) -> output_0
output: name='output_0' type=dtype('complex64') shape=['batch', 4]
FAILED
[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. Type Error: Type 'tensor(complex64)' of input parameter (_onx_sin_angle::C14) of operator (Mul) in node (polar5) is invalid.
dynamo-ir#
inputs:
#1[(T1s4x4,T1s4x4)]shapes:
dict(x:{0:Dim(batch)},angle:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
input: name='angle' type=dtype('float32') shape=['batch', 4]
init: name='int64_m1_1d' type=int64 shape=(1,) -- array([-1])
Cos(angle) -> tmp
Mul(x, tmp) -> tmp_0
Unsqueeze(tmp_0, int64_m1_1d) -> real
Sin(angle) -> tmp_1
Mul(x, tmp_1) -> tmp_2
Unsqueeze(tmp_2, int64_m1_1d) -> imag
Concat(real, imag, axis=-1) -> polar
output: name='polar' type=dtype('float32') shape=['batch', 4, 2]
FAILED
diff.0
tracing#
inputs:
#1[(T1s4x4,T1s4x4)]shapes:
dict(x:{0:Dim(batch)},angle:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
input: name='angle' type=dtype('float32') shape=['batch', 4]
init: name='init14_s1_' type=complex64 shape=(1,) -- array([0.+1.j], dtype=complex64)-- Opset.make_node.1/Small
Cast(x, to=14) -> x::C14
Cos(angle) -> _onx_cos_angle
Cast(_onx_cos_angle, to=14) -> _onx_cos_angle::C14
Sin(angle) -> _onx_sin_angle
Cast(_onx_sin_angle, to=14) -> _onx_sin_angle::C14
Mul(_onx_sin_angle::C14, init14_s1_) -> _onx_mul_sin_angle::C14
Add(_onx_cos_angle::C14, _onx_mul_sin_angle::C14) -> _onx_add_cos_angle::C14
Mul(x::C14, _onx_add_cos_angle::C14) -> output
output: name='output' type=dtype('complex64') shape=['batch', 4]
FAILED
[ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. Type Error: Type 'tensor(complex64)' of input parameter (_onx_sin_angle::C14) of operator (Mul) in node (polar5) is invalid.
new-tracing#
E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] failed while attempting to run meta for aten.complex.default E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] Traceback (most recent call last): E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] File “~/vv/this312/lib/python3.12/site-packages/torch/_subclasses/fake_tensor.py”, line 2935, in _dispatch_impl E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] r = func(*args, **kwargs) E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] ^^^^^^^^^^^^^^^^^^^^^ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] File “~/vv/this312/lib/python3.12/site-packages/torch/_ops.py”, line 871, in __call__ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] return self._op(*args, **kwargs) E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] ^^^^^^^^^^^^^^^^^^^^^^^^^ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] File “~/vv/this312/lib/python3.12/site-packages/torch/_prims_common/wrappers.py”, line 314, in _fn E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] result = fn(*args, **kwargs) E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] ^^^^^^^^^^^^^^^^^^^ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] File “~/vv/this312/lib/python3.12/site-packages/torch/_meta_registrations.py”, line 3573, in meta_complex E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] real.to(corresponding_complex_dtype(real.dtype)), E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] File “~/vv/this312/lib/python3.12/site-packages/torch/utils/_stats.py”, line 29, in wrapper E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] return fn(*args, **kwargs) E0409 00:09:15.905000 15502 
torch/_subclasses/fake_tensor.py:2939] ^^^^^^^^^^^^^^^^^^^ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] File “~/vv/this312/lib/python3.12/site-packages/torch/_subclasses/fake_tensor.py”, line 900, in __torch_dispatch__ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] return handler(args) E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] ^^^^^^^^^^^^^ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] File “~/vv/this312/lib/python3.12/site-packages/torch/_subclasses/fake_tensor.py”, line 3423, in <lambda> E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] torch.ops.aten.size.default: lambda args: tuple( E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] ^^^^^^ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] File “~/vv/this312/lib/python3.12/site-packages/torch/_subclasses/fake_tensor.py”, line 3424, in <genexpr> E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] int(s) for s in cast(Tensor, args[0]).size() E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] ^^^^^^ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] File “~/vv/this312/lib/python3.12/site-packages/torch/__init__.py”, line 463, in __int__ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] return self.node.int_() E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] ^^^^^^^^^^^^^^^^ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] File “~/vv/this312/lib/python3.12/site-packages/torch/fx/experimental/sym_node.py”, line 472, in int_ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] return self.guard_int(“”, 0) # NB: uses Python backtrace E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] ^^^^^^^^^^^^^^^^^^^^^ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] File “~/vv/this312/lib/python3.12/site-packages/torch/fx/experimental/sym_node.py”, 
line 522, in guard_int E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] r = self.evaluate() E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] ^^^^^^^^^^^^^^^ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] File “~/vv/this312/lib/python3.12/site-packages/torch/fx/experimental/sym_node.py”, line 516, in evaluate E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] return self.shape_env.evaluate_sym_node(self, size_oblivious) E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] File “~/vv/this312/lib/python3.12/site-packages/torch/fx/experimental/symbolic_shapes.py”, line 7849, in evaluate_sym_node E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] return self.evaluate_expr( E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] ^^^^^^^^^^^^^^^^^^^ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] File “~/vv/this312/lib/python3.12/site-packages/torch/fx/experimental/symbolic_shapes.py”, line 7945, in evaluate_expr E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] return self._inner_evaluate_expr( E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] ^^^^^^^^^^^^^^^^^^^^^^^^^^ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] File “~/vv/this312/lib/python3.12/site-packages/torch/fx/experimental/recording.py”, line 285, in wrapper E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] return retlog(fn(*args, **kwargs)) E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] ^^^^^^^^^^^^^^^^^^^ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] File “~/vv/this312/lib/python3.12/site-packages/torch/fx/experimental/symbolic_shapes.py”, line 7968, in _inner_evaluate_expr E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] return 
self._evaluate_expr( E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] ^^^^^^^^^^^^^^^^^^^^ E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] File “~/vv/this312/lib/python3.12/site-packages/torch/fx/experimental/symbolic_shapes.py”, line 8201, in _evaluate_expr E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] raise self._make_data_dependent_error( E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] torch.fx.experimental.symbolic_shapes.GuardOnDataDependentSymNode: Could not extract specialized integer from data-dependent expression u0 (unhinted: u0). (Size-like symbols: none) E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] Caused by: (utils/_stats.py:29 in wrapper) E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] For more information, run with TORCH_LOGS=”dynamic” E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] For extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL=”u0” E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] If you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1 E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] For more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] E0409 00:09:15.905000 15502 torch/_subclasses/fake_tensor.py:2939] For C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1
FAILED
Could not extract specialized integer from data-dependent expression u0 (unhinted: u0). (Size-like symbols: none)
Caused by: (utils/_stats.py:29 in wrapper)
For more information, run with TORCH_LOGS="dynamic"
For extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"
If you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1
For more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing
For C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1
ControlFlowCond#
code: yobx.torch.testing._model_eval_cases.ControlFlowCond
forward#
def forward(self, x):
def true_fn(x):
return torch.sin(x)
def false_fn(x):
return torch.cos(x)
return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
yobx#
inputs:
#1[(T1s5x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0
Cos(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0
Sin(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
dynamo-ir#
inputs:
#1[(T1s5x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='scalar_tensor_default' type=float32 shape=() -- array([0.], dtype=float32)
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, scalar_tensor_default) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__0 - att.then_branch=G1 -- level=1 -- -> sin_true_graph_0
Sin(x) -> sin_true_graph_0
output: name='sin_true_graph_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__0 - att.else_branch=G2 -- level=1 -- -> cos_false_graph_0
Cos(x) -> cos_false_graph_0
output: name='cos_false_graph_0' type=dtype('float32') shape=['batch', 3]
tracing#
inputs:
#1[(T1s5x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output
output: name='output' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> condcc
Cos(x) -> condcc
output: name='condcc' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> condcc
Sin(x) -> condcc
output: name='condcc' type='NOTENSOR' shape=None
new-tracing#
FAILED
invalid syntax (<eval_with_key>.386, line 7)
ControlFlowCond2Inputs#
code: yobx.torch.testing._model_eval_cases.ControlFlowCond2Inputs
forward#
def forward(self, x, y):
def true_fn(x, y):
return torch.sin(x), torch.cos(x) + y
def false_fn(x, y):
return torch.cos(x), torch.sin(x) + y
return torch.cond(x.sum() > 0, true_fn, false_fn, [x, y])
yobx#
inputs:
#1[(T1s5x3,T1s5x3)]shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=['batch', 3]
output: name='output_1' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0,cond#1
Cos(x) -> cond#0
Sin(x) -> sin2
Add(sin2, y) -> cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0,cond#1
Cos(x) -> cos2
Add(cos2, y) -> cond#1
Sin(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
dynamo-ir#
inputs:
#1[(T1s5x3,T1s5x3)]shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 3]
init: name='scalar_tensor_default' type=float32 shape=() -- array([0.], dtype=float32)
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, scalar_tensor_default) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem, getitem_1
output: name='getitem' type=dtype('float32') shape=['batch', 3]
output: name='getitem_1' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__1 - att.then_branch=G1 -- level=1 -- -> sin_true_graph_0,add_12_true_graph_0
Cos(x) -> cos
Add(cos, y) -> add_12_true_graph_0
Sin(x) -> sin_true_graph_0
output: name='sin_true_graph_0' type=dtype('float32') shape=['batch', 3]
output: name='add_12_true_graph_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__1 - att.else_branch=G2 -- level=1 -- -> cos_false_graph_0,add_12_false_graph_0
Cos(x) -> cos_false_graph_0
Sin(x) -> sin_2
Add(sin_2, y) -> add_12_false_graph_0
output: name='cos_false_graph_0' type=dtype('float32') shape=['batch', 3]
output: name='add_12_false_graph_0' type=dtype('float32') shape=['batch', 3]
tracing#
inputs:
#1[(T1s5x3,T1s5x3)]shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=['batch', 3]
output: name='output_1' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> condcc#0,condcc#1
Cos(x) -> condcc#0
Sin(x) -> sin2
Add(sin2, y) -> condcc#1
output: name='condcc#0' type='NOTENSOR' shape=None
output: name='condcc#1' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> condcc#0,condcc#1
Cos(x) -> cos2
Add(cos2, y) -> condcc#1
Sin(x) -> condcc#0
output: name='condcc#0' type='NOTENSOR' shape=None
output: name='condcc#1' type='NOTENSOR' shape=None
new-tracing#
FAILED
invalid syntax (<eval_with_key>.533, line 7)
ControlFlowCond2Outputs#
code: yobx.torch.testing._model_eval_cases.ControlFlowCond2Outputs
forward#
def forward(self, x):
def true_fn(x):
return torch.sin(x), torch.cos(x)
def false_fn(x):
return torch.cos(x), torch.sin(x)
return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
yobx#
inputs:
#1[(T1s5x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=['batch', 3]
output: name='output_1' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0,cond#1
Cos(x) -> cond#0
Sin(x) -> cond#1
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0,cond#1
Cos(x) -> cond#1
Sin(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
output: name='cond#1' type='NOTENSOR' shape=None
dynamo-ir#
inputs:
#1[(T1s5x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='scalar_tensor_default' type=float32 shape=() -- array([0.], dtype=float32)
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, scalar_tensor_default) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem, getitem_1
output: name='getitem' type=dtype('float32') shape=['batch', 3]
output: name='getitem_1' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__1 - att.then_branch=G1 -- level=1 -- -> sin_true_graph_0,cos_true_graph_0
Cos(x) -> cos_true_graph_0
Sin(x) -> sin_true_graph_0
output: name='sin_true_graph_0' type=dtype('float32') shape=['batch', 3]
output: name='cos_true_graph_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__1 - att.else_branch=G2 -- level=1 -- -> cos_false_graph_0,sin_false_graph_0
Cos(x) -> cos_false_graph_0
Sin(x) -> sin_false_graph_0
output: name='cos_false_graph_0' type=dtype('float32') shape=['batch', 3]
output: name='sin_false_graph_0' type=dtype('float32') shape=['batch', 3]
tracing#
inputs:
#1[(T1s5x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1
output: name='output_0' type=dtype('float32') shape=['batch', 3]
output: name='output_1' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> condcc#0,condcc#1
Cos(x) -> condcc#0
Sin(x) -> condcc#1
output: name='condcc#0' type='NOTENSOR' shape=None
output: name='condcc#1' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> condcc#0,condcc#1
Cos(x) -> condcc#1
Sin(x) -> condcc#0
output: name='condcc#0' type='NOTENSOR' shape=None
output: name='condcc#1' type='NOTENSOR' shape=None
new-tracing#
FAILED
invalid syntax (<eval_with_key>.680, line 7)
ControlFlowCondConstant#
code: yobx.torch.testing._model_eval_cases.ControlFlowCondConstant
forward#
def forward(self, x):
def true_fn(x):
return torch.sin(x) - torch.ones(x.shape, dtype=x.dtype)
def false_fn(x):
return torch.cos(x) + torch.ones((1, 1024), dtype=x.dtype)
return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
yobx#
inputs:
#1[(T1s1024x1024,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 1024]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
init: name='init7_s2_1_10242_cst2init' type=int64 shape=(2,) -- array([ 1, 1024])-- GraphBuilderPatternOptimization.make_initializer.1/Shape
init: name='init7_s1_10242_cst2init' type=int64 shape=(1,) -- array([1024])-- GraphBuilderPatternOptimization.make_initializer.1/Shape
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1024]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0
ConstantOfShape(init7_s2_1_10242_cst2init, value=[1.0]) -> ones2
Cos(x) -> cos2
Add(cos2, ones2) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0
Shape(x, end=1, start=0) -> x::Shape:12
Concat(x::Shape:12, init7_s1_10242_cst2init, axis=0) -> _onx_concat_sym_size_int_1::UnSq02
ConstantOfShape(_onx_concat_sym_size_int_1::UnSq02, value=[1.0]) -> ones32
Sin(x) -> sin2
Sub(sin2, ones32) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
dynamo-ir#
inputs:
#1[(T1s1024x1024,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 1024]
init: name='scalar_tensor_default' type=float32 shape=() -- array([0.], dtype=float32)
init: name='val_7' type=float32 shape=() -- array([1.], dtype=float32)
init: name='val_3' type=int64 shape=(1,) -- array([1024])
init: name='ones_2' type=float32 shape=(1, 1024)
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, scalar_tensor_default) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=['batch', 1024]
----- subgraph ---- If - node_cond__0 - att.then_branch=G1 -- level=1 -- -> sub_3_true_graph_0
Shape(x, end=1, start=0) -> val_0_2
Concat(val_0_2, val_3, axis=0) -> val_4
Expand(val_7, val_4) -> ones
Sin(x) -> sin
Sub(sin, ones) -> sub_3_true_graph_0
output: name='sub_3_true_graph_0' type=dtype('float32') shape=['batch', 1024]
----- subgraph ---- If - node_cond__0 - att.else_branch=G2 -- level=1 -- -> add_6_false_graph_0
Cos(x) -> cos
Add(cos, ones_2) -> add_6_false_graph_0
output: name='add_6_false_graph_0' type=dtype('float32') shape=['batch', 1024]
tracing#
inputs:
#1[(T1s1024x1024,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 1024]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt)
init: name='init7_s2_1_10242_cst2init' type=int64 shape=(2,) -- array([ 1, 1024])-- GraphBuilderPatternOptimization.make_initializer.1/Shape
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1']
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> condcc
ConstantOfShape(init7_s2_1_10242_cst2init, value=[1.0]) -> ones2
Cos(x) -> cos2
Add(cos2, ones2) -> condcc
output: name='condcc' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> condcc
Shape(x) -> size2
ConstantOfShape(size2, value=[1.0]) -> ones32
Sin(x) -> sin2
Sub(sin2, ones32) -> condcc
output: name='condcc' type='NOTENSOR' shape=None
new-tracing#
FAILED
ones() received an invalid combination of arguments - got (TracingShape, dtype=torch.dtype), but expected one of:
* (tuple of ints size, *, tuple of names names, torch.dtype dtype = None, torch.layout layout = None, torch.device device = None, bool pin_memory = False, bool requires_grad = False)
* (tuple of ints size, *, Tensor out = None, torch.dtype dtype = None, torch.layout layout = None, torch.device device = None, bool pin_memory = False, bool requires_grad = False)
ControlFlowCondIdentity_153832#
code: yobx.torch.testing._model_eval_cases.ControlFlowCondIdentity_153832
forward#
def forward(self, x, y):
def branch_cond_then_1(x):
x = torch.abs(x) + 1
return x
def branch_cond_else_1(x):
return x # fails but succeeds with x.clone()
x = torch.cond(x.sum() > 0, branch_cond_then_1, branch_cond_else_1, [x])
return x + y
yobx#
FAILED
This higher order operator doesn't work unless it is captured completely with torch.compile. Got graph break/error:
Encountered aliasing during higher order op tracing
Higher Order Operator: torch.cond
Explanation: Higher order ops do not support aliasing. Found in <bound method HigherOrderOperator.name of <torch._higher_order_ops.cond.CondOp object at 0x7e28841f4ad0>>
Hint: Replace `return input` with `return input.clone()` to avoid aliasing.
Hint: Consider using the debug context to change user code to avoid aliasing.
Hint: Please open an issue.
Developer debug context: Input-to-output aliasing detected at nodes l_args_3_0_ and l_args_3_0_ in
graph():
%l_args_3_0_ : torch._subclasses.fake_tensor.FakeTensor [num_users=1] = placeholder[target=l_args_3_0_]
return (l_args_3_0_,)
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0040.html
from user code:
File "~/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/cond.py", line 242, in _cond_op_wrapper
return cond_op(*args, **kwargs)
File "~/vv/this312/lib/python3.12/site-packages/torch/_export/non_strict_utils.py", line 1152, in __torch_function__
return func(*args, **kwargs)
Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"
dynamo-ir#
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'torch._dynamo.exc.UncapturedHigherOrderOpError'>: This higher order operator doesn't work unless it is captured completely with torch.compile. Got graph break/error:
Encountered aliasing during higher order op tracing
Higher Order Operator: torch.cond
Explanation: Higher order ops do not support aliasing. Found in <bound method HigherOrderOperator.name of <torch._higher_order_ops.cond.CondOp object at 0x7e28841f4ad0>>
Hint: Replace `return input` with `return input.clone()` to avoid aliasing.
Hint: Consider using the debug context to change user code to avoid aliasing.
Hint: Please open an issue.
Developer debug context: Input-to-output aliasing detected at nodes l_args_3_0_ and l_args_3_0_ in
graph():
%l_args_3_0_ : torch._subclasses.fake_tensor.FakeTensor [num_users=1] = placeholder[target=l_args_3_0_]
return (l_args_3_0_,)
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0040.html
from user code:
File "~/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/cond.py", line 242, in _cond_op_wrapper
return cond_op(*args, **kwargs)
File "~/vv/this312/lib/python3.12/site-packages/torch/_export/non_strict_utils.py", line 1152, in __torch_function__
return func(*args, **kwargs)
Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"
(Refer to the full stack trace above for more information.)
tracing#
FAILED
This higher order operator doesn't work unless it is captured completely with torch.compile. Got graph break/error:
Encountered aliasing during higher order op tracing
Higher Order Operator: torch.cond
Explanation: Higher order ops do not support aliasing. Found in <bound method HigherOrderOperator.name of <torch._higher_order_ops.cond.CondOp object at 0x7e28841f4ad0>>
Hint: Replace `return input` with `return input.clone()` to avoid aliasing.
Hint: Consider using the debug context to change user code to avoid aliasing.
Hint: Please open an issue.
Developer debug context: Input-to-output aliasing detected at nodes l_args_3_0_ and l_args_3_0_ in
graph():
%l_args_3_0_ : torch._subclasses.fake_tensor.FakeTensor [num_users=1] = placeholder[target=l_args_3_0_]
return (l_args_3_0_,)
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0040.html
from user code:
File "~/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/cond.py", line 242, in _cond_op_wrapper
return cond_op(*args, **kwargs)
Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"
new-tracing#
W0409 00:09:37.735000 15502 torch/fx/experimental/symbolic_shapes.py:7980] failed during evaluate_expr(Ne(Max(1, u1), u1), hint=None, size_oblivious=False, forcing_spec=False
FAILED
vr must not be None for symbol u1
ControlFlowCondNestedModule#
code: yobx.torch.testing._model_eval_cases.ControlFlowCondNestedModule
forward#
def forward(self, x):
def true_fn(x):
return self.submodule(x)
def false_fn(x):
return x - self.weight
y = torch.cond(x.sum() > 0, true_fn, false_fn, [x])
return y
yobx#
inputs:
#1[(T7s2,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: name='x' type=dtype('int64') shape=['batch']
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- shape_type_compute._cast_inputs.1(gt_Scalar)
init: name='init7_s_1002_cst2init' type=int64 shape=() -- array([100])-- GraphBuilderPatternOptimization.make_initializer.1/Shape
init: name='weight' type=float32 shape=(1,) -- array([42.], dtype=float32)-- DynamoInterpret.placeholder.1/P(weight)
init: name='submodule.weight' type=float32 shape=(1,) -- array([100.], dtype=float32)-- DynamoInterpret.placeholder.1/P(submodule.weight)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init7_s_0) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch']
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0
Cast(x, to=1) -> x::C12
Sub(x::C12, weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0
Abs(x) -> abs_12
ReduceSum(abs_12, keepdims=0) -> sum_122
Greater(sum_122, init7_s_1002_cst2init) -> gt22
If(gt22, else_branch=G3, then_branch=G4) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.else_branch=G3 -- level=2 -- -> cond#0
Cast(x, to=1) -> x::C132
Div(x::C132, submodule.weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.then_branch=G4 -- level=2 -- -> cond#0
Cast(x, to=1) -> x::C142
Mul(x::C142, submodule.weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.else_branch=G3 -- level=1 -- -> cond#0
Cast(x, to=1) -> x::C132
Div(x::C132, submodule.weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.then_branch=G4 -- level=1 -- -> cond#0
Cast(x, to=1) -> x::C142
Mul(x::C142, submodule.weight) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
dynamo-ir#
inputs:
#1[(T7s2,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('int64') shape=['batch']
init: name='weight' type=float32 shape=(1,) -- array([42.], dtype=float32)
init: name='submodule.weight' type=float32 shape=(1,) -- array([100.], dtype=float32)
init: name='val_0' type=int64 shape=() -- array([0])
init: name='val_0_2' type=int64 shape=() -- array([100])
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, val_0) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=['batch']
----- subgraph ---- If - node_cond__0 - att.then_branch=G1 -- level=1 -- -> getitem_true_graph_0
Abs(x) -> abs_1
ReduceSum(abs_1, noop_with_empty_axes=0, keepdims=0) -> sum_1_2
Greater(sum_1_2, val_0_2) -> gt_2
If(gt_2, then_branch=G3, else_branch=G4) -> getitem_true_graph_0
output: name='getitem_true_graph_0' type=dtype('float32') shape=['batch']
----- subgraph ---- If - node_cond__0_2 - att.then_branch=G3 -- level=2 -- -> mul_1_true_graph_0__true_graph_0
Cast(x, to=1) -> convert_element_type_default
Mul(convert_element_type_default, submodule.weight) -> mul_1_true_graph_0__true_graph_0
output: name='mul_1_true_graph_0__true_graph_0' type=dtype('float32') shape=['batch']
----- subgraph ---- If - node_cond__0_2 - att.else_branch=G4 -- level=2 -- -> div_true_graph_0__false_graph_0
Cast(x, to=1) -> convert_element_type_default_2
Div(convert_element_type_default_2, submodule.weight) -> div_true_graph_0__false_graph_0
output: name='div_true_graph_0__false_graph_0' type=dtype('float32') shape=['batch']
----- subgraph ---- If - node_cond__0_2 - att.then_branch=G3 -- level=1 -- -> mul_1_true_graph_0__true_graph_0
Cast(x, to=1) -> convert_element_type_default
Mul(convert_element_type_default, submodule.weight) -> mul_1_true_graph_0__true_graph_0
output: name='mul_1_true_graph_0__true_graph_0' type=dtype('float32') shape=['batch']
----- subgraph ---- If - node_cond__0_2 - att.else_branch=G4 -- level=1 -- -> div_true_graph_0__false_graph_0
Cast(x, to=1) -> convert_element_type_default_2
Div(convert_element_type_default_2, submodule.weight) -> div_true_graph_0__false_graph_0
output: name='div_true_graph_0__false_graph_0' type=dtype('float32') shape=['batch']
----- subgraph ---- If - node_cond__0 - att.else_branch=G2 -- level=1 -- -> sub_1_false_graph_0
Cast(x, to=1) -> convert_element_type_default_3
Sub(convert_element_type_default_3, weight) -> sub_1_false_graph_0
output: name='sub_1_false_graph_0' type=dtype('float32') shape=['batch']
tracing#
inputs:
#1[(T7s2,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: name='x' type=dtype('int64') shape=['batch']
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- shape_type_compute._cast_inputs.1(gt)
init: name='weight2_cst2init' type=float32 shape=(1,) -- array([42.], dtype=float32)-- GraphBuilderPatternOptimization.make_initializer.1/Small
init: name='weight32_cst2init' type=float32 shape=(1,) -- array([100.], dtype=float32)-- GraphBuilderPatternOptimization.make_initializer.1/Small
init: name='init7_s_1002_cst2init' type=int64 shape=() -- array([100])-- GraphBuilderPatternOptimization.make_initializer.1/Shape
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init7_s_0) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output
output: name='output' type=dtype('float32') shape=['batch']
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> condcc
Cast(x, to=1) -> arg0::C12
Sub(arg0::C12, weight2_cst2init) -> condcc
output: name='condcc' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> condcc
Abs(x) -> abs_12
ReduceSum(abs_12, keepdims=0) -> sum_122
Greater(sum_122, init7_s_1002_cst2init) -> gt22
If(gt22, else_branch=G3, then_branch=G4) -> condcc
output: name='condcc' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.else_branch=G3 -- level=2 -- -> condcc
Cast(x, to=1) -> arg0::C132
Div(arg0::C132, weight32_cst2init) -> condcc
output: name='condcc' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.then_branch=G4 -- level=2 -- -> condcc
Cast(x, to=1) -> arg0::C142
Mul(arg0::C142, weight32_cst2init) -> condcc
output: name='condcc' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.else_branch=G3 -- level=1 -- -> condcc
Cast(x, to=1) -> arg0::C132
Div(arg0::C132, weight32_cst2init) -> condcc
output: name='condcc' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.then_branch=G4 -- level=1 -- -> condcc
Cast(x, to=1) -> arg0::C142
Mul(arg0::C142, weight32_cst2init) -> condcc
output: name='condcc' type='NOTENSOR' shape=None
new-tracing#
FAILED
invalid syntax (<eval_with_key>.1250, line 7)
ControlFlowCondNonZero#
code: yobx.torch.testing._model_eval_cases.ControlFlowCondNonZero
forward#
def forward(self, input_ids, image_features, vocab_size):
def then_branch(input_ids, image_features, vocab_size):
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
condition = (input_ids < 0) & (input_ids > -int(1e9))
positions = torch.nonzero(condition, as_tuple=True)
input_ids = input_ids.clamp_min(0).clamp_max(vocab_size)
return (input_ids, positions[0], positions[1])
def else_branch(input_ids, image_features, vocab_size):
r = torch.where(torch.zeros((1, 1), dtype=torch.bool))
return (input_ids, r[0], r[1])
a, b, c = torch.cond(
image_features.numel() > 0,
then_branch,
else_branch,
[input_ids, image_features, vocab_size],
)
return a, b, c
yobx#
FAILED
Expect operands to be a tuple of possibly nested dict/list/tuple that only consists of tensor leaves, but got [FakeTensor(..., size=(s72, 12), dtype=torch.int64), FakeTensor(..., size=(s28, s11)), 1025].
dynamo-ir#
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'RuntimeError'>: Expect operands to be a tuple of possibly nested dict/list/tuple that only consists of tensor leaves, but got [FakeTensor(..., size=(s72, 12), dtype=torch.int64), FakeTensor(..., size=(s28, s11)), 1025].
(Refer to the full stack trace above for more information.)
tracing#
inputs:
#2[(T7s2x12,T1s2x16,int),(T7s2x12,T1s2x0,int)]
shapes:
({0:Dim(batch)},{0:Dim(batch),1:Dim(seq_length)},None)
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='input_ids' type=dtype('int64') shape=['batch', 12]
input: name='image_features' type=dtype('float32') shape=['batch', 'seq_length']
input: name='vocab_size' type=dtype('int64') shape=None
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- shape_type_compute._cast_inputs.1(gt)
init: name='init7_s1_02_cst2init' type=int64 shape=(1,) -- array([0]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
init: name='init7_s_-1000000000::RSh12_cst2init' type=int64 shape=(1,) -- array([-1000000000])-- GraphBuilderPatternOptimization.make_initializer.1/Shape
Size(image_features) -> numel
Greater(numel, init7_s_0) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0, output_1, output_2
output: name='output_0' type=dtype('int64') shape=['batch', 12]
output: name='output_1' type=dtype('int64') shape=['NEWDIM_nonzero']
output: name='output_2' type=dtype('int64') shape=['NEWDIM_nonzero']
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> condcc#0,condcc#1,condcc#2
Constant(value=[]) -> condcc#1
Identity(condcc#1) -> condcc#2
Identity(input_ids) -> condcc#0
output: name='condcc#0' type='NOTENSOR' shape=None
output: name='condcc#1' type='NOTENSOR' shape=None
output: name='condcc#2' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> condcc#0,condcc#1,condcc#2
Clip(input_ids, init7_s1_02_cst2init) -> clamp_min2
Min(clamp_min2, vocab_size) -> condcc#0
Less(input_ids, init7_s1_02_cst2init) -> lt2
Greater(input_ids, init7_s_-1000000000::RSh12_cst2init) -> gt22
And(lt2, gt22) -> and_2
NonZero(and_2) -> _onx_nonzero_and_2
Split(_onx_nonzero_and_2, num_outputs=2) -> _onx_split_nonzero_and__02, _onx_split_nonzero_and__12
Squeeze(_onx_split_nonzero_and__02, init7_s1_02_cst2init) -> condcc#1
Squeeze(_onx_split_nonzero_and__12, init7_s1_02_cst2init) -> condcc#2
output: name='condcc#0' type='NOTENSOR' shape=None
output: name='condcc#1' type='NOTENSOR' shape=None
output: name='condcc#2' type='NOTENSOR' shape=None
new-tracing#
FAILED
invalid syntax (<eval_with_key>.1254, line 5)
ControlFlowIndirectRanks#
code: yobx.torch.testing._model_eval_cases.ControlFlowIndirectRanks
forward#
def forward(self, x):
x1 = x + 1
if x1.ndim == 2:
return x1.clone()
return x / x1.ndim
yobx#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([1.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.0##init7_s1_1/Opset.make_node.1/Shape
Add(x, init1_s_::RSh1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
dynamo-ir#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='scalar_tensor_default' type=float32 shape=() -- array([1.], dtype=float32)
Add(x, scalar_tensor_default) -> clone
output: name='clone' type=dtype('float32') shape=['batch', 4]
tracing#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([1.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.1(add)##init7_s1_1/Opset.make_node.1/Shape
Add(x, init1_s_::RSh1) -> output
output: name='output' type=dtype('float32') shape=['batch', 4]
new-tracing#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([1.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.0##init7_s1_1/Opset.make_node.1/Shape
Add(x, init1_s_::RSh1) -> output
output: name='output' type=dtype('float32') shape=['batch', 4]
ControlFlowIndirectRanksCat#
code: yobx.torch.testing._model_eval_cases.ControlFlowIndirectRanksCat
forward#
def forward(self, x, y):
x1 = x + 1
y1 = y + 2
cat = torch.cat([x1, y1], dim=1)
if cat.ndim == 2:
return cat.clone()
return cat / cat.ndim
yobx#
inputs:
#2[(T1s3x4,T1s3x4),(T1s5x4,T1s5x2)]
shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(batch),1:Dim(seq)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
input: name='y' type=dtype('float32') shape=['batch', 'seq']
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([1.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.0##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_2::RSh1' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_2,init7_s1_1)##init1_s_2/shape_type_compute._cast_inputs.0##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
Add(x, init1_s_::RSh1) -> add
Add(y, init1_s_2::RSh1) -> add_1
Concat(add, add_1, axis=1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'seq+4']
dynamo-ir#
inputs:
#2[(T1s3x4,T1s3x4),(T1s5x4,T1s5x2)]
shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(batch),1:Dim(seq)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
input: name='y' type=dtype('float32') shape=['batch', 'seq']
init: name='scalar_tensor_default' type=float32 shape=() -- array([1.], dtype=float32)
init: name='scalar_tensor_default_1' type=float32 shape=() -- array([2.], dtype=float32)
Add(x, scalar_tensor_default) -> add
Add(y, scalar_tensor_default_1) -> add_4
Concat(add, add_4, axis=1) -> clone
output: name='clone' type=dtype('float32') shape=['batch', 'seq + 4']
tracing#
inputs:
#2[(T1s3x4,T1s3x4),(T1s5x4,T1s5x2)]
shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(batch),1:Dim(seq)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
input: name='y' type=dtype('float32') shape=['batch', 'seq']
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([1.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.1(add)##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_2::RSh1' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_2,init7_s1_1)##init1_s_2/shape_type_compute._cast_inputs.1(add)##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
Add(x, init1_s_::RSh1) -> _onx_add_x
Add(y, init1_s_2::RSh1) -> _onx_add_y
Concat(_onx_add_x, _onx_add_y, axis=1) -> output
output: name='output' type=dtype('float32') shape=['batch', 'seq+4']
new-tracing#
FAILED
# no error found for the failure
ControlFlowNestCond#
code: yobx.torch.testing._model_eval_cases.ControlFlowNestCond
forward#
def forward(self, x):
def true_fn2(x):
def true_fn1(x):
return torch.sin(x)
def false_fn1(x):
return torch.cos(x)
return torch.cond(x.sum() < 0, true_fn1, false_fn1, [x])
def false_fn2(x):
return -x
return torch.cond(x.sum() > 0, true_fn2, false_fn2, [x])
yobx#
inputs:
#1[(T1s5x3,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt_Scalar)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> cond#0
Neg(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> cond#0
ReduceSum(x, keepdims=0) -> sum_122
Less(sum_122, init1_s_) -> lt2
If(lt2, else_branch=G3, then_branch=G4) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.else_branch=G3 -- level=2 -- -> cond#0
Cos(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.then_branch=G4 -- level=2 -- -> cond#0
Sin(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.else_branch=G3 -- level=1 -- -> cond#0
Cos(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.then_branch=G4 -- level=1 -- -> cond#0
Sin(x) -> cond#0
output: name='cond#0' type='NOTENSOR' shape=None
dynamo-ir#
inputs:
#1[(T1s5x3,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='scalar_tensor_default' type=float32 shape=() -- array([0.], dtype=float32)
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1
Greater(sum_1, scalar_tensor_default) -> gt
If(gt, then_branch=G1, else_branch=G2) -> getitem
output: name='getitem' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__0 - att.then_branch=G1 -- level=1 -- -> getitem_true_graph_0
ReduceSum(x, noop_with_empty_axes=0, keepdims=0) -> sum_1_2
Less(sum_1_2, scalar_tensor_default) -> lt
If(lt, then_branch=G3, else_branch=G4) -> getitem_true_graph_0
output: name='getitem_true_graph_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__0_2 - att.then_branch=G3 -- level=2 -- -> sin_true_graph_0__true_graph_0
Sin(x) -> sin_true_graph_0__true_graph_0
output: name='sin_true_graph_0__true_graph_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__0_2 - att.else_branch=G4 -- level=2 -- -> cos_true_graph_0__false_graph_0
Cos(x) -> cos_true_graph_0__false_graph_0
output: name='cos_true_graph_0__false_graph_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__0_2 - att.then_branch=G3 -- level=1 -- -> sin_true_graph_0__true_graph_0
Sin(x) -> sin_true_graph_0__true_graph_0
output: name='sin_true_graph_0__true_graph_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__0_2 - att.else_branch=G4 -- level=1 -- -> cos_true_graph_0__false_graph_0
Cos(x) -> cos_true_graph_0__false_graph_0
output: name='cos_true_graph_0__false_graph_0' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - node_cond__0 - att.else_branch=G2 -- level=1 -- -> neg_false_graph_0
Neg(x) -> neg_false_graph_0
output: name='neg_false_graph_0' type=dtype('float32') shape=['batch', 3]
tracing#
inputs:
#1[(T1s5x3,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions.0' version=1
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- shape_type_compute._cast_inputs.1(gt)
ReduceSum(x, keepdims=0) -> sum_1
Greater(sum_1, init1_s_) -> gt
If(gt, else_branch=G1, then_branch=G2) -> output
output: name='output' type=dtype('float32') shape=['batch', 3]
----- subgraph ---- If - cond - att.else_branch=G1 -- level=1 -- -> condcc
Neg(x) -> condcc
output: name='condcc' type='NOTENSOR' shape=None
----- subgraph ---- If - cond - att.then_branch=G2 -- level=1 -- -> condcc
ReduceSum(x, keepdims=0) -> sum_122
Less(sum_122, init1_s_) -> lt2
If(lt2, else_branch=G3, then_branch=G4) -> condcc
output: name='condcc' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.else_branch=G3 -- level=2 -- -> condcc
Cos(x) -> condcc
output: name='condcc' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.then_branch=G4 -- level=2 -- -> condcc
Sin(x) -> condcc
output: name='condcc' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.else_branch=G3 -- level=1 -- -> condcc
Cos(x) -> condcc
output: name='condcc' type='NOTENSOR' shape=None
----- subgraph ---- If - cond22 - att.then_branch=G4 -- level=1 -- -> condcc
Sin(x) -> condcc
output: name='condcc' type='NOTENSOR' shape=None
new-tracing#
FAILED
invalid syntax (<eval_with_key>.1634, line 7)
ControlFlowNumelZero1#
code: yobx.torch.testing._model_eval_cases.ControlFlowNumelZero1
forward#
def forward(self, x):
def empty_cache(x):
return x.shape[-2]
size = (empty_cache(x), 1)
return torch.full(size, fill_value=2)
yobx#
inputs:
#3[(T1s3x2x2x5,),(T1s3x2x1x5,),(T1s3x2x0x5,)]
shapes:
dict(x:{0:DYNAMIC,2:DYNAMIC})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch_1', 2, 'D0_1', 5]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- GraphBuilder.make_shape_from_results.conc
Shape(x, end=3, start=2) -> x::Shape2:3
Concat(x::Shape2:3, init7_s1_1, axis=0) -> _onx_concat_sym_size_int_2::UnSq0
ConstantOfShape(_onx_concat_sym_size_int_2::UnSq0, value=[2]) -> output_0
output: name='output_0' type=dtype('int64') shape=['D0_1', 1]
dynamo-ir#
inputs:
#3[(T1s3x2x2x5,),(T1s3x2x1x5,),(T1s3x2x0x5,)]
shapes:
dict(x:{0:DYNAMIC,2:DYNAMIC})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['s77', 2, 's53', 5]
init: name='val_2' type=float32 shape=() -- array([2.], dtype=float32)
init: name='val_5' type=int64 shape=(1,) -- array([1])
Shape(x, end=3, start=2) -> val_0
Concat(val_0, val_5, axis=0) -> val_6
Expand(val_2, val_6) -> full
output: name='full' type=dtype('float32') shape=['s53', 1]
tracing#
inputs:
#3[(T1s3x2x2x5,),(T1s3x2x1x5,),(T1s3x2x0x5,)]
shapes:
dict(x:{0:DYNAMIC,2:DYNAMIC})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch_1', 2, 'D0_1', 5]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##GraphBuilder.make_shape_from_results.conc
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape
Shape(x) -> size
Gather(size, init7_s1_2) -> _onx_gather_size3
Concat(_onx_gather_size3, init7_s1_1, axis=0) -> _onx_concat_getitem_2::UnSq0
ConstantOfShape(_onx_concat_getitem_2::UnSq0, value=[2]) -> output
output: name='output' type=dtype('int64') shape=['D0_1', 1]
new-tracing#
FAILED
full(): argument 'size' (position 1) must be tuple of ints, but found element of type TracingInt at pos 0
ControlFlowNumelZero2#
code: yobx.torch.testing._model_eval_cases.ControlFlowNumelZero2
forward#
def forward(self, x):
def empty_cache(x):
if x.numel() == 0:
return 0
return x.shape[-2]
size = (empty_cache(x), 1)
return torch.full(size, fill_value=2)
yobx#
inputs:
#3[(T1s3x2x2x5,),(T1s3x2x1x5,),(T1s3x2x0x5,)]
shapes:
dict(x:{0:DYNAMIC,2:DYNAMIC})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch_1', 2, 'D0_1', 5]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- GraphBuilder.make_shape_from_results.conc
Shape(x, end=3, start=2) -> x::Shape2:3
Concat(x::Shape2:3, init7_s1_1, axis=0) -> _onx_concat_sym_size_int_2::UnSq0
ConstantOfShape(_onx_concat_sym_size_int_2::UnSq0, value=[2]) -> output_0
output: name='output_0' type=dtype('int64') shape=['D0_1', 1]
dynamo-ir#
inputs:
#3[(T1s3x2x2x5,),(T1s3x2x1x5,),(T1s3x2x0x5,)]
shapes:
dict(x:{0:DYNAMIC,2:DYNAMIC})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['s77', 2, 's53', 5]
init: name='val_2' type=float32 shape=() -- array([2.], dtype=float32)
init: name='val_5' type=int64 shape=(1,) -- array([1])
Shape(x, end=3, start=2) -> val_0
Concat(val_0, val_5, axis=0) -> val_6
Expand(val_2, val_6) -> full
output: name='full' type=dtype('float32') shape=['s53', 1]
tracing#
inputs:
#3[(T1s3x2x2x5,),(T1s3x2x1x5,),(T1s3x2x0x5,)]
shapes:
dict(x:{0:DYNAMIC,2:DYNAMIC})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch_1', 2, 'D0_1', 5]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##GraphBuilder.make_shape_from_results.conc
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape
Shape(x) -> size
Gather(size, init7_s1_2) -> _onx_gather_size3
Concat(_onx_gather_size3, init7_s1_1, axis=0) -> _onx_concat_getitem_2::UnSq0
ConstantOfShape(_onx_concat_getitem_2::UnSq0, value=[2]) -> output
output: name='output' type=dtype('int64') shape=['D0_1', 1]
new-tracing#
FAILED
Function ControlFlowNumelZero2() returned a real torch.Tensor. All tensor outputs must be TracingTensor instances produced during tracing.
ControlFlowRanks#
code: yobx.torch.testing._model_eval_cases.ControlFlowRanks
forward#
def forward(self, x):
if x.ndim == 2:
return x.clone()
return x / x.ndim
yobx#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
Identity(x) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
dynamo-ir#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
Identity(x) -> clone
output: name='clone' type=dtype('float32') shape=['batch', 4]
tracing#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
Identity(x) -> output
output: name='output' type=dtype('float32') shape=['batch', 4]
new-tracing#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
Identity(x) -> output
output: name='output' type=dtype('float32') shape=['batch', 4]
ControlFlowRanksType#
code: yobx.torch.testing._model_eval_cases.ControlFlowRanksType
forward#
def forward(self, x=None):
if (
x is not None
and (x.dtype == torch.float32 or x.dtype == torch.float16)
and x.ndim == 2
):
return x.clone()
torch._check(x is not None)
return (x / x.ndim).to(torch.float32) # type: ignore
yobx#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
Identity(x) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
dynamo-ir#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
Identity(x) -> clone
output: name='clone' type=dtype('float32') shape=['batch', 4]
tracing#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
Identity(x) -> output
output: name='output' type=dtype('float32') shape=['batch', 4]
new-tracing#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
Identity(x) -> output
output: name='output' type=dtype('float32') shape=['batch', 4]
ControlFlowScan#
code: yobx.torch.testing._model_eval_cases.ControlFlowScan
forward#
def forward(self, x):
def add(carry: torch.Tensor, y: torch.Tensor):
next_carry = carry + y
return [next_carry, next_carry]
init = torch.zeros_like(x[0])
carry, _out = torch.ops.higher_order.scan(add, [init], [x], additional_inputs=[])
return carry
yobx#
inputs:
#1[(T1s3x3,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
ConstantOfShape(init7_s1_3, value=[0.0]) -> zeros_like
Scan(zeros_like, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> output_0, scan#1
output: name='output_0' type=dtype('float32') shape=[3]
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_zeros_like,scan_0_x -> output_0,output_1
input: name='init_0_zeros_like' type=dtype('float32') shape=None
input: name='scan_0_x' type=dtype('float32') shape=None
Add(init_0_zeros_like, scan_0_x) -> output_0
Identity(output_0) -> output_1
output: name='output_0' type=dtype('float32') shape=None
output: name='output_1' type=dtype('float32') shape=None
dynamo-ir#
FAILED
Failed to decompose the FX graph for ONNX compatibility. This is step 2/3 of exporting the model to ONNX. Next steps:
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'RuntimeError'>: scan might be aliasing the input or the output!
While executing %scan : [num_users=2] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like], [%x], ()), kwargs = {})
Original traceback:
File "~/github/yet-another-onnx-builder/yobx/torch/testing/_model_eval_cases.py", line 601, in forward
carry, _out = torch.ops.higher_order.scan(add, [init], [x], additional_inputs=[])
Use tlparse to see full graph. (https://github.com/pytorch/tlparse?tab=readme-ov-file#tlparse-parse-structured-pt2-logs)
(Refer to the full stack trace above for more information.)
tracing#
inputs:
#1[(T1s3x3,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='init7_s1_3' type=int64 shape=(1,) -- array([3]) -- Opset.make_node.1/Shape
ConstantOfShape(init7_s1_3, value=[0.0]) -> zeros_like
Scan(zeros_like, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> output, scancc#1
output: name='output' type=dtype('float32') shape=[3]
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_zeros_like,scan_0_x -> output_0,output_1
input: name='init_0_zeros_like' type=dtype('float32') shape=None
input: name='scan_0_x' type=dtype('float32') shape=None
Add(init_0_zeros_like, scan_0_x) -> output_0
Identity(output_0) -> output_1
output: name='output_0' type=dtype('float32') shape=None
output: name='output_1' type=dtype('float32') shape=None
new-tracing#
FAILED
index 0 is out of bounds for dimension 0 with size 0
ControlFlowScan2Carried#
code: yobx.torch.testing._model_eval_cases.ControlFlowScan2Carried
forward#
def forward(self, x):
def add(carry1: torch.Tensor, carry2: torch.Tensor, y1: torch.Tensor, y2: torch.Tensor):
next_carry1 = carry1 + y1
next_carry2 = carry2 * y2
return [next_carry1, next_carry2, next_carry1, next_carry2]
init1 = torch.zeros_like(x[0])
init2 = torch.ones_like(x[0])
carry1, carry2, out1, out2 = torch.ops.higher_order.scan(
add,
[init1, init2],
[x, x * 2],
# dim=0, # 01/31/2025, not supported anymore
additional_inputs=[],
)
return carry1, carry2, out1, out2
yobx#
inputs:
#1[(T1s3x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.1(mul_Tensor)##init7_s1_1/Opset.make_node.1/Shape
ConstantOfShape(init7_s1_4, value=[1.0]) -> ones_like
ConstantOfShape(init7_s1_4, value=[0.0]) -> zeros_like
Mul(x, init1_s_::RSh1) -> _onx_mul_x
Scan(zeros_like, ones_like, x, _onx_mul_x, body=G1, num_scan_inputs=2, scan_input_directions=[0,0], scan_output_axes=[0,0], scan_output_directions=[0,0]) -> output_0, output_1, output_2, output_3
output: name='output_0' type=dtype('float32') shape=[4]
output: name='output_1' type=dtype('float32') shape=[4]
output: name='output_2' type=dtype('float32') shape=['batch', 4]
output: name='output_3' type=dtype('float32') shape=['batch', 4]
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_zeros_like,init_1_ones_like,scan_0_x,scan_1_mul -> output_0,output_1,output_2,output_3
input: name='init_0_zeros_like' type=dtype('float32') shape=None
input: name='init_1_ones_like' type=dtype('float32') shape=None
input: name='scan_0_x' type=dtype('float32') shape=None
input: name='scan_1_mul' type=dtype('float32') shape=None
Add(init_0_zeros_like, scan_0_x) -> output_0
Identity(output_0) -> output_2
Mul(init_1_ones_like, scan_1_mul) -> output_1
Identity(output_1) -> output_3
output: name='output_0' type=dtype('float32') shape=None
output: name='output_1' type=dtype('float32') shape=None
output: name='output_2' type=dtype('float32') shape=None
output: name='output_3' type=dtype('float32') shape=None
dynamo-ir#
FAILED
Failed to decompose the FX graph for ONNX compatibility. This is step 2/3 of exporting the model to ONNX. Next steps:
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the *onnx* component. Attach the error report and the pt2 model.
## Exception summary
<class 'RuntimeError'>: scan might be aliasing the input or the output!
While executing %scan : [num_users=4] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like, %ones_like], [%x, %mul], ()), kwargs = {})
Original traceback:
File "~/github/yet-another-onnx-builder/yobx/torch/testing/_model_eval_cases.py", line 617, in forward
carry1, carry2, out1, out2 = torch.ops.higher_order.scan(
Use tlparse to see full graph. (https://github.com/pytorch/tlparse?tab=readme-ov-file#tlparse-parse-structured-pt2-logs)
(Refer to the full stack trace above for more information.)
tracing#
inputs:
#1[(T1s3x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s1_4' type=int64 shape=(1,) -- array([4]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.1(mul)##init7_s1_1/Opset.make_node.1/Shape
ConstantOfShape(init7_s1_4, value=[1.0]) -> ones_like
ConstantOfShape(init7_s1_4, value=[0.0]) -> zeros_like
Mul(x, init1_s_::RSh1) -> _onx_mul_x
Scan(zeros_like, ones_like, x, _onx_mul_x, body=G1, num_scan_inputs=2, scan_input_directions=[0,0], scan_output_axes=[0,0], scan_output_directions=[0,0]) -> output_0, output_1, output_2, output_3
output: name='output_0' type=dtype('float32') shape=[4]
output: name='output_1' type=dtype('float32') shape=[4]
output: name='output_2' type=dtype('float32') shape=['d_output_2_0', 'd_output_2_1']
output: name='output_3' type=dtype('float32') shape=['d_output_3_0', 'd_output_3_1']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_zeros_like,init_1_ones_like,scan_0_x,scan_1_mul -> output_0,output_1,output_2,output_3
input: name='init_0_zeros_like' type=dtype('float32') shape=None
input: name='init_1_ones_like' type=dtype('float32') shape=None
input: name='scan_0_x' type=dtype('float32') shape=None
input: name='scan_1_mul' type=dtype('float32') shape=None
Add(init_0_zeros_like, scan_0_x) -> output_0
Identity(output_0) -> output_2
Mul(init_1_ones_like, scan_1_mul) -> output_1
Identity(output_1) -> output_3
output: name='output_0' type=dtype('float32') shape=None
output: name='output_1' type=dtype('float32') shape=None
output: name='output_2' type=dtype('float32') shape=None
output: name='output_3' type=dtype('float32') shape=None
new-tracing#
FAILED
index 0 is out of bounds for dimension 0 with size 0
ControlFlowScanCDist#
code: yobx.torch.testing._model_eval_cases.ControlFlowScanCDist
forward#
def forward(self, x):
def dist(carry: torch.Tensor, x: torch.Tensor):
sub = carry - x.reshape((1, -1))
sq = sub * sub
rd = sq.sum(dim=1) ** 0.5
# clone --> UnsupportedAliasMutationException:
# Combine_fn might be aliasing the input!
return [carry.clone(), rd]
_carry, out = torch.ops.higher_order.scan(
dist,
[x],
[x],
# dim=0, # 01/31/2025, not supported anymore
additional_inputs=[],
)
return out
yobx#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s1_12_cst2init' type=int64 shape=(1,) -- array([1]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
init: name='init1_s_2_cst2init' type=float32 shape=() -- array([0.5], dtype=float32)-- GraphBuilderPatternOptimization.make_initializer.1/Small
init: name='init7_s1_02_cst2init' type=int64 shape=(1,) -- array([0]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
Scan(x, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'batch']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_x,scan_0_x -> output_0,output_1
input: name='init_0_x' type=dtype('float32') shape=None
input: name='scan_0_x' type=dtype('float32') shape=None
Identity(init_0_x) -> output_0
Unsqueeze(scan_0_x, init7_s1_02_cst2init) -> reshape2
Sub(init_0_x, reshape2) -> sub2
Mul(sub2, sub2) -> mul2
ReduceSum(mul2, init7_s1_12_cst2init, keepdims=0) -> sum_12
Pow(sum_12, init1_s_2_cst2init) -> output_1
output: name='output_0' type=dtype('float32') shape=None
output: name='output_1' type=dtype('float32') shape=None
dynamo-ir#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='val_3' type=int64 shape=(2,) -- array([ 1, -1])
init: name='val_4' type=int64 shape=(1,) -- array([1])
init: name='val_5' type=float32 shape=() -- array([0.5], dtype=float32)
Scan(x, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_directions=[0]) -> scan__0, getitem_1
output: name='getitem_1' type=dtype('float32') shape=['batch', 'batch']
----- subgraph ---- Scan - node_scan__1 - att.body=G1 -- level=1 -- x_scan_combine_graph_0__subgraph_in,x_scan_combine_graph_0__subgraph_in_1 -> clone_scan_combine_graph_0,pow_1_scan_combine_graph_0
input: name='x_scan_combine_graph_0__subgraph_in' type=dtype('float32') shape=['s77', 4]
input: name='x_scan_combine_graph_0__subgraph_in_1' type=dtype('float32') shape=[4]
Identity(x_scan_combine_graph_0__subgraph_in) -> clone_scan_combine_graph_0
Reshape(x_scan_combine_graph_0__subgraph_in_1, val_3, allowzero=1) -> view
Sub(x_scan_combine_graph_0__subgraph_in, view) -> sub_1
Mul(sub_1, sub_1) -> mul_4
ReduceSum(mul_4, val_4, noop_with_empty_axes=0, keepdims=0) -> sum_1
Pow(sum_1, val_5) -> pow_1_scan_combine_graph_0
output: name='clone_scan_combine_graph_0' type=dtype('float32') shape=['batch', 4]
output: name='pow_1_scan_combine_graph_0' type=dtype('float32') shape=['batch']
tracing#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='init7_s1_12_cst2init' type=int64 shape=(1,) -- array([1]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
init: name='init7_s1_02_cst2init' type=int64 shape=(1,) -- array([0]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
Scan(x, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scancc#0, output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_x,scan_0_x -> output_0,output_1
input: name='init_0_x' type=dtype('float32') shape=None
input: name='scan_0_x' type=dtype('float32') shape=None
Identity(init_0_x) -> output_0
Unsqueeze(scan_0_x, init7_s1_02_cst2init) -> reshape2
Sub(init_0_x, reshape2) -> sub2
Mul(sub2, sub2) -> mul2
ReduceSum(mul2, init7_s1_12_cst2init, keepdims=0) -> sum_12
Sqrt(sum_12) -> output_1
output: name='output_0' type=dtype('float32') shape=None
output: name='output_1' type=dtype('float32') shape=None
new-tracing#
FAILED
There was no rule registered for HOP scan and subclass <class 'yobx.torch.new_tracing.tensor.TracingTensor'>. We recommend filing an issue.
ControlFlowScanCDist2#
code: yobx.torch.testing._model_eval_cases.ControlFlowScanCDist2
forward#
def forward(self, x):
def dist(unused: torch.Tensor, x: torch.Tensor, samex: torch.Tensor):
sub = samex - x.reshape((1, -1))
sq = sub * sub
rd = torch.sqrt(sq.sum(dim=1))
# clone --> UnsupportedAliasMutationException:
# Combine_fn might be aliasing the input!
return [unused.clone(), rd]
z = torch.tensor([0], dtype=torch.float32)
y = x.clone()
out = torch.ops.higher_order.scan(
dist,
[z],
[x],
# dim=0, # 01/31/2025, not supported anymore
additional_inputs=[y],
)
return out[1]
yobx#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_lifted_tensor_0' type=float32 shape=(1,) -- array([0.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_12_cst2init' type=int64 shape=(1,) -- array([1]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
init: name='init7_s1_02_cst2init' type=int64 shape=(1,) -- array([0]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
Identity(x) -> hidden_input_scan_0_clone
Scan(c_lifted_tensor_0, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'batch']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_detach_,scan_0_x -> output_0,output_1
input: name='init_0_detach_' type=dtype('float32') shape=None
input: name='scan_0_x' type=dtype('float32') shape=None
Identity(init_0_detach_) -> output_0
Unsqueeze(scan_0_x, init7_s1_02_cst2init) -> reshape2
Sub(hidden_input_scan_0_clone, reshape2) -> sub2
Mul(sub2, sub2) -> mul2
ReduceSum(mul2, init7_s1_12_cst2init, keepdims=0) -> sum_12
Sqrt(sum_12) -> output_1
output: name='output_0' type=dtype('float32') shape=None
output: name='output_1' type=dtype('float32') shape=None
dynamo-ir#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='clone' type=float32 shape=(1,) -- array([0.], dtype=float32)
init: name='val_3' type=int64 shape=(2,) -- array([ 1, -1])
init: name='val_4' type=int64 shape=(1,) -- array([1])
Scan(clone, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_directions=[0]) -> scan__0, getitem_1
output: name='getitem_1' type=dtype('float32') shape=['batch', 'batch']
----- subgraph ---- Scan - node_scan__1 - att.body=G1 -- level=1 -- clone_scan_combine_graph_0__subgraph_in,x_scan_combine_graph_0__subgraph_in -> clone_scan_combine_graph_0,sqrt_scan_combine_graph_0
input: name='clone_scan_combine_graph_0__subgraph_in' type=dtype('float32') shape=[1]
input: name='x_scan_combine_graph_0__subgraph_in' type=dtype('float32') shape=[4]
Identity(clone_scan_combine_graph_0__subgraph_in) -> clone_scan_combine_graph_0
Reshape(x_scan_combine_graph_0__subgraph_in, val_3, allowzero=1) -> view
Sub(x, view) -> sub_1
Mul(sub_1, sub_1) -> mul_4
ReduceSum(mul_4, val_4, noop_with_empty_axes=0, keepdims=0) -> sum_1
Sqrt(sum_1) -> sqrt_scan_combine_graph_0
output: name='clone_scan_combine_graph_0' type=dtype('float32') shape=[1]
output: name='sqrt_scan_combine_graph_0' type=dtype('float32') shape=['batch']
tracing#
inputs:
#1[(T1s3x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='cst' type=float32 shape=(1,) -- array([0.], dtype=float32)-- _process_arg
init: name='init7_s1_12_cst2init' type=int64 shape=(1,) -- array([1]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
init: name='init7_s1_02_cst2init' type=int64 shape=(1,) -- array([0]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
Identity(x) -> hidden_input_scan_0_clone
Scan(cst, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scancc#0, output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_cst,scan_0_x -> output_0,output_1
input: name='init_0_cst' type=dtype('float32') shape=None
input: name='scan_0_x' type=dtype('float32') shape=None
Identity(init_0_cst) -> output_0
Unsqueeze(scan_0_x, init7_s1_02_cst2init) -> reshape2
Sub(hidden_input_scan_0_clone, reshape2) -> sub2
Mul(sub2, sub2) -> mul2
ReduceSum(mul2, init7_s1_12_cst2init, keepdims=0) -> sum_12
Sqrt(sum_12) -> output_1
output: name='output_0' type=dtype('float32') shape=None
output: name='output_1' type=dtype('float32') shape=None
new-tracing#
FAILED
There was no rule registered for HOP scan and subclass <class 'yobx.torch.new_tracing.tensor.TracingTensor'>. We recommend filing an issue.
ControlFlowScanCDistXY#
code: yobx.torch.testing._model_eval_cases.ControlFlowScanCDistXY
forward#
def forward(self, x, y):
def dist(y: torch.Tensor, scanned_x: torch.Tensor):
sub = y - scanned_x.reshape((1, -1))
sq = sub * sub
rd = torch.sqrt(sq.sum(dim=1))
# clone --> UnsupportedAliasMutationException:
# Combine_fn might be aliasing the input!
return [y.clone(), rd]
_carry, out = torch.ops.higher_order.scan(
dist,
[y],
[x],
# dim=0, # 01/31/2025, not supported anymore
additional_inputs=[],
)
return out
yobx#
inputs:
#2[(T1s3x4,T1s5x4),(T1s13x14,T1s15x14)]shapes:
dict(x:{0:Dim(x_rows),1:Dim(dim)},y:{0:Dim(y_rows),1:Dim(dim)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['x_rows', 'dim']
input: name='y' type=dtype('float32') shape=['y_rows', 'dim']
init: name='init7_s1_12_cst2init' type=int64 shape=(1,) -- array([1]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
init: name='init7_s1_02_cst2init' type=int64 shape=(1,) -- array([0]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
Scan(y, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scan#0, output_0
output: name='output_0' type=dtype('float32') shape=['x_rows', 'y_rows']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_y,scan_0_x -> output_0,output_1
input: name='init_0_y' type=dtype('float32') shape=None
input: name='scan_0_x' type=dtype('float32') shape=None
Identity(init_0_y) -> output_0
Unsqueeze(scan_0_x, init7_s1_02_cst2init) -> reshape2
Sub(init_0_y, reshape2) -> sub2
Mul(sub2, sub2) -> mul2
ReduceSum(mul2, init7_s1_12_cst2init, keepdims=0) -> sum_12
Sqrt(sum_12) -> output_1
output: name='output_0' type=dtype('float32') shape=None
output: name='output_1' type=dtype('float32') shape=None
dynamo-ir#
inputs:
#2[(T1s3x4,T1s5x4),(T1s13x14,T1s15x14)]shapes:
dict(x:{0:Dim(x_rows),1:Dim(dim)},y:{0:Dim(y_rows),1:Dim(dim)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['x_rows', 'dim']
input: name='y' type=dtype('float32') shape=['y_rows', 'dim']
init: name='val_5' type=int64 shape=(2,) -- array([ 1, -1])
init: name='val_6' type=int64 shape=(1,) -- array([1])
Scan(y, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_directions=[0]) -> scan__0, getitem_1
output: name='getitem_1' type=dtype('float32') shape=['x_rows', 'y_rows']
----- subgraph ---- Scan - node_scan__1 - att.body=G1 -- level=1 -- y_scan_combine_graph_0__subgraph_in,x_scan_combine_graph_0__subgraph_in -> clone_scan_combine_graph_0,sqrt_scan_combine_graph_0
input: name='y_scan_combine_graph_0__subgraph_in' type=dtype('float32') shape=['s17', 's27']
input: name='x_scan_combine_graph_0__subgraph_in' type=dtype('float32') shape=['s27']
Identity(y_scan_combine_graph_0__subgraph_in) -> clone_scan_combine_graph_0
Reshape(x_scan_combine_graph_0__subgraph_in, val_5, allowzero=1) -> view
Sub(y_scan_combine_graph_0__subgraph_in, view) -> sub_4
Mul(sub_4, sub_4) -> mul_7
ReduceSum(mul_7, val_6, noop_with_empty_axes=0, keepdims=0) -> sum_1
Sqrt(sum_1) -> sqrt_scan_combine_graph_0
output: name='clone_scan_combine_graph_0' type=dtype('float32') shape=['y_rows', 'dim']
output: name='sqrt_scan_combine_graph_0' type=dtype('float32') shape=['y_rows']
tracing#
inputs:
#2[(T1s3x4,T1s5x4),(T1s13x14,T1s15x14)]shapes:
dict(x:{0:Dim(x_rows),1:Dim(dim)},y:{0:Dim(y_rows),1:Dim(dim)})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['x_rows', 'dim']
input: name='y' type=dtype('float32') shape=['y_rows', 'dim']
init: name='init7_s1_12_cst2init' type=int64 shape=(1,) -- array([1]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
init: name='init7_s1_02_cst2init' type=int64 shape=(1,) -- array([0]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
Scan(y, x, body=G1, num_scan_inputs=1, scan_input_directions=[0], scan_output_axes=[0], scan_output_directions=[0]) -> scancc#0, output
output: name='output' type=dtype('float32') shape=['d_output_0', 'd_output_1']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- init_0_y,scan_0_x -> output_0,output_1
input: name='init_0_y' type=dtype('float32') shape=None
input: name='scan_0_x' type=dtype('float32') shape=None
Identity(init_0_y) -> output_0
Unsqueeze(scan_0_x, init7_s1_02_cst2init) -> reshape2
Sub(init_0_y, reshape2) -> sub2
Mul(sub2, sub2) -> mul2
ReduceSum(mul2, init7_s1_12_cst2init, keepdims=0) -> sum_12
Sqrt(sum_12) -> output_1
output: name='output_0' type=dtype('float32') shape=None
output: name='output_1' type=dtype('float32') shape=None
new-tracing#
FAILED
There was no rule registered for HOP scan and subclass <class 'yobx.torch.new_tracing.tensor.TracingTensor'>. We recommend filing an issue.
ControlFlowScanDecomposition_151564#
code: yobx.torch.testing._model_eval_cases.ControlFlowScanDecomposition_151564
forward#
def forward(self, images, position):
def dummy_loop(padded: torch.Tensor, pos: torch.Tensor):
copy = torch.zeros(padded.shape)
for i in range(pos.shape[0]):
p = pos[i]
copy[i, :p] = padded[i, :p]
return copy
def dummy_loop_with_scan(padded: torch.Tensor, pos: torch.Tensor):
def pad_row(padded, p):
row = torch.zeros((padded.shape[0],))
torch._check(p.item() > 0)
torch._check(p.item() < padded.shape[0])
# this check is not always true, we add it anyway to make this dimension >= 2
# and avoid raising an exception about dynamic dimension in {0, 1}
if torch.compiler.is_exporting():
torch._check(p.item() > 1)
row[: p.item()] = padded[: p.item()]
return (row,)
return torch.ops.higher_order.scan(pad_row, [], [padded, pos], [])
def select_when_exporting(f, f_scan):
return f_scan if torch.compiler.is_exporting() else f
return select_when_exporting(dummy_loop, dummy_loop_with_scan)(images, position)
yobx#
inputs:
#1[(T1s5x6,T7s5)]shapes:
dict(images:{0:DYNAMIC,1:DYNAMIC},position:{0:DYNAMIC})
opset: domain='' version=21
opset: domain='aten' version=1
opset: domain='local_functions' version=1
input: name='images' type=dtype('float32') shape=['batch_2', 'channel_1']
input: name='position' type=dtype('int64') shape=['batch_3']
init: name='init7_s1_02_cst2init' type=int64 shape=(1,) -- array([0]) -- GraphBuilderPatternOptimization.make_initializer.1/Shape
Scan(images, position, body=G1, num_scan_inputs=2, scan_input_directions=[0,0], scan_output_axes=[0], scan_output_directions=[0]) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch_2', 'channel_1']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- scan_0_images,scan_1_position -> output_0
input: name='scan_0_images' type=dtype('float32') shape=None
input: name='scan_1_position' type=dtype('int64') shape=None
Shape(scan_0_images, end=1, start=0) -> padded_1::Shape:12
ConstantOfShape(padded_1::Shape:12, value=[0.0]) -> zeros2
Unsqueeze(scan_1_position, init7_s1_02_cst2init) -> item::UnSq02
Slice(scan_0_images, init7_s1_02_cst2init, item::UnSq02, init7_s1_02_cst2init) -> slice_12
aten_setitem[aten](zeros2, scan_1_position, slice_12) -> output_0
output: name='output_0' type=dtype('float32') shape=None
----- function name=aten_setitem domain=aten
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=21
input: 'zeros'
input: 'item'
input: 'slice_1'
Constant(value=[0]) -> init7_s1_0
Unsqueeze(item, init7_s1_0) -> item::UnSq0
Shape(zeros) -> zeros::Shape:
Slice(zeros, item::UnSq0, zeros::Shape:, init7_s1_0) -> _onx_slice_zeros
Concat(slice_1, _onx_slice_zeros, axis=0) -> setitem
output: name='setitem' type=? shape=?
dynamo-ir#
inputs:
#1[(T1s5x6,T7s5)]shapes:
dict(images:{0:DYNAMIC,1:DYNAMIC},position:{0:DYNAMIC})
opset: domain='' version=20
input: name='images' type=dtype('float32') shape=['s34', 's90']
input: name='position' type=dtype('int64') shape=['s71']
init: name='val_13' type=int64 shape=(1,) -- array([0])
init: name='val_37' type=int64 shape=(1,) -- array([1])
init: name='val_1' type=float32 shape=() -- array([0.], dtype=float32)
init: name='val_5' type=int64 shape=(1,) -- array([-1])
init: name='val_7' type=int64 shape=() -- array([0])
init: name='val_10' type=int64 shape=() -- array([1])
Scan(images, position, body=G1, num_scan_inputs=2, scan_input_directions=[0,0], scan_output_directions=[0]) -> getitem
output: name='getitem' type=dtype('float32') shape=['s34', 's90']
----- subgraph ---- Scan - node_scan__0 - att.body=G1 -- level=1 -- images_scan_combine_graph_0__subgraph_in,position_scan_combine_graph_0__subgraph_in -> slice_scatter_scan_combine_graph_0
input: name='images_scan_combine_graph_0__subgraph_in' type=dtype('float32') shape=['s90']
input: name='position_scan_combine_graph_0__subgraph_in' type=dtype('int64') shape=None
Reshape(position_scan_combine_graph_0__subgraph_in, val_5, allowzero=0) -> val_6
Gather(val_6, val_7, axis=0) -> val_8
Reshape(val_8, val_5, allowzero=0) -> val_16
Slice(images_scan_combine_graph_0__subgraph_in, val_13, val_16, val_13, val_37) -> copy
Shape(images_scan_combine_graph_0__subgraph_in, end=1, start=0) -> val_0
Expand(val_1, val_0) -> zeros
Shape(zeros, start=0) -> val_32
Gather(val_32, val_7, axis=0) -> val_33
Range(val_7, val_33, val_10) -> val_34
Unsqueeze(val_8, val_13) -> val_36
Slice(val_34, val_13, val_36, val_13, val_37) -> val_38
Unsqueeze(val_38, val_5) -> val_39
ScatterND(zeros, val_39, copy, reduction=b'none') -> slice_scatter_scan_combine_graph_0
output: name='slice_scatter_scan_combine_graph_0' type=dtype('float32') shape=['s90']
tracing#
FAILED
'CustomProxyInt' object cannot be interpreted as an integer
new-tracing#
FAILED
zeros(): argument 'size' (position 1) must be tuple of ints, not TracingShape
ControlFlowScanInplace_153705#
code: yobx.torch.testing._model_eval_cases.ControlFlowScanInplace_153705
forward#
def forward(self, x, y):
def loop_body_1(z, iv, x, y):
z = z.clone()
i = iv.item()
z[i, :] = ((x[i, :] - y) ** 2).sum(dim=-1)
return [z, iv]
z = torch.empty((x.shape[0], y.shape[0]))
r = torch.ops.higher_order.scan(
loop_body_1, [z], [torch.arange(x.shape[0], dtype=torch.int64)], [x, y]
)
return r[0]
yobx#
FAILED
only integers, slices (`:`), ellipsis (`...`), None and long or byte Variables are valid indices (got SymInt)
dynamo-ir#
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'IndexError'>: only integers, slices (`:`), ellipsis (`...`), None and long or byte Variables are valid indices (got SymInt)
(Refer to the full stack trace above for more information.)
tracing#
FAILED
Unable to interpret function <class 'builtin_function_or_method'>: <built-in method empty of type object at 0x7e28f4744240>, searched for ['transformers_empty', '_VariableFunctionsClass_empty', 'empty'] and attributes ['__qualname__', '__name__'], args=((getitem, getitem_2),), kwargs={}, dispatcher=None
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-SII] Message starts, there are 2 initializers, 10 nodes, 2 inputs, 2 outputs.
input_names=['x', 'y']
output_names=[]
--CONSTRAINTS--
DYN0 = {'s26'}
DYN1 = {'s49'}
DYN2 = {'s93'}
DYN3 = {'s70'}
s26 = {'DYN0'}
s49 = {'DYN1'}
s70 = {'DYN3'}
s93 = {'DYN2'}
--SHAPE--
_dynamic_examples=
dynamic_objects=
DYN0 = 'DYN0'
DYN1 = 'DYN1'
DYN2 = 'DYN2'
DYN3 = 'DYN3'
s26 = 's26'
s49 = 's49'
s70 = 's70'
s93 = 's93'
dynamic_objects_rev=
'DYN0' = <class 'list'>
tuple
'DYN0'
ERR**: <class 'torch.SymInt'>:'DYN0'
'DYN1' = <class 'list'>
tuple
'DYN1'
ERR**: <class 'torch.SymInt'>:'DYN1'
'DYN2' = <class 'list'>
tuple
'DYN2'
ERR**: <class 'torch.SymInt'>:'DYN2'
'DYN3' = <class 'list'>
tuple
'DYN3'
ERR**: <class 'torch.SymInt'>:'DYN3'
dynamic_dimensions_source={'DYN0': [{'axis': 0, 'input_name': 'x'}],
'DYN1': [{'axis': 1, 'input_name': 'x'}],
'DYN2': [{'axis': 0, 'input_name': 'y'}],
'DYN3': [{'axis': 1, 'input_name': 'y'}]}
dynamic_dimensions_source_flat=['x', 'y']
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s26': 'DYN0', 's49': 'DYN1', 's70': 'DYN3', 's93': 'DYN2'}
dynamic_shapes={'x': {0: Dim('DYN0', min=0), 1: Dim('DYN1', min=0)},
'y': {0: Dim('DYN2', min=0), 1: Dim('DYN3', min=0)}}
_known_shapes={'_onx_gather_size': (1,),
'_onx_gather_size2': (1,),
'_onx_gather_size_1': (1,),
'_onx_gather_size_12': (1,),
'getitem': (),
'getitem_1': (),
'getitem_2': (),
'getitem_3': (),
'init7_s1_0': (1,),
'init7_s1_1': (1,),
'size': (2,),
'size_1': (2,),
'x': ('DYN0', 'DYN1'),
'y': ('DYN2', 'DYN3')}
_known_types={'_onx_gather_size': 7,
'_onx_gather_size2': 7,
'_onx_gather_size_1': 7,
'_onx_gather_size_12': 7,
'getitem': 7,
'getitem_1': 7,
'getitem_2': 7,
'getitem_3': 7,
'init7_s1_0': 7,
'init7_s1_1': 7,
'size': 7,
'size_1': 7,
'x': 1,
'y': 1}
_known_devices={'_onx_gather_size': -1,
'_onx_gather_size2': -1,
'_onx_gather_size_1': -1,
'_onx_gather_size_12': -1,
'getitem': -1,
'getitem_1': -1,
'getitem_2': -1,
'getitem_3': -1,
'size': -1,
'size_1': -1,
'x': -1,
'y': -1}
_context=[]
_known_value_shape={'_onx_gather_size': ('DYN0',),
'_onx_gather_size2': ('DYN1',),
'_onx_gather_size_1': ('DYN2',),
'_onx_gather_size_12': ('DYN3',),
'getitem': 'DYN0',
'getitem_1': 'DYN1',
'getitem_2': 'DYN2',
'getitem_3': 'DYN3',
'init7_s1_0': (0,),
'init7_s1_1': (1,),
'size': ('DYN0', 'DYN1'),
'size_1': ('DYN2', 'DYN3')}
_known_constants=['init7_s1_0', 'init7_s1_1']
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
empty -> {scancc}
getitem -> {empty}
getitem_1 -> set()
getitem_2 -> {empty}
getitem_3 -> set()
size -> {getitem_1, getitem}
size_1 -> {getitem_3, getitem_2}
x -> {size_2, size, scancc}
y -> {size_1, scancc}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([3, 4])), ('val', torch.float32, torch.Size([s26, s49])))) --- 1:2:('DYN0', 'DYN1'):
y: ('run_node', (('example_value', torch.float32, torch.Size([5, 4])), ('val', torch.float32, torch.Size([s93, s70])))) --- 1:2:('DYN2', 'DYN3'):
size: ('run_node', ('', '')) --- 7:1:(2,):
getitem: ('run_node', ('', '')) --- 7:0:():
getitem_1: ('run_node', ('', '')) --- 7:0:():
size_1: ('run_node', ('', '')) --- 7:1:(2,):
getitem_2: ('run_node', ('', '')) --- 7:0:():
getitem_3: ('run_node', ('', '')) --- 7:0:():
empty: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(tracing=TracingMode.TRACING, aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
ControlFlowScanInplace_153705()
def forward(self, x, y):
size = x.size()
getitem = size[0]
getitem_1 = size[1]; size = getitem_1 = None
size_1 = y.size()
getitem_2 = size_1[0]
getitem_3 = size_1[1]; size_1 = getitem_3 = None
empty = torch.empty((getitem, getitem_2)); getitem = getitem_2 = None
size_2 = x.size()
getitem_4 = size_2[0]
getitem_5 = size_2[1]; size_2 = getitem_5 = None
arange = torch.arange(getitem_4, dtype = torch.int64); getitem_4 = None
_cb_scan_loop_body_1_0 = self._cb_scan_loop_body_1_0
scancc = torch.ops.higher_order.scan(_cb_scan_loop_body_1_0, [empty], [arange], [x, y]); _cb_scan_loop_body_1_0 = empty = arange = x = y = None
getitem_6 = scancc[0]; scancc = None
return getitem_6
# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
%x : [num_users=3] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%size : [num_users=2] = call_method[target=size](args = (%x,), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%size, 0), kwargs = {})
%getitem_1 : [num_users=0] = call_function[target=operator.getitem](args = (%size, 1), kwargs = {})
%size_1 : [num_users=2] = call_method[target=size](args = (%y,), kwargs = {})
%getitem_2 : [num_users=1] = call_function[target=operator.getitem](args = (%size_1, 0), kwargs = {})
%getitem_3 : [num_users=0] = call_function[target=operator.getitem](args = (%size_1, 1), kwargs = {})
%empty : [num_users=1] = call_function[target=torch.empty](args = ((%getitem, %getitem_2),), kwargs = {})
%size_2 : [num_users=2] = call_method[target=size](args = (%x,), kwargs = {})
%getitem_4 : [num_users=1] = call_function[target=operator.getitem](args = (%size_2, 0), kwargs = {})
%getitem_5 : [num_users=0] = call_function[target=operator.getitem](args = (%size_2, 1), kwargs = {})
%arange : [num_users=1] = call_function[target=torch.arange](args = (%getitem_4,), kwargs = {dtype: torch.int64})
%_cb_scan_loop_body_1_0 : [num_users=1] = get_attr[target=_cb_scan_loop_body_1_0]
%scancc : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%_cb_scan_loop_body_1_0, [%empty], [%arange], [%x, %y]), kwargs = {})
%getitem_6 : [num_users=1] = call_function[target=operator.getitem](args = (%scancc, 0), kwargs = {})
return getitem_6
-- process.inputs_to_remove --
set()
-- process.progress --
node 8/17 target=<built-in method empty of type object at 0x7e28f4744240>
-- 2 INPUTS
[GraphBuilder-SII.1.make_tensor_input] x[1:DYN0xDYN1]
[GraphBuilder-SII.1.make_tensor_input] y[1:DYN2xDYN3]
-- 2 INITIALIZERS
[GraphBuilder-SII.1.make_initializer] init7_s1_0[int64:int64:[0]] - SOURCE: Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
[GraphBuilder-SII.1.make_initializer] init7_s1_1[int64:int64:[1]] - SOURCE: Opset.make_node.1/Shape##Opset.make_node.1/Shape
[GraphBuilder-SII.4.make_node] .sizeA [@:@ ] Shape:['x']->['size']
[GraphBuilder-SII.4.make_node] getitemB_index [@#:@ ] Gather:['size', 'init7_s1_0']->['_onx_gather_size']
[GraphBuilder-SII.4.make_node] getitemB_index2 [@#:@ ] Squeeze:['_onx_gather_size', 'init7_s1_0']->['getitem']
[GraphBuilder-SII.4.make_node] getitemB_index3 [@#:@ ] Gather:['size', 'init7_s1_1']->['_onx_gather_size2']
[GraphBuilder-SII.4.make_node] getitemB_index4 [@#:@ ] Squeeze:['_onx_gather_size2', 'init7_s1_0']->['getitem_1']
[GraphBuilder-SII.4.make_node] .sizeA2 [@:@ ] Shape:['y']->['size_1']
[GraphBuilder-SII.4.make_node] getitemB_index5 [@#:@ ] Gather:['size_1', 'init7_s1_0']->['_onx_gather_size_1']
[GraphBuilder-SII.4.make_node] getitemB_index6 [@#:@ ] Squeeze:['_onx_gather_size_1', 'init7_s1_0']->['getitem_2']
[GraphBuilder-SII.4.make_node] getitemB_index7 [@#:@ ] Gather:['size_1', 'init7_s1_1']->['_onx_gather_size_12']
[GraphBuilder-SII.4.make_node] getitemB_index8 [@#:@ ] Squeeze:['_onx_gather_size_12', 'init7_s1_0']->['getitem_3']
-- 0 OUTPUTS
[GraphBuilder-SII] Message completed, there are 2 initializers, 10 nodes, 2 inputs, 2 outputs.,
new-tracing#
FAILED
empty(): argument 'size' (position 1) must be tuple of ints, but found element of type TracingInt at pos 0
ControlFlowShapeCheck#
code: yobx.torch.testing._model_eval_cases.ControlFlowShapeCheck
forward#
def forward(self, x, y):
x1 = x + 1
y1 = y + 2
cat = torch.cat([x1, y1], dim=1)
torch._check(cat.shape[0] > 0, "batch size must be positive")
if cat.shape[0] > 2:
return cat / cat.shape[0]
return cat / cat.ndim
yobx#
FAILED
Constraints violated (batch)! For more information, run with TORCH_LOGS="+dynamic".
- Not all values of batch = L['x'].size()[0] in the specified range satisfy the generated guard 3 <= L['x'].size()[0] and L['x'].size()[0] <= IntInfinity()
Suggested fixes:
batch = Dim('batch', min=3)
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
dynamo-ir#
inputs:
#2[(T1s3x4,T1s3x4),(T1s5x4,T1s5x2)]shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(batch),1:Dim(seq)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
input: name='y' type=dtype('float32') shape=['batch', 'seq']
init: name='scalar_tensor_default' type=float32 shape=() -- array([1.], dtype=float32)
init: name='scalar_tensor_default_1' type=float32 shape=() -- array([2.], dtype=float32)
Add(x, scalar_tensor_default) -> add
Shape(y, end=1, start=0) -> val_1
Squeeze(val_1) -> sym_size_int_5
Cast(sym_size_int_5, to=1) -> scalar_tensor_default_2
Add(y, scalar_tensor_default_1) -> add_4
Concat(add, add_4, axis=1) -> cat
Div(cat, scalar_tensor_default_2) -> div
output: name='div' type=dtype('float32') shape=['batch', 'seq + 4']
tracing#
FAILED
symbolically traced variables cannot be used as inputs to control flow
new-tracing#
FAILED
# no error found for the failure
ControlFlowWhileDec#
code: yobx.torch.testing._model_eval_cases.ControlFlowWhileDec
forward#
def forward(self, ci, a, b):
def cond_fn(i, x, y):
return i > 0
def body_fn(i, x, y):
return i - 1, x + y, y - x
return torch._higher_order_ops.while_loop(cond_fn, body_fn, [ci, a, b])
yobx#
FAILED
Found the following conflicts between user-specified ranges and inferred ranges from model tracing:
- Received user-specified dim hint Dim.DYNAMIC(min=None, max=None), but tracing inferred a static shape of 3 for dimension inputs['a'].shape[1].
dynamo-ir#
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'ValueError'>: Found the following conflicts between user-specified ranges and inferred ranges from model tracing:
- Received user-specified dim hint Dim.DYNAMIC(min=None, max=None), but tracing inferred a static shape of 3 for dimension inputs['a'].shape[1].
(Refer to the full stack trace above for more information.)
tracing#
FAILED
[CustomProxy(ci), CustomProxy(a), CustomProxy(b)] can only be of (<class 'torch.Tensor'>, <class 'int'>, <class 'torch.SymInt'>) but got (<class 'yobx.torch.tracing.CustomProxy'>, <class 'yobx.torch.tracing.CustomProxy'>, <class 'yobx.torch.tracing.CustomProxy'>)
new-tracing#
FAILED
from user code:
File "~/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/while_loop.py", line 249, in _while_loop_op_wrapper
return while_loop_op(*args, **kwargs)
Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"
ControlFlowWhileInc#
code: yobx.torch.testing._model_eval_cases.ControlFlowWhileInc
forward#
def forward(self, ci, a, b):
def cond_fn(i, x, y):
return i < x.size(0)
def body_fn(i, x, y):
return i + 1, x + y, y - x
return torch._higher_order_ops.while_loop(cond_fn, body_fn, [ci, a, b])
yobx#
FAILED
Found the following conflicts between user-specified ranges and inferred ranges from model tracing:
- Received user-specified dim hint Dim.DYNAMIC(min=None, max=None), but tracing inferred a static shape of 3 for dimension inputs['a'].shape[1].
dynamo-ir#
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'ValueError'>: Found the following conflicts between user-specified ranges and inferred ranges from model tracing:
- Received user-specified dim hint Dim.DYNAMIC(min=None, max=None), but tracing inferred a static shape of 3 for dimension inputs['a'].shape[1].
(Refer to the full stack trace above for more information.)
tracing#
FAILED
[CustomProxy(ci), CustomProxy(a), CustomProxy(b)] can only be of (<class 'torch.Tensor'>, <class 'int'>, <class 'torch.SymInt'>) but got (<class 'yobx.torch.tracing.CustomProxy'>, <class 'yobx.torch.tracing.CustomProxy'>, <class 'yobx.torch.tracing.CustomProxy'>)
new-tracing#
FAILED
from user code:
File "~/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/while_loop.py", line 249, in _while_loop_op_wrapper
return while_loop_op(*args, **kwargs)
Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"
CreateFromShape#
code: yobx.torch.testing._model_eval_cases.CreateFromShape
forward#
def forward(self, x):
y = torch.ones((x.shape[0], x.shape[1] + 1))
return y
yobx#
inputs:
#2[(T1s4x4,),(T1s5x5,)]
shapes:
dict(x:{0:Dim(dx),1:Dim(dy)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='SqueezeBinaryUnsqueezePattern_init7_s_1' type=int64 shape=(1,) -- array([1])-- GraphBuilder.constant_folding.from/fold(init7_s1_0,init7_s_1)##init7_s_1/shape_type_compute._cast_inputs.1(add)##init7_s1_0/Opset.make_node.1/Shape##Opset.make_node.1/Shape
Shape(x, end=1, start=0) -> x::Shape:1
Shape(x, end=2, start=1) -> x::Shape1:2
Add(x::Shape1:2, SqueezeBinaryUnsqueezePattern_init7_s_1) -> add::UnSq0
Concat(x::Shape:1, add::UnSq0, axis=0) -> _onx_concat_sym_size_int_2::UnSq0
ConstantOfShape(_onx_concat_sym_size_int_2::UnSq0, value=[1.0]) -> output_0
output: name='output_0' type=dtype('float32') shape=['dx', 'dy']
dynamo-ir#
inputs:
#2[(T1s4x4,),(T1s5x5,)]
shapes:
dict(x:{0:Dim(dx),1:Dim(dy)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='val_10' type=float32 shape=() -- array([1.], dtype=float32)
init: name='val_2' type=int64 shape=() -- array([1])
init: name='val_5' type=int64 shape=(1,) -- array([-1])
Shape(x, end=1, start=0) -> val_0
Shape(x, end=2, start=1) -> val_1
Squeeze(val_1) -> sym_size_int_3
Add(sym_size_int_3, val_2) -> add
Reshape(add, val_5, allowzero=0) -> val_6
Concat(val_0, val_6, axis=0) -> val_7
Expand(val_10, val_7) -> ones
output: name='ones' type=dtype('float32') shape=['dx', 'dy + 1']
tracing#
inputs:
#2[(T1s4x4,),(T1s5x5,)]
shapes:
dict(x:{0:Dim(dx),1:Dim(dy)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- shape_type_compute._cast_inputs.1(add)
Shape(x) -> size
Split(size, axis=0, num_outputs=2) -> _onx_gather_size, _onx_gather_size_12
Squeeze(_onx_gather_size_12, init7_s1_0) -> getitem_3
Add(getitem_3, init7_s_1) -> _onx_add_getitem_3
Unsqueeze(_onx_add_getitem_3, init7_s1_0) -> add::UnSq0
Concat(_onx_gather_size, add::UnSq0, axis=0) -> _onx_concat_getitem::UnSq0
ConstantOfShape(_onx_concat_getitem::UnSq0, value=[1.0]) -> output
output: name='output' type=dtype('float32') shape=['dx', 'add']
new-tracing#
FAILED
ones(): argument 'size' (position 1) must be tuple of ints, but found element of type TracingInt at pos 0
CreateFromShapeThroughFunction#
code: yobx.torch.testing._model_eval_cases.CreateFromShapeThroughFunction
forward#
def forward(self, x):
def add_one(dim):
return dim + 1
dy1 = add_one(x.shape[1])
y = torch.ones((x.shape[0], dy1))
return y
yobx#
inputs:
#1[(T1s4x4,)]
shapes:
dict(x:{0:Dim(dx),1:Dim(dy)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='SqueezeBinaryUnsqueezePattern_init7_s_1' type=int64 shape=(1,) -- array([1])-- GraphBuilder.constant_folding.from/fold(init7_s1_0,init7_s_1)##init7_s_1/shape_type_compute._cast_inputs.1(add)##init7_s1_0/Opset.make_node.1/Shape##Opset.make_node.1/Shape
Shape(x, end=1, start=0) -> x::Shape:1
Shape(x, end=2, start=1) -> x::Shape1:2
Add(x::Shape1:2, SqueezeBinaryUnsqueezePattern_init7_s_1) -> add::UnSq0
Concat(x::Shape:1, add::UnSq0, axis=0) -> _onx_concat_sym_size_int_2::UnSq0
ConstantOfShape(_onx_concat_sym_size_int_2::UnSq0, value=[1.0]) -> output_0
output: name='output_0' type=dtype('float32') shape=['dx', 'dy']
dynamo-ir#
inputs:
#1[(T1s4x4,)]
shapes:
dict(x:{0:Dim(dx),1:Dim(dy)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='val_10' type=float32 shape=() -- array([1.], dtype=float32)
init: name='val_2' type=int64 shape=() -- array([1])
init: name='val_5' type=int64 shape=(1,) -- array([-1])
Shape(x, end=1, start=0) -> val_0
Shape(x, end=2, start=1) -> val_1
Squeeze(val_1) -> sym_size_int_3
Add(sym_size_int_3, val_2) -> add
Reshape(add, val_5, allowzero=0) -> val_6
Concat(val_0, val_6, axis=0) -> val_7
Expand(val_10, val_7) -> ones
output: name='ones' type=dtype('float32') shape=['dx', 'dy + 1']
tracing#
inputs:
#1[(T1s4x4,)]
shapes:
dict(x:{0:Dim(dx),1:Dim(dy)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['dx', 'dy']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- shape_type_compute._cast_inputs.1(add)
Shape(x) -> size
Split(size, axis=0, num_outputs=2) -> _onx_gather_size_1, _onx_gather_size2
Squeeze(_onx_gather_size2, init7_s1_0) -> getitem_1
Add(getitem_1, init7_s_1) -> _onx_add_getitem_1
Unsqueeze(_onx_add_getitem_1, init7_s1_0) -> add::UnSq0
Concat(_onx_gather_size_1, add::UnSq0, axis=0) -> _onx_concat_getitem_2::UnSq0
ConstantOfShape(_onx_concat_getitem_2::UnSq0, value=[1.0]) -> output
output: name='output' type=dtype('float32') shape=['dx', 'add']
new-tracing#
FAILED
ones(): argument 'size' (position 1) must be tuple of ints, but found element of type TracingInt at pos 0
CropLastDimensionWithTensorContent#
code: yobx.torch.testing._model_eval_cases.CropLastDimensionWithTensorContent
forward#
def forward(self, x, shape):
return x[..., : shape.item()]
yobx#
inputs:
#2[(T1s3x4x4,T7s1),(T1s6x4x4,T7s1)]
shapes:
dict(x:{0:Dim(batch)},shape:{})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='shape' type=dtype('int64') shape=[1]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape
Slice(x, init7_s1_0, shape, init7_s1_2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4, 'item']
dynamo-ir#
inputs:
#2[(T1s3x4x4,T7s1),(T1s6x4x4,T7s1)]
shapes:
dict(x:{0:Dim(batch)},shape:{})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='shape' type=dtype('int64') shape=[1]
init: name='val_11' type=int64 shape=(1,) -- array([2])
init: name='val_0' type=int64 shape=(1,) -- array([-1])
init: name='val_2' type=int64 shape=() -- array([0])
init: name='val_4' type=int64 shape=(1,) -- array([0])
init: name='val_12' type=int64 shape=(1,) -- array([1])
Reshape(shape, val_0, allowzero=0) -> val_1
Gather(val_1, val_2, axis=0) -> val_3
Reshape(val_3, val_0, allowzero=0) -> val_7
Slice(x, val_4, val_7, val_11, val_12) -> slice_1
output: name='slice_1' type=dtype('float32') shape=['batch', 4, 'u1']
tracing#
inputs:
#2[(T1s3x4x4,T7s1),(T1s6x4x4,T7s1)]
shapes:
dict(x:{0:Dim(batch)},shape:{})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='shape' type=dtype('int64') shape=[1]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='getitem_axis' type=int64 shape=(1,) -- array([-1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_step' type=int64 shape=(1,) -- array([1]) -- DynamoInterpreter._getitem_slice.3
Slice(x, init7_s1_0, shape, getitem_axis, getitem_step) -> output
output: name='output' type=dtype('float32') shape=['batch', 4, 'item']
new-tracing#
FAILED
Cannot determine if it is a constant argument SymInt
CropLastDimensionWithTensorShape#
code: yobx.torch.testing._model_eval_cases.CropLastDimensionWithTensorShape
forward#
def forward(self, x, y):
return x[..., : y.shape[0]]
yobx#
inputs:
#2[(T1s3x4x4,T1s2),(T1s6x4x4,T1s3)]
shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(crop)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='y' type=dtype('float32') shape=['crop']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape
Shape(y, end=1, start=0) -> y::Shape:1
Slice(x, init7_s1_0, y::Shape:1, init7_s1_2) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4, 'crop']
dynamo-ir#
inputs:
#2[(T1s3x4x4,T1s2),(T1s6x4x4,T1s3)]
shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(crop)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='y' type=dtype('float32') shape=['crop']
init: name='val_8' type=int64 shape=(1,) -- array([2])
init: name='val_1' type=int64 shape=(1,) -- array([0])
init: name='val_9' type=int64 shape=(1,) -- array([1])
Shape(y, end=1, start=0) -> val_0
Slice(x, val_1, val_0, val_8, val_9) -> slice_1
output: name='slice_1' type=dtype('float32') shape=['batch', 4, 'crop']
tracing#
inputs:
#2[(T1s3x4x4,T1s2),(T1s6x4x4,T1s3)]
shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(crop)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4, 4]
input: name='y' type=dtype('float32') shape=['crop']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='getitem_1_axis' type=int64 shape=(1,) -- array([-1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_1_step' type=int64 shape=(1,) -- array([1]) -- DynamoInterpreter._getitem_slice.3
Shape(y) -> size
Gather(size, init7_s1_0) -> _onx_gather_size
Slice(x, init7_s1_0, _onx_gather_size, getitem_1_axis, getitem_1_step) -> output
output: name='output' type=dtype('float32') shape=['batch', 4, 'crop']
new-tracing#
FAILED
TracingInt('crop') has no concrete integer value; pass a concrete int or check .value
ExportWithDimension0#
code: yobx.torch.testing._model_eval_cases.ExportWithDimension0
forward#
def forward(self, x):
return x @ torch.arange(x.shape[1], dtype=torch.float32).reshape((-1, 1))
yobx#
inputs:
#1[(T1s0x3,)]
shapes:
dict(x:{0:DYNAMIC,1:DYNAMIC})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch_1', 'channel_1']
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- Opset.make_node.1/Small
init: name='init1_s_2' type=float32 shape=() -- array([1.], dtype=float32)-- Opset.make_node.1/Small
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- ReshapeIsSqueezePattern.m1
Shape(x, end=2, start=1) -> x::Shape1:2
Squeeze(x::Shape1:2) -> sym_size_int_2
Cast(sym_size_int_2, to=1) -> sym_size_int_2::C1
Range(init1_s_, sym_size_int_2::C1, init1_s_2) -> arange
Unsqueeze(arange, init7_s1_1) -> reshape
MatMul(x, reshape) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch_1', 1]
dynamo-ir#
inputs:
#1[(T1s0x3,)]
shapes:
dict(x:{0:DYNAMIC,1:DYNAMIC})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['s77', 's27']
init: name='val_3' type=float32 shape=() -- array([0.], dtype=float32)
init: name='val_5' type=float32 shape=() -- array([1.], dtype=float32)
init: name='val_9' type=int64 shape=(2,) -- array([-1, 1])
Shape(x, end=2, start=1) -> val_0
Squeeze(val_0) -> sym_size_int_5
Cast(sym_size_int_5, to=1) -> val_1
Range(val_3, val_1, val_5) -> arange
Reshape(arange, val_9, allowzero=1) -> view
MatMul(x, view) -> matmul
output: name='matmul' type=dtype('float32') shape=['s77', 1]
tracing#
inputs:
#1[(T1s0x3,)]
shapes:
dict(x:{0:DYNAMIC,1:DYNAMIC})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch_1', 'channel_1']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##ReshapeIsSqueezePattern.m1
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- Opset.make_node.1/Small
init: name='init1_s_2' type=float32 shape=() -- array([1.], dtype=float32)-- Opset.make_node.1/Small
Shape(x) -> size
Gather(size, init7_s1_1) -> _onx_gather_size2
Squeeze(_onx_gather_size2, init7_s1_0) -> getitem_1
Cast(getitem_1, to=1) -> getitem_1::C1
Range(init1_s_, getitem_1::C1, init1_s_2) -> arange
Unsqueeze(arange, init7_s1_1) -> reshape
MatMul(x, reshape) -> output
output: name='output' type=dtype('float32') shape=['batch_1', 1]
new-tracing#
FAILED
arange() received an invalid combination of arguments - got (TracingInt, dtype=torch.dtype), but expected one of:
* (Number end, *, Tensor out = None, torch.dtype dtype = None, torch.layout layout = None, torch.device device = None, bool pin_memory = False, bool requires_grad = False)
* (Number start, Number end, *, torch.dtype dtype = None, torch.layout layout = None, torch.device device = None, bool pin_memory = False, bool requires_grad = False)
* (Number start, Number end, Number step = 1, *, Tensor out = None, torch.dtype dtype = None, torch.layout layout = None, torch.device device = None, bool pin_memory = False, bool requires_grad = False)
ExportWithDimension1#
code: yobx.torch.testing._model_eval_cases.ExportWithDimension1
forward#
def forward(self, x):
return x @ torch.arange(x.shape[1], dtype=torch.float32).reshape((-1, 1))
yobx#
inputs:
#1[(T1s1x3,)]
shapes:
dict(x:{0:DYNAMIC,1:DYNAMIC})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch_1', 'channel_1']
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- Opset.make_node.1/Small
init: name='init1_s_2' type=float32 shape=() -- array([1.], dtype=float32)-- Opset.make_node.1/Small
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- ReshapeIsSqueezePattern.m1
Shape(x, end=2, start=1) -> x::Shape1:2
Squeeze(x::Shape1:2) -> sym_size_int_2
Cast(sym_size_int_2, to=1) -> sym_size_int_2::C1
Range(init1_s_, sym_size_int_2::C1, init1_s_2) -> arange
Unsqueeze(arange, init7_s1_1) -> reshape
MatMul(x, reshape) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch_1', 1]
dynamo-ir#
inputs:
#1[(T1s1x3,)]
shapes:
dict(x:{0:DYNAMIC,1:DYNAMIC})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['s77', 's27']
init: name='val_3' type=float32 shape=() -- array([0.], dtype=float32)
init: name='val_5' type=float32 shape=() -- array([1.], dtype=float32)
init: name='val_9' type=int64 shape=(2,) -- array([-1, 1])
Shape(x, end=2, start=1) -> val_0
Squeeze(val_0) -> sym_size_int_5
Cast(sym_size_int_5, to=1) -> val_1
Range(val_3, val_1, val_5) -> arange
Reshape(arange, val_9, allowzero=1) -> view
MatMul(x, view) -> matmul
output: name='matmul' type=dtype('float32') shape=['s77', 1]
tracing#
inputs:
#1[(T1s1x3,)]
shapes:
dict(x:{0:DYNAMIC,1:DYNAMIC})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch_1', 'channel_1']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##ReshapeIsSqueezePattern.m1
init: name='init1_s_' type=float32 shape=() -- array([0.], dtype=float32)-- Opset.make_node.1/Small
init: name='init1_s_2' type=float32 shape=() -- array([1.], dtype=float32)-- Opset.make_node.1/Small
Shape(x) -> size
Gather(size, init7_s1_1) -> _onx_gather_size2
Squeeze(_onx_gather_size2, init7_s1_0) -> getitem_1
Cast(getitem_1, to=1) -> getitem_1::C1
Range(init1_s_, getitem_1::C1, init1_s_2) -> arange
Unsqueeze(arange, init7_s1_1) -> reshape
MatMul(x, reshape) -> output
output: name='output' type=dtype('float32') shape=['batch_1', 1]
new-tracing#
FAILED
arange() received an invalid combination of arguments - got (TracingInt, dtype=torch.dtype), but expected one of:
* (Number end, *, Tensor out = None, torch.dtype dtype = None, torch.layout layout = None, torch.device device = None, bool pin_memory = False, bool requires_grad = False)
* (Number start, Number end, *, torch.dtype dtype = None, torch.layout layout = None, torch.device device = None, bool pin_memory = False, bool requires_grad = False)
* (Number start, Number end, Number step = 1, *, Tensor out = None, torch.dtype dtype = None, torch.layout layout = None, torch.device device = None, bool pin_memory = False, bool requires_grad = False)
ExportWithNewConstant#
code: yobx.torch.testing._model_eval_cases.ExportWithNewConstant
forward#
def forward(self, x):
new_shape = (x.shape[0], 1)
ones = torch.ones(new_shape, dtype=x.dtype, device=x.device)
return torch.cat([x, ones], dim=1)
yobx#
inputs:
#2[(T1s4x4,),(T1s5x6,)]
shapes:
dict(x:{0:Dim(batch),1:Dim(seq)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 'seq']
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- GraphBuilder.make_shape_from_results.conc
Shape(x, end=1, start=0) -> x::Shape:1
Concat(x::Shape:1, init7_s1_1, axis=0) -> _onx_concat_sym_size_int_1::UnSq0
ConstantOfShape(_onx_concat_sym_size_int_1::UnSq0, value=[1.0]) -> ones
Concat(x, ones, axis=1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'seq+1']
dynamo-ir#
inputs:
#2[(T1s4x4,),(T1s5x6,)]
shapes:
dict(x:{0:Dim(batch),1:Dim(seq)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 'seq']
init: name='val_7' type=float32 shape=() -- array([1.], dtype=float32)
init: name='val_3' type=int64 shape=(1,) -- array([1])
Shape(x, end=1, start=0) -> val_0
Concat(val_0, val_3, axis=0) -> val_4
Expand(val_7, val_4) -> ones
Concat(x, ones, axis=1) -> cat
output: name='cat' type=dtype('float32') shape=['batch', 'seq + 1']
tracing#
inputs:
#2[(T1s4x4,),(T1s5x6,)]
shapes:
dict(x:{0:Dim(batch),1:Dim(seq)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 'seq']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##GraphBuilder.make_shape_from_results.conc
Shape(x) -> size
Gather(size, init7_s1_0) -> _onx_gather_size
Concat(_onx_gather_size, init7_s1_1, axis=0) -> _onx_concat_getitem::UnSq0
ConstantOfShape(_onx_concat_getitem::UnSq0, value=[1.0]) -> ones
Concat(x, ones, axis=1) -> output
output: name='output' type=dtype('float32') shape=['batch', 'seq+1']
new-tracing#
FAILED
ones() received an invalid combination of arguments - got (tuple, device=torch.device, dtype=torch.dtype), but expected one of:
* (tuple of ints size, *, tuple of names names, torch.dtype dtype = None, torch.layout layout = None, torch.device device = None, bool pin_memory = False, bool requires_grad = False)
* (tuple of ints size, *, Tensor out = None, torch.dtype dtype = None, torch.layout layout = None, torch.device device = None, bool pin_memory = False, bool requires_grad = False)
ExportWithNewConstantTo#
code: yobx.torch.testing._model_eval_cases.ExportWithNewConstantTo
forward#
def forward(self, x):
new_shape = (x.shape[0], 1)
ones = torch.ones(new_shape, dtype=x.dtype)
return torch.cat([x, ones.to(x.device)], dim=1)
yobx#
inputs:
#2[(T1s4x4,),(T1s5x6,)]
shapes:
dict(x:{0:Dim(batch),1:Dim(seq)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 'seq']
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- GraphBuilder.make_shape_from_results.conc
Shape(x, end=1, start=0) -> x::Shape:1
Concat(x::Shape:1, init7_s1_1, axis=0) -> _onx_concat_sym_size_int_1::UnSq0
ConstantOfShape(_onx_concat_sym_size_int_1::UnSq0, value=[1.0]) -> ones
Concat(x, ones, axis=1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'seq+1']
dynamo-ir#
inputs:
#2[(T1s4x4,),(T1s5x6,)]
shapes:
dict(x:{0:Dim(batch),1:Dim(seq)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 'seq']
init: name='val_7' type=float32 shape=() -- array([1.], dtype=float32)
init: name='val_3' type=int64 shape=(1,) -- array([1])
Shape(x, end=1, start=0) -> val_0
Concat(val_0, val_3, axis=0) -> val_4
Expand(val_7, val_4) -> ones
Concat(x, ones, axis=1) -> cat
output: name='cat' type=dtype('float32') shape=['batch', 'seq + 1']
tracing#
inputs:
#2[(T1s4x4,),(T1s5x6,)]
shapes:
dict(x:{0:Dim(batch),1:Dim(seq)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 'seq']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##GraphBuilder.make_shape_from_results.conc
Shape(x) -> size
Gather(size, init7_s1_0) -> _onx_gather_size
Concat(_onx_gather_size, init7_s1_1, axis=0) -> _onx_concat_getitem::UnSq0
ConstantOfShape(_onx_concat_getitem::UnSq0, value=[1.0]) -> ones
Concat(x, ones, axis=1) -> output
output: name='output' type=dtype('float32') shape=['batch', 'seq+1']
new-tracing#
FAILED
ones() received an invalid combination of arguments - got (tuple, dtype=torch.dtype), but expected one of:
* (tuple of ints size, *, tuple of names names, torch.dtype dtype = None, torch.layout layout = None, torch.device device = None, bool pin_memory = False, bool requires_grad = False)
* (tuple of ints size, *, Tensor out = None, torch.dtype dtype = None, torch.layout layout = None, torch.device device = None, bool pin_memory = False, bool requires_grad = False)
InplaceAdd#
code: yobx.torch.testing._model_eval_cases.InplaceAdd
forward#
def forward(self, x):
x += self.bias
return x
yobx#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
dynamo-ir#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add_3
output: name='add_3' type=dtype('float32') shape=['batch', 4]
tracing#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.get_attr.0
Add(x, bias) -> output
output: name='output' type=dtype('float32') shape=['batch', 4]
new-tracing#
FAILED
'DynamoInterpreter' object has no attribute 'TracingInt'
InplaceAdd2#
code: yobx.torch.testing._model_eval_cases.InplaceAdd2
forward#
def forward(self, x):
x.add_(self.bias)
return x
yobx#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
dynamo-ir#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add_3
output: name='add_3' type=dtype('float32') shape=['batch', 4]
tracing#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.get_attr.0
Add(x, bias) -> output
output: name='output' type=dtype('float32') shape=['batch', 4]
new-tracing#
FAILED
'DynamoInterpreter' object has no attribute 'TracingInt'
InplaceAdd_Mul#
code: yobx.torch.testing._model_eval_cases.InplaceAdd_Mul
forward#
def forward(self, x):
x.add_(self.bias)
return x * 2
yobx#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.1(mul_Tensor)##init7_s1_1/Opset.make_node.1/Shape
Add(x, c_bias) -> add_
Mul(add_, init1_s_::RSh1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
dynamo-ir#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
init: name='scalar_tensor_default' type=float32 shape=() -- array([2.], dtype=float32)
Add(x, bias) -> add_3
Mul(add_3, scalar_tensor_default) -> mul_4
output: name='mul_4' type=dtype('float32') shape=['batch', 4]
tracing#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.get_attr.0
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.1(mul)##init7_s1_1/Opset.make_node.1/Shape
Add(x, bias) -> add_
Mul(add_, init1_s_::RSh1) -> output
output: name='output' type=dtype('float32') shape=['batch', 4]
new-tracing#
FAILED
'DynamoInterpreter' object has no attribute 'TracingInt'
InplaceCloneAdd#
code: yobx.torch.testing._model_eval_cases.InplaceCloneAdd_
forward#
def forward(self, x):
x = x.clone()
x.add_(self.bias)
return x
yobx#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='c_bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.placeholder.0
Add(x, c_bias) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
dynamo-ir#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)
Add(x, bias) -> add_6
output: name='add_6' type=dtype('float32') shape=['batch', 4]
tracing#
inputs:
#2[(T1s3x4,),(T1s5x4,)]
shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
init: name='bias' type=float32 shape=(1, 4) -- array([1., 1., 1., 1.], dtype=float32)-- DynamoInterpret.get_attr.0
Add(x, bias) -> output
output: name='output' type=dtype('float32') shape=['batch', 4]
new-tracing#
FAILED
'DynamoInterpreter' object has no attribute 'TracingInt'
InplaceSetItemEllipsis_1#
code: yobx.torch.testing._model_eval_cases.InplaceSetItemEllipsis_1
forward#
def forward(self, index, update):
copy = self.params.clone()
copy[..., index] = update
return copy
yobx#
FAILED
L['update'].size()[0] = 8192 is not equal to L['index'].size()[0] = 4
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
dynamo-ir#
E0409 00:10:36.503000 15502 torch/_guards.py:370] Error while creating guard: E0409 00:10:36.503000 15502 torch/_guards.py:370] Name: '' E0409 00:10:36.503000 15502 torch/_guards.py:370] Source: shape_env E0409 00:10:36.503000 15502 torch/_guards.py:370] Create Function: SHAPE_ENV E0409 00:10:36.503000 15502 torch/_guards.py:370] Guard Types: None E0409 00:10:36.503000 15502 torch/_guards.py:370] Code List: None E0409 00:10:36.503000 15502 torch/_guards.py:370] Object Weakref: None E0409 00:10:36.503000 15502 torch/_guards.py:370] Guarded Class Weakref: None E0409 00:10:36.503000 15502 torch/_guards.py:370] Traceback (most recent call last): E0409 00:10:36.503000 15502 torch/_guards.py:370] File "~/vv/this312/lib/python3.12/site-packages/torch/_guards.py", line 368, in create E0409 00:10:36.503000 15502 torch/_guards.py:370] return self.create_fn(builder, self) E0409 00:10:36.503000 15502 torch/_guards.py:370] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ E0409 00:10:36.503000 15502 torch/_guards.py:370] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/guards.py", line 3112, in SHAPE_ENV E0409 00:10:36.503000 15502 torch/_guards.py:370] python_code_parts, verbose_code_parts = _get_code_parts( E0409 00:10:36.503000 15502 torch/_guards.py:370] ^^^^^^^^^^^^^^^^ E0409 00:10:36.503000 15502 torch/_guards.py:370] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/guards.py", line 3085, in _get_code_parts E0409 00:10:36.503000 15502 torch/_guards.py:370] return output_graph.shape_env.produce_guards_verbose( E0409 00:10:36.503000 15502 torch/_guards.py:370] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ E0409 00:10:36.503000 15502 torch/_guards.py:370] File "~/vv/this312/lib/python3.12/site-packages/torch/fx/experimental/symbolic_shapes.py", line 5842, in produce_guards_verbose E0409 00:10:36.503000 15502 torch/_guards.py:370] raise ConstraintViolationError( E0409 00:10:36.503000 15502 torch/_guards.py:370] 
torch.fx.experimental.symbolic_shapes.ConstraintViolationError: L['flat_args'][1].size()[0] = 8192 is not equal to L['flat_args'][0].size()[0] = 4 E0409 00:10:36.562000 15502 torch/_guards.py:372] Created at: E0409 00:10:36.562000 15502 torch/_guards.py:372] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/convert_frame.py", line 832, in trace_frame E0409 00:10:36.562000 15502 torch/_guards.py:372] tracer = InstructionTranslator( E0409 00:10:36.562000 15502 torch/_guards.py:372] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/symbolic_convert.py", line 4790, in __init__ E0409 00:10:36.562000 15502 torch/_guards.py:372] output=OutputGraph( E0409 00:10:36.562000 15502 torch/_guards.py:372] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/output_graph.py", line 705, in __init__ E0409 00:10:36.562000 15502 torch/_guards.py:372] self.init_ambient_guards() E0409 00:10:36.562000 15502 torch/_guards.py:372] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/output_graph.py", line 990, in init_ambient_guards E0409 00:10:36.562000 15502 torch/_guards.py:372] self.guards.add(ShapeEnvSource().make_guard(GuardBuilder.SHAPE_ENV))
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'torch._dynamo.exc.UserError'>: L['update'].size()[0] = 8192 is not equal to L['index'].size()[0] = 4
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
(Refer to the full stack trace above for more information.)
tracing#
inputs:
#1[(T7s4,T1s8192x4)]shapes:
dict(index:{0:Dim(batch)},update:{0:Dim(batch),1:DYNAMIC})
opset: domain='' version=21
opset: domain='aten' version=1
input: name='index' type=dtype('int64') shape=['batch']
input: name='update' type=dtype('float32') shape=['batch', 'channel_1']
init: name='_tensor_constant0' type=float32 shape=(1, 8192, 4) -- DynamoInterpret.get_attr.0
aten_setitem[aten](_tensor_constant0, index, update) -> output
output: name='output' type=dtype('float32') shape=[1, 8192, 4]
----- function name=aten_setitem domain=aten
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=21
input: '_tensor_constant0'
input: 'index'
input: 'update'
Constant(value=[1, 1, -1]) -> init7_s3_1_1_-1
Reshape(index, init7_s3_1_1_-1) -> index::RSh1x1x-1
Shape(_tensor_constant0, end=2, start=0) -> _tensor_constant0::Shape:2
Shape(index) -> index::Shape:
Concat(_tensor_constant0::Shape:2, index::Shape:, axis=0) -> _onx_concat__tensor_constant0::Shape:2
Expand(index::RSh1x1x-1, _onx_concat__tensor_constant0::Shape:2) -> _onx_expand_index::RSh1x1x-1
Expand(update, _onx_concat__tensor_constant0::Shape:2) -> _onx_expand_update
ScatterElements(_tensor_constant0, _onx_expand_index::RSh1x1x-1, _onx_expand_update, axis=2) -> setitem
output: name='setitem' type=? shape=?
new-tracing#
FAILED
Cannot determine if it is a constant argument None
InplaceSetItemEllipsis_2#
code: yobx.torch.testing._model_eval_cases.InplaceSetItemEllipsis_2
forward#
def forward(self, index, update):
copy = self.params.clone()
copy[..., index] = update
return copy
yobx#
FAILED
L['update'].size()[0] = 8192 is not equal to L['index'].size()[0] = 4
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
dynamo-ir#
E0409 00:10:38.090000 15502 torch/_guards.py:370] Error while creating guard: E0409 00:10:38.090000 15502 torch/_guards.py:370] Name: '' E0409 00:10:38.090000 15502 torch/_guards.py:370] Source: shape_env E0409 00:10:38.090000 15502 torch/_guards.py:370] Create Function: SHAPE_ENV E0409 00:10:38.090000 15502 torch/_guards.py:370] Guard Types: None E0409 00:10:38.090000 15502 torch/_guards.py:370] Code List: None E0409 00:10:38.090000 15502 torch/_guards.py:370] Object Weakref: None E0409 00:10:38.090000 15502 torch/_guards.py:370] Guarded Class Weakref: None E0409 00:10:38.090000 15502 torch/_guards.py:370] Traceback (most recent call last): E0409 00:10:38.090000 15502 torch/_guards.py:370] File "~/vv/this312/lib/python3.12/site-packages/torch/_guards.py", line 368, in create E0409 00:10:38.090000 15502 torch/_guards.py:370] return self.create_fn(builder, self) E0409 00:10:38.090000 15502 torch/_guards.py:370] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ E0409 00:10:38.090000 15502 torch/_guards.py:370] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/guards.py", line 3112, in SHAPE_ENV E0409 00:10:38.090000 15502 torch/_guards.py:370] python_code_parts, verbose_code_parts = _get_code_parts( E0409 00:10:38.090000 15502 torch/_guards.py:370] ^^^^^^^^^^^^^^^^ E0409 00:10:38.090000 15502 torch/_guards.py:370] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/guards.py", line 3085, in _get_code_parts E0409 00:10:38.090000 15502 torch/_guards.py:370] return output_graph.shape_env.produce_guards_verbose( E0409 00:10:38.090000 15502 torch/_guards.py:370] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ E0409 00:10:38.090000 15502 torch/_guards.py:370] File "~/vv/this312/lib/python3.12/site-packages/torch/fx/experimental/symbolic_shapes.py", line 5842, in produce_guards_verbose E0409 00:10:38.090000 15502 torch/_guards.py:370] raise ConstraintViolationError( E0409 00:10:38.090000 15502 torch/_guards.py:370] 
torch.fx.experimental.symbolic_shapes.ConstraintViolationError: L['flat_args'][1].size()[0] = 8192 is not equal to L['flat_args'][0].size()[0] = 4 E0409 00:10:38.091000 15502 torch/_guards.py:372] Created at: E0409 00:10:38.091000 15502 torch/_guards.py:372] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/convert_frame.py", line 832, in trace_frame E0409 00:10:38.091000 15502 torch/_guards.py:372] tracer = InstructionTranslator( E0409 00:10:38.091000 15502 torch/_guards.py:372] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/symbolic_convert.py", line 4790, in __init__ E0409 00:10:38.091000 15502 torch/_guards.py:372] output=OutputGraph( E0409 00:10:38.091000 15502 torch/_guards.py:372] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/output_graph.py", line 705, in __init__ E0409 00:10:38.091000 15502 torch/_guards.py:372] self.init_ambient_guards() E0409 00:10:38.091000 15502 torch/_guards.py:372] File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/output_graph.py", line 990, in init_ambient_guards E0409 00:10:38.091000 15502 torch/_guards.py:372] self.guards.add(ShapeEnvSource().make_guard(GuardBuilder.SHAPE_ENV))
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'torch._dynamo.exc.UserError'>: L['update'].size()[0] = 8192 is not equal to L['index'].size()[0] = 4
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
(Refer to the full stack trace above for more information.)
tracing#
inputs:
#1[(T7s4,T1s8192x4)]shapes:
dict(index:{0:Dim(batch)},update:{0:Dim(batch),1:DYNAMIC})
opset: domain='' version=21
opset: domain='aten' version=1
input: name='index' type=dtype('int64') shape=['batch']
input: name='update' type=dtype('float32') shape=['batch', 'channel_1']
init: name='_tensor_constant0' type=float32 shape=(1, 8192, 6) -- DynamoInterpret.get_attr.0
aten_setitem[aten](_tensor_constant0, index, update) -> output
output: name='output' type=dtype('float32') shape=[1, 8192, 6]
----- function name=aten_setitem domain=aten
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=21
input: '_tensor_constant0'
input: 'index'
input: 'update'
Constant(value=[1, 1, -1]) -> init7_s3_1_1_-1
Reshape(index, init7_s3_1_1_-1) -> index::RSh1x1x-1
Shape(_tensor_constant0, end=2, start=0) -> _tensor_constant0::Shape:2
Shape(index) -> index::Shape:
Concat(_tensor_constant0::Shape:2, index::Shape:, axis=0) -> _onx_concat__tensor_constant0::Shape:2
Expand(index::RSh1x1x-1, _onx_concat__tensor_constant0::Shape:2) -> _onx_expand_index::RSh1x1x-1
Expand(update, _onx_concat__tensor_constant0::Shape:2) -> _onx_expand_update
ScatterElements(_tensor_constant0, _onx_expand_index::RSh1x1x-1, _onx_expand_update, axis=2) -> setitem
output: name='setitem' type=? shape=?
new-tracing#
FAILED
Cannot determine if it is a constant argument None
InplaceSetItemExp#
code: yobx.torch.testing._model_eval_cases.InplaceSetItemExp
forward#
def forward(self, x):
K_33 = x.clone()
torch.exp_(K_33[2:-2, 2:-2, :-1])
return K_33
yobx#
FAILED
Constraints violated (batch)! For more information, run with TORCH_LOGS="+dynamic".
- Not all values of batch = L['x'].size()[0] in the specified range satisfy the generated guard 6 <= L['x'].size()[0] and L['x'].size()[0] <= IntInfinity()
Suggested fixes:
batch = Dim('batch', min=6)
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
dynamo-ir#
inputs:
#2[(T1s7x9x11,),(T1s8x9x11,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 9, 11]
init: name='val_4' type=int64 shape=(1,) -- array([2])
init: name='val_8' type=int64 shape=(1,) -- array([-2])
init: name='val_12' type=int64 shape=(1,) -- array([0])
init: name='val_24' type=int64 shape=(1,) -- array([1])
init: name='val_32' type=int64 shape=(1,) -- array([-1])
init: name='val_1' type=int64 shape=() -- array([2])
init: name='val_9' type=int64 shape=() -- array([0])
init: name='val_21' type=int64 shape=() -- array([1])
Shape(x, start=0) -> val_86
Gather(val_86, val_9, axis=0) -> val_87
Range(val_9, val_87, val_21) -> val_88
Slice(val_88, val_4, val_8, val_12, val_24) -> val_92
Unsqueeze(val_92, val_32) -> val_93
Slice(x, val_4, val_8, val_12, val_24) -> slice_1
Slice(slice_1, val_4, val_8, val_24, val_24) -> slice_2
Slice(slice_2, val_12, val_32, val_4, val_24) -> slice_3
Exp(slice_3) -> exp
Transpose(exp, perm=[2,1,0]) -> val_70
Shape(slice_2, start=0) -> val_61
Gather(val_61, val_1, axis=0) -> val_62
Range(val_9, val_62, val_21) -> val_63
Slice(val_63, val_12, val_32, val_12, val_24) -> val_67
Unsqueeze(val_67, val_32) -> val_69
Transpose(slice_2, perm=[2,1,0]) -> val_71
ScatterND(val_71, val_69, val_70, reduction=b'none') -> val_72
Transpose(val_72, perm=[1,2,0]) -> val_82
Shape(slice_1, start=0) -> val_74
Gather(val_74, val_21, axis=0) -> val_75
Range(val_9, val_75, val_21) -> val_76
Slice(val_76, val_4, val_8, val_12, val_24) -> val_80
Unsqueeze(val_80, val_32) -> val_81
Transpose(slice_1, perm=[1,0,2]) -> val_83
ScatterND(val_83, val_81, val_82, reduction=b'none') -> val_84
Transpose(val_84, perm=[1,0,2]) -> slice_scatter_1
ScatterND(x, val_93, slice_scatter_1, reduction=b'none') -> slice_scatter_2
output: name='slice_scatter_2' type=dtype('float32') shape=['batch', 9, 11]
tracing#
inputs:
#2[(T1s7x9x11,),(T1s8x9x11,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 9, 11]
init: name='init7_s3_2_2_0' type=int64 shape=(3,) -- array([2, 2, 0]) -- Opset.make_node.1/Shape
init: name='init7_s3_-2_-2_-1' type=int64 shape=(3,) -- array([-2, -2, -1])-- Opset.make_node.1/Shape
init: name='init7_s3_0_1_2' type=int64 shape=(3,) -- array([0, 1, 2]) -- Opset.make_node.1/Shape
init: name='init7_s6_' type=int64 shape=(6,) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_' type=float32 shape=() -- array([1.], dtype=float32)-- Opset.make_node.1/Small
Slice(x, init7_s3_2_2_0, init7_s3_-2_-2_-1, init7_s3_0_1_2) -> _onx_slice_clone
Exp(_onx_slice_clone) -> _onx_exp_slice_clone
Shape(_onx_exp_slice_clone) -> _onx_exp_slice_clone::Shape:
ConstantOfShape(_onx_exp_slice_clone::Shape:, value=[0.0]) -> _onx_constantofshape_exp_slice_clone::Shape:
Pad(_onx_constantofshape_exp_slice_clone::Shape:, init7_s6_, init1_s_) -> clone_mask
Mul(x, clone_mask) -> _onx_mul_clone
Pad(_onx_exp_slice_clone, init7_s6_) -> _onx_exp_slice_clone_padded
Add(_onx_mul_clone, _onx_exp_slice_clone_padded) -> output
output: name='output' type=dtype('float32') shape=['batch', 9, 11]
new-tracing#
FAILED
Unable to interpret function <class 'torch._ops.OpOverload'>: <OpOverload(op='aten.exp_', overload='default')>, searched for ['aten_exp_', 'exp__default'] and attributes ['__qualname__', '__name__'], args=(slice_tensor_2,), kwargs={}, dispatcher=None
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-TKQ] Message starts, there are 5 initializers, 4 nodes, 1 inputs, 1 outputs.
input_names=['x']
output_names=[]
--CONSTRAINTS--
batch-4 = {'u1'}
u1 = {'batch-4'}
--SHAPE--
_dynamic_examples=
dynamic_objects=
batch = 'batch'
batch-4 = 'batch-4'
u1 = TracingInt('u1')
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes={'x': {0: Dim('batch', min=0)}}
_known_shapes={'clone_default': ('batch', 9, 11),
'init7_s1_-1': (1,),
'init7_s1_-2': (1,),
'init7_s1_0': (1,),
'init7_s1_1': (1,),
'init7_s1_2': (1,),
'slice_tensor': ('batch-4', 9, 11),
'slice_tensor_1': ('batch-4', 5, 11),
'slice_tensor_2': ('batch-4', 5, 10),
'x': ('batch', 9, 11)}
_known_types={'clone_default': 1,
'init7_s1_-1': 7,
'init7_s1_-2': 7,
'init7_s1_0': 7,
'init7_s1_1': 7,
'init7_s1_2': 7,
'slice_tensor': 1,
'slice_tensor_1': 1,
'slice_tensor_2': 1,
'x': 1}
_known_devices={'clone_default': -1,
'slice_tensor': -1,
'slice_tensor_1': -1,
'slice_tensor_2': -1,
'x': -1}
_context=[]
_known_value_shape={'init7_s1_-1': (-1,),
'init7_s1_-2': (-2,),
'init7_s1_0': (0,),
'init7_s1_1': (1,),
'init7_s1_2': (2,)}
_known_constants=['init7_s1_-1', 'init7_s1_-2', 'init7_s1_0', 'init7_s1_1', 'init7_s1_2']
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
clone_default -> {slice_tensor, output}
exp__default -> set()
slice_tensor -> {slice_tensor_1}
slice_tensor_1 -> {slice_tensor_2}
slice_tensor_2 -> {exp__default}
x -> {clone_default}
--TORCH-SHAPES--
x: ('run_node', ('', ('val', torch.float32, TracingShape([TracingInt('batch'), 9, 11])))) --- 1:3:('batch', 9, 11):
clone_default: ('run_node', ('', ('val', torch.float32, TracingShape([TracingInt('batch'), 9, 11])))) --- 1:3:('batch', 9, 11):
slice_tensor: ('run_node', ('', ('val', torch.float32, TracingShape([TracingInt('u1'), 9, 11])))) --- 1:3:('batch-4', 9, 11):
slice_tensor_1: ('run_node', ('', ('val', torch.float32, TracingShape([TracingInt('u1'), 5, 11])))) --- 1:3:('batch-4', 5, 11):
slice_tensor_2: ('run_node', ('', ('val', torch.float32, TracingShape([TracingInt('u1'), 5, 10])))) --- 1:3:('batch-4', 5, 10):
exp__default: ('run_node', ('', ('val', torch.float32, TracingShape([TracingInt('u1'), 5, 10])))) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(tracing=TracingMode.NEW_TRACING, aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
InplaceSetItemExp()
def forward(self, x):
clone_default = torch.ops.aten.clone.default(x); x = None
slice_tensor = torch.ops.aten.slice.Tensor(clone_default, 0, 2, -2)
slice_tensor_1 = torch.ops.aten.slice.Tensor(slice_tensor, 1, 2, -2); slice_tensor = None
slice_tensor_2 = torch.ops.aten.slice.Tensor(slice_tensor_1, 2, 0, -1); slice_tensor_1 = None
exp__default = torch.ops.aten.exp_.default(slice_tensor_2); slice_tensor_2 = exp__default = None
return clone_default
# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
%x : [num_users=1] = placeholder[target=x]
%clone_default : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%x,), kwargs = {})
%slice_tensor : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%clone_default, 0, 2, -2), kwargs = {})
%slice_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%slice_tensor, 1, 2, -2), kwargs = {})
%slice_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%slice_tensor_1, 2, 0, -1), kwargs = {})
%exp__default : [num_users=0] = call_function[target=torch.ops.aten.exp_.default](args = (%slice_tensor_2,), kwargs = {})
return clone_default
-- process.inputs_to_remove --
set()
-- process.progress --
node 5/7 target=aten.exp_.default
-- 1 INPUTS
[GraphBuilder-TKQ.1.make_tensor_input] x[1:batchx9x11]
-- 5 INITIALIZERS
[GraphBuilder-TKQ.1.make_initializer] init7_s1_2[int64:int64:[2]] - SOURCE: Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
[GraphBuilder-TKQ.1.make_initializer] init7_s1_-2[int64:int64:[-2]] - SOURCE: Opset.make_node.1/Shape##Opset.make_node.1/Shape
[GraphBuilder-TKQ.1.make_initializer] init7_s1_0[int64:int64:[0]] - SOURCE: Opset.make_node.1/Shape##Opset.make_node.1/Shape
[GraphBuilder-TKQ.1.make_initializer] init7_s1_1[int64:int64:[1]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-TKQ.1.make_initializer] init7_s1_-1[int64:int64:[-1]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-TKQ.4.make_node] clone [@:@ ] Identity:['x']->['clone_default']
[GraphBuilder-TKQ.4.make_node] slice_Tensor [@###:@] Slice:['clone_default', 'init7_s1_2', 'init7_s1_-2', 'init7_s1_0']->['slice_tensor']
[GraphBuilder-TKQ.4.make_node] slice_Tensor2 [@###:@] Slice:['slice_tensor', 'init7_s1_2', 'init7_s1_-2', 'init7_s1_1']->['slice_tensor_1']
[GraphBuilder-TKQ.4.make_node] slice_Tensor3 [@###:@] Slice:['slice_tensor_1', 'init7_s1_0', 'init7_s1_-1', 'init7_s1_2']->['slice_tensor_2']
-- 0 OUTPUTS
[GraphBuilder-TKQ] Message completed, there are 5 initializers, 4 nodes, 1 inputs, 1 outputs.,
InplaceSetItemMask#
code: yobx.torch.testing._model_eval_cases.InplaceSetItemMask
forward#
def forward(self, x):
mask = x.to(bool)
x[mask] = 2
return x
yobx#
inputs:
#2[(T1s2x3x3,),(T1s3x3x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3, 3]
init: name='c_lifted_tensor_0' type=float32 shape=() -- array([2.], dtype=float32)-- DynamoInterpret.placeholder.0
Cast(x, to=9) -> to
Where(to, c_lifted_tensor_0, x) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 3, 3]
dynamo-ir#
inputs:
#2[(T1s2x3x3,),(T1s3x3x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3, 3]
init: name='clone' type=float32 shape=() -- array([2.], dtype=float32)
Cast(x, to=9) -> _to_copy
Where(_to_copy, clone, x) -> index_put
output: name='index_put' type=dtype('float32') shape=['batch', 3, 3]
tracing#
inputs:
#2[(T1s2x3x3,),(T1s3x3x3,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='aten' version=1
input: name='x' type=dtype('float32') shape=['batch', 3, 3]
Cast(x, to=9) -> to
aten_setitem[aten](x, to) -> output
output: name='output' type=dtype('float32') shape=['batch', 3, 3]
----- function name=aten_setitem domain=aten
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=21
input: 'x'
input: 'to'
Constant(value=2.0) -> init1_s_
Where(to, init1_s_, x) -> setitem
output: name='setitem' type=? shape=?
new-tracing#
FAILED
Cannot determine if it is a constant argument dtype(torch.bool)
InplaceSetItemSquare#
code: yobx.torch.testing._model_eval_cases.InplaceSetItemSquare
forward#
def forward(self, x):
x[:2, :3] = 1
return x
yobx#
inputs:
#2[(T1s5x5,),(T1s7x5,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##ReshapeIsSqueezePattern.m1
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- Opset.make_node.1/Shape
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s3_0_1_2::RSh-1x1' type=int64 shape=(3, 1) -- array([0, 1, 2])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s3_0_1_2)##init7_s3_0_1_2/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='fill::T10' type=float32 shape=(3, 2) -- GraphBuilder.constant_folding.from/fold(fill)##fill/
Shape(x, end=1, start=0) -> x::Shape:1
Squeeze(x::Shape:1) -> x::Shape:1::Sq
Range(init7_s_0, x::Shape:1::Sq, init7_s_1) -> _onx_range_init7_s_0
Slice(_onx_range_init7_s_0, init7_s1_0, init7_s1_2, init7_s1_0, init7_s1_1) -> _onx_slice_range_init7_s_0
Unsqueeze(_onx_slice_range_init7_s_0, init7_s1_1) -> _onx_slice_range_init7_s_0::RSh-1x1
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> slice_3::T10
ScatterND(slice_3::T10, init7_s3_0_1_2::RSh-1x1, fill::T10) -> _onx_scatternd_slice_3::T10
Transpose(_onx_scatternd_slice_3::T10, perm=[1,0]) -> slice_scatter
ScatterND(x, _onx_slice_range_init7_s_0::RSh-1x1, slice_scatter) -> output_0
Identity(output_0) -> output_1
output: name='output_1' type=dtype('float32') shape=['batch', 5]
dynamo-ir#
inputs:
#2[(T1s5x5,),(T1s7x5,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='val_3' type=int64 shape=(1,) -- array([0])
init: name='val_7' type=int64 shape=(1,) -- array([2])
init: name='val_18' type=int64 shape=(1,) -- array([3])
init: name='val_22' type=int64 shape=(1,) -- array([1])
init: name='value_0' type=float32 shape=() -- array([1.], dtype=float32)
init: name='val_0' type=int64 shape=() -- array([0])
init: name='val_19' type=int64 shape=() -- array([1])
init: name='val_42' type=int64 shape=(1,) -- array([-1])
Shape(x, start=0) -> val_48
Gather(val_48, val_0, axis=0) -> val_49
Range(val_0, val_49, val_19) -> val_50
Slice(val_50, val_3, val_7, val_3, val_22) -> val_54
Unsqueeze(val_54, val_42) -> val_55
Slice(x, val_3, val_7, val_3, val_22) -> slice_1
Slice(slice_1, val_3, val_18, val_22, val_22) -> slice_2
Shape(slice_2) -> shape
Expand(value_0, shape) -> fill
Transpose(fill, perm=[1,0]) -> val_44
Shape(slice_1, start=0) -> val_35
Gather(val_35, val_19, axis=0) -> val_36
Range(val_0, val_36, val_19) -> val_37
Slice(val_37, val_3, val_18, val_3, val_22) -> val_41
Unsqueeze(val_41, val_42) -> val_43
Transpose(slice_1, perm=[1,0]) -> val_45
ScatterND(val_45, val_43, val_44, reduction=b'none') -> val_46
Transpose(val_46, perm=[1,0]) -> slice_scatter
ScatterND(x, val_55, slice_scatter, reduction=b'none') -> slice_scatter_1
output: name='slice_scatter_1' type=dtype('float32') shape=['batch', 5]
tracing#
inputs:
#2[(T1s5x5,),(T1s7x5,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='aten' version=1
input: name='x' type=dtype('float32') shape=['batch', 5]
aten_setitem[aten](x) -> output
output: name='output' type=dtype('float32') shape=['batch', 5]
----- function name=aten_setitem domain=aten
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=21
input: 'x'
Constant(value=1.0) -> setitem_val
Constant(value=[0]) -> init7_s1_0
Concat(init7_s1_0, init7_s1_0, axis=0) -> _onx_concat_init7_s1_0
Constant(value=2) -> init7_s_2
Constant(value=3) -> init7_s_3
Shape(x, end=1, start=0) -> x::Shape:1
Squeeze(x::Shape:1, init7_s1_0) -> x::Shape:1::Sq0
Sub(x::Shape:1::Sq0, init7_s_2) -> _onx_sub_x::Shape:1::Sq0
Unsqueeze(_onx_sub_x::Shape:1::Sq0, init7_s1_0) -> _onx_sub_x::Shape:1::Sq0::UnSq0
Shape(x, end=2, start=1) -> x::Shape1:2
Squeeze(x::Shape1:2, init7_s1_0) -> x::Shape1:2::Sq0
Sub(x::Shape1:2::Sq0, init7_s_3) -> _onx_sub_x::Shape1:2::Sq0
Unsqueeze(_onx_sub_x::Shape1:2::Sq0, init7_s1_0) -> _onx_sub_x::Shape1:2::Sq0::UnSq0
Concat(_onx_sub_x::Shape:1::Sq0::UnSq0, _onx_sub_x::Shape1:2::Sq0::UnSq0, axis=0) -> _onx_concat_sub_x::Shape:1::Sq0::UnSq0
Add(_onx_concat_init7_s1_0, _onx_concat_sub_x::Shape:1::Sq0::UnSq0) -> _onx_add_concat_init7_s1_0
Shape(x) -> x::Shape:
Sub(x::Shape:, _onx_add_concat_init7_s1_0) -> _onx_sub_x::Shape:
Expand(setitem_val, _onx_sub_x::Shape:) -> _onx_expand_setitem_val
Shape(_onx_expand_setitem_val) -> _onx_expand_setitem_val::Shape:
ConstantOfShape(_onx_expand_setitem_val::Shape:, value=[0.0]) -> _onx_constantofshape_expand_setitem_val::Shape:
Unsqueeze(_onx_sub_x::Shape:1::Sq0, init7_s1_0) -> _onx_sub_x::Shape:1::Sq0::UnSq02
Unsqueeze(_onx_sub_x::Shape1:2::Sq0, init7_s1_0) -> _onx_sub_x::Shape1:2::Sq0::UnSq02
Concat(init7_s1_0, init7_s1_0, _onx_sub_x::Shape:1::Sq0::UnSq02, _onx_sub_x::Shape1:2::Sq0::UnSq02, axis=0) -> _onx_concat_init7_s1_02
Pad(_onx_expand_setitem_val, _onx_concat_init7_s1_02) -> _onx_expand_setitem_val_padded
Pad(_onx_constantofshape_expand_setitem_val::Shape:, _onx_concat_init7_s1_02, setitem_val) -> x_mask
Mul(x, x_mask) -> _onx_mul_x
Add(_onx_mul_x, _onx_expand_setitem_val_padded) -> setitem
output: name='setitem' type=? shape=?
new-tracing#
FAILED
Unable to interpret function <class 'torch._ops.OpOverload'>: <OpOverload(op='aten.fill_', overload='Tensor')>, searched for ['aten_fill__Tensor', 'fill__Tensor'] and attributes ['__qualname__', '__name__'], args=(slice_tensor, param_1), kwargs={}, dispatcher=None
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-KHK] Message starts, there are 4 initializers, 1 nodes, 1 inputs, 1 outputs.
input_names=['x']
output_names=[]
--SHAPE--
_dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes={'x': {0: Dim('batch', min=0)}}
_known_shapes={'init7_s1_0': (1,),
'init7_s1_1': (1,),
'init7_s1_3': (1,),
'param_1': (),
'slice_tensor': ('batch', 3),
'x': ('batch', 5)}
_known_types={'init7_s1_0': 7,
'init7_s1_1': 7,
'init7_s1_3': 7,
'param_1': 1,
'slice_tensor': 1,
'x': 1}
_known_devices={'slice_tensor': -1, 'x': -1}
_context=[]
_known_value_shape={'init7_s1_0': (0,), 'init7_s1_1': (1,), 'init7_s1_3': (3,)}
_known_constants=['init7_s1_0', 'init7_s1_1', 'init7_s1_3', 'param_1']
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
fill__tensor -> set()
param_1 -> {fill__tensor}
slice_tensor -> {fill__tensor}
x -> {output, slice_tensor}
--TORCH-SHAPES--
x: ('run_node', ('', ('val', torch.float32, TracingShape([TracingInt('batch'), 5])))) --- 1:2:('batch', 5):
slice_tensor: ('run_node', ('', ('val', torch.float32, TracingShape([TracingInt('batch'), 3])))) --- 1:2:('batch', 3):
param_1: ('run_node', ('', ('val', torch.float32, TracingShape([])))) --- 1:0:():
fill__tensor: ('run_node', ('', ('val', torch.float32, TracingShape([TracingInt('batch'), 3])))) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(tracing=TracingMode.NEW_TRACING, aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
InplaceSetItemSquare()
def forward(self, x, param_1):
slice_tensor = torch.ops.aten.slice.Tensor(x, 1, 0, 3)
fill__tensor = torch.ops.aten.fill_.Tensor(slice_tensor, param_1); slice_tensor = param_1 = fill__tensor = None
return x
# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
%x : [num_users=2] = placeholder[target=x]
%slice_tensor : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 1, 0, 3), kwargs = {})
%param_1 : [num_users=1] = placeholder[target=param_1]
%fill__tensor : [num_users=0] = call_function[target=torch.ops.aten.fill_.Tensor](args = (%slice_tensor, %param_1), kwargs = {})
return x
-- process.inputs_to_remove --
set()
-- process.progress --
node 3/5 target=aten.fill_.Tensor
-- 1 INPUTS
[GraphBuilder-KHK.1.make_tensor_input] x[1:batchx5]
-- 4 INITIALIZERS
[GraphBuilder-KHK.1.make_initializer] init7_s1_0[int64:int64:[0]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-KHK.1.make_initializer] init7_s1_3[int64:int64:[3]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-KHK.1.make_initializer] init7_s1_1[int64:int64:[1]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-KHK.1.make_initializer] param_1[torch.float32:torch.float32:[1.0]] - SOURCE: DynamoInterpret.placeholder.0
[GraphBuilder-KHK.4.make_node] slice_Tensor [@###:@] Slice:['x', 'init7_s1_0', 'init7_s1_3', 'init7_s1_1']->['slice_tensor']
-- 0 OUTPUTS
[GraphBuilder-KHK] Message completed, there are 4 initializers, 1 nodes, 1 inputs, 1 outputs.,
InplaceSetItemSquareAdd#
code: yobx.torch.testing._model_eval_cases.InplaceSetItemSquareAdd
forward#
def forward(self, x):
x[:2, :3] = 1
return x + 2
yobx#
inputs:
#2[(T1s5x5,),(T1s7x5,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##ReshapeIsSqueezePattern.m1
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- Opset.make_node.1/Shape
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s3_0_1_2::RSh-1x1' type=int64 shape=(3, 1) -- array([0, 1, 2])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s3_0_1_2)##init7_s3_0_1_2/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='fill::T10' type=float32 shape=(3, 2) -- GraphBuilder.constant_folding.from/fold(fill)##fill/
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.0##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
Shape(x, end=1, start=0) -> x::Shape:1
Squeeze(x::Shape:1) -> x::Shape:1::Sq
Range(init7_s_0, x::Shape:1::Sq, init7_s_1) -> _onx_range_init7_s_0
Slice(_onx_range_init7_s_0, init7_s1_0, init7_s1_2, init7_s1_0, init7_s1_1) -> _onx_slice_range_init7_s_0
Unsqueeze(_onx_slice_range_init7_s_0, init7_s1_1) -> _onx_slice_range_init7_s_0::RSh-1x1
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> slice_3::T10
ScatterND(slice_3::T10, init7_s3_0_1_2::RSh-1x1, fill::T10) -> _onx_scatternd_slice_3::T10
Transpose(_onx_scatternd_slice_3::T10, perm=[1,0]) -> slice_scatter
ScatterND(x, _onx_slice_range_init7_s_0::RSh-1x1, slice_scatter) -> output_0
Add(output_0, init1_s_::RSh1) -> output_1
output: name='output_1' type=dtype('float32') shape=['batch', 5]
dynamo-ir#
inputs:
#2[(T1s5x5,),(T1s7x5,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='val_3' type=int64 shape=(1,) -- array([0])
init: name='val_7' type=int64 shape=(1,) -- array([2])
init: name='val_18' type=int64 shape=(1,) -- array([3])
init: name='val_22' type=int64 shape=(1,) -- array([1])
init: name='value_0' type=float32 shape=() -- array([1.], dtype=float32)
init: name='scalar_tensor_default' type=float32 shape=() -- array([2.], dtype=float32)
init: name='val_0' type=int64 shape=() -- array([0])
init: name='val_19' type=int64 shape=() -- array([1])
init: name='val_42' type=int64 shape=(1,) -- array([-1])
Shape(x, start=0) -> val_48
Gather(val_48, val_0, axis=0) -> val_49
Range(val_0, val_49, val_19) -> val_50
Slice(val_50, val_3, val_7, val_3, val_22) -> val_54
Unsqueeze(val_54, val_42) -> val_55
Slice(x, val_3, val_7, val_3, val_22) -> slice_1
Slice(slice_1, val_3, val_18, val_22, val_22) -> slice_2
Shape(slice_2) -> shape
Expand(value_0, shape) -> fill
Transpose(fill, perm=[1,0]) -> val_44
Shape(slice_1, start=0) -> val_35
Gather(val_35, val_19, axis=0) -> val_36
Range(val_0, val_36, val_19) -> val_37
Slice(val_37, val_3, val_18, val_3, val_22) -> val_41
Unsqueeze(val_41, val_42) -> val_43
Transpose(slice_1, perm=[1,0]) -> val_45
ScatterND(val_45, val_43, val_44, reduction=b'none') -> val_46
Transpose(val_46, perm=[1,0]) -> slice_scatter
ScatterND(x, val_55, slice_scatter, reduction=b'none') -> slice_scatter_1
Add(slice_scatter_1, scalar_tensor_default) -> add_12
output: name='add_12' type=dtype('float32') shape=['batch', 5]
tracing#
inputs:
#2[(T1s5x5,),(T1s7x5,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='aten' version=1
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.1(add)##init7_s1_1/Opset.make_node.1/Shape
aten_setitem[aten](x) -> setitem
Add(setitem, init1_s_::RSh1) -> output
output: name='output' type=dtype('float32') shape=['batch', 5]
----- function name=aten_setitem domain=aten
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=21
input: 'x'
Constant(value=1.0) -> setitem_val
Constant(value=[0]) -> init7_s1_0
Concat(init7_s1_0, init7_s1_0, axis=0) -> _onx_concat_init7_s1_0
Constant(value=2) -> init7_s_2
Constant(value=3) -> init7_s_3
Shape(x, end=1, start=0) -> x::Shape:1
Squeeze(x::Shape:1, init7_s1_0) -> x::Shape:1::Sq0
Sub(x::Shape:1::Sq0, init7_s_2) -> _onx_sub_x::Shape:1::Sq0
Unsqueeze(_onx_sub_x::Shape:1::Sq0, init7_s1_0) -> _onx_sub_x::Shape:1::Sq0::UnSq0
Shape(x, end=2, start=1) -> x::Shape1:2
Squeeze(x::Shape1:2, init7_s1_0) -> x::Shape1:2::Sq0
Sub(x::Shape1:2::Sq0, init7_s_3) -> _onx_sub_x::Shape1:2::Sq0
Unsqueeze(_onx_sub_x::Shape1:2::Sq0, init7_s1_0) -> _onx_sub_x::Shape1:2::Sq0::UnSq0
Concat(_onx_sub_x::Shape:1::Sq0::UnSq0, _onx_sub_x::Shape1:2::Sq0::UnSq0, axis=0) -> _onx_concat_sub_x::Shape:1::Sq0::UnSq0
Add(_onx_concat_init7_s1_0, _onx_concat_sub_x::Shape:1::Sq0::UnSq0) -> _onx_add_concat_init7_s1_0
Shape(x) -> x::Shape:
Sub(x::Shape:, _onx_add_concat_init7_s1_0) -> _onx_sub_x::Shape:
Expand(setitem_val, _onx_sub_x::Shape:) -> _onx_expand_setitem_val
Shape(_onx_expand_setitem_val) -> _onx_expand_setitem_val::Shape:
ConstantOfShape(_onx_expand_setitem_val::Shape:, value=[0.0]) -> _onx_constantofshape_expand_setitem_val::Shape:
Unsqueeze(_onx_sub_x::Shape:1::Sq0, init7_s1_0) -> _onx_sub_x::Shape:1::Sq0::UnSq02
Unsqueeze(_onx_sub_x::Shape1:2::Sq0, init7_s1_0) -> _onx_sub_x::Shape1:2::Sq0::UnSq02
Concat(init7_s1_0, init7_s1_0, _onx_sub_x::Shape:1::Sq0::UnSq02, _onx_sub_x::Shape1:2::Sq0::UnSq02, axis=0) -> _onx_concat_init7_s1_02
Pad(_onx_expand_setitem_val, _onx_concat_init7_s1_02) -> _onx_expand_setitem_val_padded
Pad(_onx_constantofshape_expand_setitem_val::Shape:, _onx_concat_init7_s1_02, setitem_val) -> x_mask
Mul(x, x_mask) -> _onx_mul_x
Add(_onx_mul_x, _onx_expand_setitem_val_padded) -> setitem
output: name='setitem' type=? shape=?
new-tracing#
FAILED
Unable to interpret function <class 'torch._ops.OpOverload'>: <OpOverload(op='aten.fill_', overload='Tensor')>, searched for ['aten_fill__Tensor', 'fill__Tensor'] and attributes ['__qualname__', '__name__'], args=(slice_tensor, param_1), kwargs={}, dispatcher=None
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-HSA] Message starts, there are 4 initializers, 1 nodes, 1 inputs, 1 outputs.
input_names=['x']
output_names=[]
--SHAPE--
_dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes={'x': {0: Dim('batch', min=0)}}
_known_shapes={'init7_s1_0': (1,),
'init7_s1_1': (1,),
'init7_s1_3': (1,),
'param_1': (),
'slice_tensor': ('batch', 3),
'x': ('batch', 5)}
_known_types={'init7_s1_0': 7,
'init7_s1_1': 7,
'init7_s1_3': 7,
'param_1': 1,
'slice_tensor': 1,
'x': 1}
_known_devices={'slice_tensor': -1, 'x': -1}
_context=[]
_known_value_shape={'init7_s1_0': (0,), 'init7_s1_1': (1,), 'init7_s1_3': (3,)}
_known_constants=['init7_s1_0', 'init7_s1_1', 'init7_s1_3', 'param_1']
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
fill__tensor -> set()
param_1 -> {fill__tensor}
slice_tensor -> {fill__tensor}
x -> {add_tensor, slice_tensor}
--TORCH-SHAPES--
x: ('run_node', ('', ('val', torch.float32, TracingShape([TracingInt('batch'), 5])))) --- 1:2:('batch', 5):
slice_tensor: ('run_node', ('', ('val', torch.float32, TracingShape([TracingInt('batch'), 3])))) --- 1:2:('batch', 3):
param_1: ('run_node', ('', ('val', torch.float32, TracingShape([])))) --- 1:0:():
fill__tensor: ('run_node', ('', ('val', torch.float32, TracingShape([TracingInt('batch'), 3])))) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(tracing=TracingMode.NEW_TRACING, aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
InplaceSetItemSquareAdd()
def forward(self, x, param_1):
slice_tensor = torch.ops.aten.slice.Tensor(x, 1, 0, 3)
fill__tensor = torch.ops.aten.fill_.Tensor(slice_tensor, param_1); slice_tensor = param_1 = fill__tensor = None
add_tensor = torch.ops.aten.add.Tensor(x, 2); x = None
return add_tensor
# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
%x : [num_users=2] = placeholder[target=x]
%slice_tensor : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 1, 0, 3), kwargs = {})
%param_1 : [num_users=1] = placeholder[target=param_1]
%fill__tensor : [num_users=0] = call_function[target=torch.ops.aten.fill_.Tensor](args = (%slice_tensor, %param_1), kwargs = {})
%add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, 2), kwargs = {})
return add_tensor
-- process.inputs_to_remove --
set()
-- process.progress --
node 3/6 target=aten.fill_.Tensor
-- 1 INPUTS
[GraphBuilder-HSA.1.make_tensor_input] x[1:batchx5]
-- 4 INITIALIZERS
[GraphBuilder-HSA.1.make_initializer] init7_s1_0[int64:int64:[0]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-HSA.1.make_initializer] init7_s1_3[int64:int64:[3]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-HSA.1.make_initializer] init7_s1_1[int64:int64:[1]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-HSA.1.make_initializer] param_1[torch.float32:torch.float32:[1.0]] - SOURCE: DynamoInterpret.placeholder.0
[GraphBuilder-HSA.4.make_node] slice_Tensor [@###:@] Slice:['x', 'init7_s1_0', 'init7_s1_3', 'init7_s1_1']->['slice_tensor']
-- 0 OUTPUTS
[GraphBuilder-HSA] Message completed, there are 4 initializers, 1 nodes, 1 inputs, 1 outputs.,
InplaceSetItemSquareAdd2#
code: yobx.torch.testing._model_eval_cases.InplaceSetItemSquareAdd2
forward#
def forward(self, x):
x[:2, :3] = 1
return x + 2, x + 3
yobx#
inputs:
#2[(T1s5x5,),(T1s7x5,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##ReshapeIsSqueezePattern.m1
init: name='init7_s_0' type=int64 shape=() -- array([0]) -- Opset.make_node.1/Shape
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- Opset.make_node.1/Shape
init: name='init7_s3_0_1_2::RSh-1x1' type=int64 shape=(3, 1) -- array([0, 1, 2])-- GraphBuilder.constant_folding.from/fold(init7_s2_-1_1,init7_s3_0_1_2)##init7_s3_0_1_2/Opset.make_node.1/Shape##init7_s2_-1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='fill::T10' type=float32 shape=(3, 2) -- GraphBuilder.constant_folding.from/fold(fill)##fill/
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.0##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_2::RSh1' type=float32 shape=(1,) -- array([3.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_2,init7_s1_1)##init1_s_2/shape_type_compute._cast_inputs.0##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
Shape(x, end=1, start=0) -> x::Shape:1
Squeeze(x::Shape:1) -> x::Shape:1::Sq
Range(init7_s_0, x::Shape:1::Sq, init7_s_1) -> _onx_range_init7_s_0
Slice(_onx_range_init7_s_0, init7_s1_0, init7_s1_2, init7_s1_0, init7_s1_1) -> _onx_slice_range_init7_s_0
Unsqueeze(_onx_slice_range_init7_s_0, init7_s1_1) -> _onx_slice_range_init7_s_0::RSh-1x1
Slice(x, init7_s1_0, init7_s1_2, init7_s1_0) -> slice_3
Transpose(slice_3, perm=[1,0]) -> slice_3::T10
ScatterND(slice_3::T10, init7_s3_0_1_2::RSh-1x1, fill::T10) -> _onx_scatternd_slice_3::T10
Transpose(_onx_scatternd_slice_3::T10, perm=[1,0]) -> slice_scatter
ScatterND(x, _onx_slice_range_init7_s_0::RSh-1x1, slice_scatter) -> output_0
Add(output_0, init1_s_::RSh1) -> output_1
Add(output_0, init1_s_2::RSh1) -> output_2
output: name='output_1' type=dtype('float32') shape=['batch', 5]
output: name='output_2' type=dtype('float32') shape=['batch', 5]
dynamo-ir#
inputs:
#2[(T1s5x5,),(T1s7x5,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='val_3' type=int64 shape=(1,) -- array([0])
init: name='val_7' type=int64 shape=(1,) -- array([2])
init: name='val_18' type=int64 shape=(1,) -- array([3])
init: name='val_22' type=int64 shape=(1,) -- array([1])
init: name='value_0' type=float32 shape=() -- array([1.], dtype=float32)
init: name='scalar_tensor_default' type=float32 shape=() -- array([2.], dtype=float32)
init: name='scalar_tensor_default_1' type=float32 shape=() -- array([3.], dtype=float32)
init: name='val_0' type=int64 shape=() -- array([0])
init: name='val_19' type=int64 shape=() -- array([1])
init: name='val_42' type=int64 shape=(1,) -- array([-1])
Shape(x, start=0) -> val_48
Gather(val_48, val_0, axis=0) -> val_49
Range(val_0, val_49, val_19) -> val_50
Slice(val_50, val_3, val_7, val_3, val_22) -> val_54
Unsqueeze(val_54, val_42) -> val_55
Slice(x, val_3, val_7, val_3, val_22) -> slice_1
Slice(slice_1, val_3, val_18, val_22, val_22) -> slice_2
Shape(slice_2) -> shape
Expand(value_0, shape) -> fill
Transpose(fill, perm=[1,0]) -> val_44
Shape(slice_1, start=0) -> val_35
Gather(val_35, val_19, axis=0) -> val_36
Range(val_0, val_36, val_19) -> val_37
Slice(val_37, val_3, val_18, val_3, val_22) -> val_41
Unsqueeze(val_41, val_42) -> val_43
Transpose(slice_1, perm=[1,0]) -> val_45
ScatterND(val_45, val_43, val_44, reduction=b'none') -> val_46
Transpose(val_46, perm=[1,0]) -> slice_scatter
ScatterND(x, val_55, slice_scatter, reduction=b'none') -> slice_scatter_1
Add(slice_scatter_1, scalar_tensor_default) -> add_12
Add(slice_scatter_1, scalar_tensor_default_1) -> add_16
output: name='add_12' type=dtype('float32') shape=['batch', 5]
output: name='add_16' type=dtype('float32') shape=['batch', 5]
tracing#
inputs:
#2[(T1s5x5,),(T1s7x5,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
opset: domain='aten' version=1
input: name='x' type=dtype('float32') shape=['batch', 5]
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([2.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.1(add)##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init1_s_2::RSh1' type=float32 shape=(1,) -- array([3.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_2,init7_s1_1)##init1_s_2/shape_type_compute._cast_inputs.1(add)##init7_s1_1/Opset.make_node.1/Shape##Opset.make_node.1/Shape
aten_setitem[aten](x) -> setitem
Add(setitem, init1_s_::RSh1) -> output_0
Add(setitem, init1_s_2::RSh1) -> output_1
output: name='output_0' type=dtype('float32') shape=['batch', 5]
output: name='output_1' type=dtype('float32') shape=['batch', 5]
----- function name=aten_setitem domain=aten
----- doc_string: -- function_options=FunctionOptions(export_as_function=...
opset: domain='' version=21
input: 'x'
Constant(value=1.0) -> setitem_val
Constant(value=[0]) -> init7_s1_0
Concat(init7_s1_0, init7_s1_0, axis=0) -> _onx_concat_init7_s1_0
Constant(value=2) -> init7_s_2
Constant(value=3) -> init7_s_3
Shape(x, end=1, start=0) -> x::Shape:1
Squeeze(x::Shape:1, init7_s1_0) -> x::Shape:1::Sq0
Sub(x::Shape:1::Sq0, init7_s_2) -> _onx_sub_x::Shape:1::Sq0
Unsqueeze(_onx_sub_x::Shape:1::Sq0, init7_s1_0) -> _onx_sub_x::Shape:1::Sq0::UnSq0
Shape(x, end=2, start=1) -> x::Shape1:2
Squeeze(x::Shape1:2, init7_s1_0) -> x::Shape1:2::Sq0
Sub(x::Shape1:2::Sq0, init7_s_3) -> _onx_sub_x::Shape1:2::Sq0
Unsqueeze(_onx_sub_x::Shape1:2::Sq0, init7_s1_0) -> _onx_sub_x::Shape1:2::Sq0::UnSq0
Concat(_onx_sub_x::Shape:1::Sq0::UnSq0, _onx_sub_x::Shape1:2::Sq0::UnSq0, axis=0) -> _onx_concat_sub_x::Shape:1::Sq0::UnSq0
Add(_onx_concat_init7_s1_0, _onx_concat_sub_x::Shape:1::Sq0::UnSq0) -> _onx_add_concat_init7_s1_0
Shape(x) -> x::Shape:
Sub(x::Shape:, _onx_add_concat_init7_s1_0) -> _onx_sub_x::Shape:
Expand(setitem_val, _onx_sub_x::Shape:) -> _onx_expand_setitem_val
Shape(_onx_expand_setitem_val) -> _onx_expand_setitem_val::Shape:
ConstantOfShape(_onx_expand_setitem_val::Shape:, value=[0.0]) -> _onx_constantofshape_expand_setitem_val::Shape:
Unsqueeze(_onx_sub_x::Shape:1::Sq0, init7_s1_0) -> _onx_sub_x::Shape:1::Sq0::UnSq02
Unsqueeze(_onx_sub_x::Shape1:2::Sq0, init7_s1_0) -> _onx_sub_x::Shape1:2::Sq0::UnSq02
Concat(init7_s1_0, init7_s1_0, _onx_sub_x::Shape:1::Sq0::UnSq02, _onx_sub_x::Shape1:2::Sq0::UnSq02, axis=0) -> _onx_concat_init7_s1_02
Pad(_onx_expand_setitem_val, _onx_concat_init7_s1_02) -> _onx_expand_setitem_val_padded
Pad(_onx_constantofshape_expand_setitem_val::Shape:, _onx_concat_init7_s1_02, setitem_val) -> x_mask
Mul(x, x_mask) -> _onx_mul_x
Add(_onx_mul_x, _onx_expand_setitem_val_padded) -> setitem
output: name='setitem' type=? shape=?
new-tracing#
FAILED
Unable to interpret function <class 'torch._ops.OpOverload'>: <OpOverload(op='aten.fill_', overload='Tensor')>, searched for ['aten_fill__Tensor', 'fill__Tensor'] and attributes ['__qualname__', '__name__'], args=(slice_tensor, param_1), kwargs={}, dispatcher=None
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-RAE] Message starts, there are 4 initializers, 1 nodes, 1 inputs, 1 outputs.
input_names=['x']
output_names=[]
--SHAPE--
_dynamic_examples=
dynamic_objects=
batch = 'batch'
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=['x']
output_dynamic_dimensions_source_flat=None
dynamic_alias={}
dynamic_shapes={'x': {0: Dim('batch', min=0)}}
_known_shapes={'init7_s1_0': (1,),
'init7_s1_1': (1,),
'init7_s1_3': (1,),
'param_1': (),
'slice_tensor': ('batch', 3),
'x': ('batch', 5)}
_known_types={'init7_s1_0': 7,
'init7_s1_1': 7,
'init7_s1_3': 7,
'param_1': 1,
'slice_tensor': 1,
'x': 1}
_known_devices={'slice_tensor': -1, 'x': -1}
_context=[]
_known_value_shape={'init7_s1_0': (0,), 'init7_s1_1': (1,), 'init7_s1_3': (3,)}
_known_constants=['init7_s1_0', 'init7_s1_1', 'init7_s1_3', 'param_1']
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
fill__tensor -> set()
param_1 -> {fill__tensor}
slice_tensor -> {fill__tensor}
x -> {add_tensor_1, slice_tensor, add_tensor}
--TORCH-SHAPES--
x: ('run_node', ('', ('val', torch.float32, TracingShape([TracingInt('batch'), 5])))) --- 1:2:('batch', 5):
slice_tensor: ('run_node', ('', ('val', torch.float32, TracingShape([TracingInt('batch'), 3])))) --- 1:2:('batch', 3):
param_1: ('run_node', ('', ('val', torch.float32, TracingShape([])))) --- 1:0:():
fill__tensor: ('run_node', ('', ('val', torch.float32, TracingShape([TracingInt('batch'), 3])))) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(tracing=TracingMode.NEW_TRACING, aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
InplaceSetItemSquareAdd2()
def forward(self, x, param_1):
slice_tensor = torch.ops.aten.slice.Tensor(x, 1, 0, 3)
fill__tensor = torch.ops.aten.fill_.Tensor(slice_tensor, param_1); slice_tensor = param_1 = fill__tensor = None
add_tensor = torch.ops.aten.add.Tensor(x, 2)
add_tensor_1 = torch.ops.aten.add.Tensor(x, 3); x = None
return (add_tensor, add_tensor_1)
# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
%x : [num_users=3] = placeholder[target=x]
%slice_tensor : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 1, 0, 3), kwargs = {})
%param_1 : [num_users=1] = placeholder[target=param_1]
%fill__tensor : [num_users=0] = call_function[target=torch.ops.aten.fill_.Tensor](args = (%slice_tensor, %param_1), kwargs = {})
%add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, 2), kwargs = {})
%add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, 3), kwargs = {})
return (add_tensor, add_tensor_1)
-- process.inputs_to_remove --
set()
-- process.progress --
node 3/7 target=aten.fill_.Tensor
-- 1 INPUTS
[GraphBuilder-RAE.1.make_tensor_input] x[1:batchx5]
-- 4 INITIALIZERS
[GraphBuilder-RAE.1.make_initializer] init7_s1_0[int64:int64:[0]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-RAE.1.make_initializer] init7_s1_3[int64:int64:[3]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-RAE.1.make_initializer] init7_s1_1[int64:int64:[1]] - SOURCE: Opset.make_node.1/Shape
[GraphBuilder-RAE.1.make_initializer] param_1[torch.float32:torch.float32:[1.0]] - SOURCE: DynamoInterpret.placeholder.0
[GraphBuilder-RAE.4.make_node] slice_Tensor [@###:@] Slice:['x', 'init7_s1_0', 'init7_s1_3', 'init7_s1_1']->['slice_tensor']
-- 0 OUTPUTS
[GraphBuilder-RAE] Message completed, there are 4 initializers, 1 nodes, 1 inputs, 1 outputs.,
SignatureFloat1#
code: yobx.torch.testing._model_eval_cases.SignatureFloat1
forward#
def forward(self, x, alpha: float = 2.0):
return torch.sigmoid(self.linear(x)) - self.buff * alpha
yobx#
inputs:
#2[(T1s4x3,float),(T1s8x3,float)]shapes:
({0:Dim(batch)},None)
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='mul' type=float32 shape=(1,) -- array([0.75], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_onx_mul_b_buff)##_onx_mul_b_buff/GraphBuilder.constant_folding.from/fold(b_buff,init1_s_::RSh1)##b_buff/DynamoInterpret.placeholder.0##init1_s_::RSh1/GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.1(mul_Tensor)##init7_s1_1/Opset.make_node.1/Shape
init: name='GemmTransposePattern--p_linear_weight::T10' type=float32 shape=(1, 3) -- array([-0.51618 , 0.09904081, -0.01243608], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s2_1_3,p_linear_weight::T10)##p_linear_weight::T10/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.28516936], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--p_linear_weight::T10, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
FAILED
Input mismatch, inputs[0]=(T1r2,float) but names=['x'], model=SignatureFloat1, export='yobx'
dynamo-ir#
inputs:
#2[(T1s4x3,float),(T1s8x3,float)]shapes:
({0:Dim(batch)},None)
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['s77', 3]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.37671953, -0.2158258 , 0.30445737], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.45429665], dtype=float32)
init: name='mul_2' type=float32 shape=(1,) -- array([0.75], dtype=float32)
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, mul_2) -> sub_2
output: name='sub_2' type=dtype('float32') shape=['s77', 1]
FAILED
Input mismatch, inputs[0]=(T1r2,float) but names=['x'], model=SignatureFloat1, export='dynamo-ir'
tracing#
FAILED
Unable to interpret method 'aten_meth_mul', args=(buff, alpha), kwargs={}, dispatcher=None
--DEBUG--
-- to print the exported program: PRINT_EXPORTED_PROGRAM=1
[GraphBuilder-NKI] Message starts, there are 3 initializers, 7 nodes, 2 inputs, 2 outputs.
input_names=['x', 'alpha']
output_names=[]
--CONSTRAINTS--
batch = {'s26'}
s26 = {'batch'}
--SHAPE--
_dynamic_examples=
dynamic_objects=
batch = WrapSym(batch)
dynamic_objects_rev=
'batch' = <class 'list'>
tuple
'batch'
ERR**: <class 'torch.SymInt'>:'batch'
dynamic_dimensions_source={'batch': [{'axis': 0, 'input_name': 'x'}]}
dynamic_dimensions_source_flat=[0]
output_dynamic_dimensions_source_flat=None
dynamic_alias={'s26': 'batch'}
dynamic_shapes=({0: Dim('batch', min=1, max=1024)}, None)
_known_shapes={'_sub_ime__linear__onx_matmul_input_1': ('batch', 1),
'_sub_ime__linear_input_1': ('batch', 3),
'_sub_ime__linear_linear': ('batch', 1),
'_sub_ime__linear_output': ('batch', 1),
'_sub_ime__linear_weight::T10': (3, 1),
'alpha': (),
'buff': (1,),
'linear': ('batch', 1),
'linear.bias': (1,),
'linear.weight': (1, 3),
'sigmoid': ('batch', 1),
'x': ('batch', 3)}
_known_types={'_sub_ime__linear__onx_matmul_input_1': 1,
'_sub_ime__linear_input_1': 1,
'_sub_ime__linear_linear': 1,
'_sub_ime__linear_output': 1,
'_sub_ime__linear_weight::T10': 1,
'alpha': 1,
'buff': 1,
'linear': 1,
'linear.bias': 1,
'linear.weight': 1,
'sigmoid': 1,
'x': 1}
_known_devices={'_sub_ime__linear_input_1': -1, 'alpha': -1, 'x': -1}
_context=[]
_known_value_shape={}
_known_constants=['_sub_ime__linear_weight::T10', 'buff', 'linear.bias', 'linear.weight']
_known_ranks (with no shape)={}
--PARAMETERS--
_parameter_renaming=
--TORCH-USERS--
alpha -> {mul}
buff -> {mul}
linear -> {sigmoid}
mul -> {sub}
sigmoid -> {sub}
x -> {linear}
--TORCH-SHAPES--
x: ('run_node', (('example_value', torch.float32, torch.Size([4, 3])), ('val', torch.float32, torch.Size([s26, 3])))) --- 1:2:('batch', 3):
alpha: ('run_node', (('example_value', torch.float32, torch.Size([])), ('val', torch.float32, torch.Size([])))) --- 1:0:():
linear: ('run_node', ('', '')) --- 1:2:('batch', 1):
sigmoid: ('run_node', ('', '')) --- 1:2:('batch', 1):
buff: ('run_node', ('', '')) --- 1:1:(1,):
mul: ('run_node', ('', '')) --- :::
--ONNX--
-- EXEPATH --
export
export_options=ExportOptions(tracing=TracingMode.TRACING, aten_as_function=('aten.histc.default', 'aten.index_copy.default', 'aten.index_put.default', 'aten._grouped_mm.default', 'aten.setitem', <built-in function setitem>))
function_options=None
-- process.graph_module --
SignatureFloat1(
(linear): Linear(in_features=3, out_features=1, bias=True)
)
def forward(self, x, alpha : float = 2.0):
linear = self.linear(x); x = None
sigmoid = torch.sigmoid(linear); linear = None
buff = self.buff
mul = buff.mul(alpha); buff = alpha = None
sub = sigmoid - mul; sigmoid = mul = None
return sub
# To see more debug info, please use `graph_module.print_readable()`
-- process.graph_module.graph --
graph():
%x : [num_users=1] = placeholder[target=x]
%alpha : float [num_users=1] = placeholder[target=alpha](default=2.0)
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%mul : [num_users=1] = call_method[target=mul](args = (%buff, %alpha), kwargs = {})
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %mul), kwargs = {})
return sub
-- process.inputs_to_remove --
set()
-- process.progress --
node 5/8 target=mul
-- 2 INPUTS
[GraphBuilder-NKI.1.make_tensor_input] x[1:batchx3]
[GraphBuilder-NKI.1.make_tensor_input] alpha[1:]
-- 3 INITIALIZERS
[GraphBuilder-NKI.1.make_initializer] linear.weight[torch.float32:torch.float32:[0.03782457485795021, -0.1351017951965332, -0.06911272555589676]] - SOURCE: GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)
[GraphBuilder-NKI.1.make_initializer] linear.bias[torch.float32:torch.float32:[-0.10022220015525818]] - SOURCE: GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
[GraphBuilder-NKI.1.make_initializer] buff[torch.float32:torch.float32:[0.5]] - SOURCE: DynamoInterpret.get_attr.0
[GraphBuilder-NKI.4.make_node] .make_nodes [@:@ ] Identity:['x']->['_sub_ime__linear_input_1']
[GraphBuilder-NKI.4.make_node] linear [#:# ] Transpose:['linear.weight']->['_sub_ime__linear_weight::T10']
[GraphBuilder-NKI.4.make_node] Opset [@#:# ] MatMul:['_sub_ime__linear_input_1', '_sub_ime__linear_weight::T10']->['_sub_ime__linear__onx_matmul_input_1']
[GraphBuilder-NKI.4.make_node] Opset2 [##:# ] Add:['_sub_ime__linear__onx_matmul_input_1', 'linear.bias']->['_sub_ime__linear_linear']
[GraphBuilder-NKI.4.make_node] .output [#:# ] Identity:['_sub_ime__linear_linear']->['_sub_ime__linear_output']
[GraphBuilder-NKI.4.make_node] .make_nodes2 [#:# ] Identity:['_sub_ime__linear_output']->['linear']
[GraphBuilder-NKI.4.make_node] sigmoid [#:# ] Sigmoid:['linear']->['sigmoid']
-- 0 OUTPUTS
[GraphBuilder-NKI] Message completed, there are 3 initializers, 7 nodes, 2 inputs, 2 outputs.
new-tracing#
FAILED
Unable to convert TracingInt('batch') into string
SignatureInt1#
code: yobx.torch.testing._model_eval_cases.SignatureInt1
forward#
def forward(self, x, i: int = 2):
return torch.sigmoid(self.linear(x)) - self.buff + x[:, i : i + 1]
yobx#
inputs:
#2[(T1s4x3,int),(T1s8x3,int)]shapes:
({0:Dim(batch)},None)
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_2' type=int64 shape=(1,) -- array([2]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--p_linear_weight::T10' type=float32 shape=(1, 3) -- array([-0.33427685, 0.38108557, -0.5341409 ], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s2_1_3,p_linear_weight::T10)##p_linear_weight::T10/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.0069776], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--p_linear_weight::T10, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
Slice(x, init7_s1_1, init7_s1_2, init7_s1_1) -> slice_1
Add(sub, slice_1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
FAILED
Input mismatch, inputs[0]=(T1r2,int) but names=['x'], model=SignatureInt1, export='yobx'
dynamo-ir#
inputs:
#2[(T1s4x3,int),(T1s8x3,int)]shapes:
({0:Dim(batch)},None)
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['s77', 3]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.32234368, 0.20540713, -0.42376068], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.45084313], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='val_3' type=int64 shape=(1,) -- array([1])
init: name='val_7' type=int64 shape=(1,) -- array([2])
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, buff) -> sub_2
Slice(x, val_3, val_7, val_3, val_3) -> slice_1
Add(sub_2, slice_1) -> add_12
output: name='add_12' type=dtype('float32') shape=['s77', 1]
FAILED
Input mismatch, inputs[0]=(T1r2,int) but names=['x'], model=SignatureInt1, export='dynamo-ir'
tracing#
FAILED
[ONNXRuntimeError] : 6 : RUNTIME_EXCEPTION : Non-zero status code returned while running Concat node. Name:'_getitem_slicenSD' Status Message: /onnxruntime_src/onnxruntime/core/providers/cpu/tensor/concat.cc:139 onnxruntime::common::Status onnxruntime::ConcatBase::PrepareForCompute(onnxruntime::OpKernelContext*, const InlinedTensorsVector&, onnxruntime::Prepare&) const input_rank == reference_rank was false. Ranks of input data are different, cannot concatenate them. expected rank: 1 got: 2
new-tracing#
FAILED
Unable to convert TracingInt('batch') into string
SignatureInt2#
code: yobx.torch.testing._model_eval_cases.SignatureInt2
forward#
def forward(self, x, i: int = 2):
return torch.sigmoid(self.linear(x)) - self.buff + x[:, i]
yobx#
inputs:
#1[(T1s4x3,int)]shapes:
dict(x:{0:Dim(batch)},i:None)
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--p_linear_weight::T10' type=float32 shape=(1, 3) -- array([-0.29279187, 0.1419851 , 0.39510235], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s2_1_3,p_linear_weight::T10)##p_linear_weight::T10/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.1290936], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gather(x, init7_s_1, axis=1) -> select
Gemm(x, GemmTransposePattern--p_linear_weight::T10, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
Add(sub, select) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'batch']
FAILED
Input mismatch, inputs[0]=(T1r2,int) but names=['x'], model=SignatureInt2, export='yobx'
dynamo-ir#
inputs:
#1[(T1s4x3,int)]shapes:
dict(x:{0:Dim(batch)},i:None)
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['s77', 3]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.21152173, -0.01215376, 0.3124781 ], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.37264183], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='val_12' type=int64 shape=() -- array([1])
Gather(x, val_12, axis=1) -> select
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, buff) -> sub_2
Add(sub_2, select) -> add_14
output: name='add_14' type=dtype('float32') shape=['s77', 's77']
FAILED
Input mismatch, inputs[0]=(T1r2,int) but names=['x'], model=SignatureInt2, export='dynamo-ir'
tracing#
inputs:
#1[(T1s4x3,int)]shapes:
dict(x:{0:Dim(batch)},i:None)
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='i' type=dtype('int64') shape=None
init: name='linear.bias' type=float32 shape=(1,) -- array([0.53036976], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.get_attr.0
init: name='getitem_axis' type=int64 shape=(2,) -- array([0, 1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_axis_0' type=int64 shape=(1,) -- array([0]) -- DynamoInterpreter._getitem_slice.axis.2##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- DynamoInterpreter._getitem_slice.int_end
init: name='getitem_step' type=int64 shape=(2,) -- array([1, 1]) -- DynamoInterpreter._getitem_slice.3
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_sub_ime__linear_weight::T10' type=float32 shape=(1, 3) -- array([-0.39079183, 0.3826622 , -0.3570949 ], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_sub_ime__linear_weight::T10,init7_s2_1_3)##_sub_ime__linear_weight::T10/GraphBuilder.constant_folding.from/fold(linear.weight)##linear.weight/GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
Add(i, init7_s_1) -> getitem_slice_end_1
Unsqueeze(getitem_slice_end_1, getitem_axis_0) -> getitem_slice_end_1::UnSq0
Shape(x) -> getitem_shape
GatherElements(getitem_shape, getitem_axis_0) -> getitem_end
Concat(getitem_end, getitem_slice_end_1::UnSq0, axis=0) -> _onx_concat_getitem_end
Gemm(x, GemmTransposePattern--_sub_ime__linear_weight::T10, linear.bias, transB=1) -> _sub_ime__linear_linear
Sigmoid(_sub_ime__linear_linear) -> sigmoid
Sub(sigmoid, buff) -> sub
Unsqueeze(i, getitem_axis_0) -> i::UnSq0
Concat(getitem_axis_0, i::UnSq0, axis=0) -> _onx_concat_getitem_axis_0
Slice(x, _onx_concat_getitem_axis_0, _onx_concat_getitem_end, getitem_axis, getitem_step) -> getitem_sliced
Squeeze(getitem_sliced, init7_s1_1) -> getitem
Add(sub, getitem) -> output
output: name='output' type=dtype('float32') shape=['batch', 'NEWDIM_slice']
FAILED
[ONNXRuntimeError] : 6 : RUNTIME_EXCEPTION : Non-zero status code returned while running Concat node. Name:'_getitem_slicenSD' Status Message: /onnxruntime_src/onnxruntime/core/providers/cpu/tensor/concat.cc:139 onnxruntime::common::Status onnxruntime::ConcatBase::PrepareForCompute(onnxruntime::OpKernelContext*, const InlinedTensorsVector&, onnxruntime::Prepare&) const input_rank == reference_rank was false. Ranks of input data are different, cannot concatenate them. expected rank: 1 got: 2
new-tracing#
inputs:
#1[(T1s4x3,int)]shapes:
dict(x:{0:Dim(batch)},i:None)
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s_1' type=int64 shape=() -- array([1]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--param_1' type=float32 shape=(1, 3) -- array([ 0.19034088, -0.14372618, 0.5627756 ], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s2_1_3,param_1)##param_1/DynamoInterpret.placeholder.0##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.19902611], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gather(x, init7_s_1, axis=1) -> select_int
Gemm(x, GemmTransposePattern--param_1, linear.bias, transB=1) -> addmm_default
Sigmoid(addmm_default) -> sigmoid_default
Sub(sigmoid_default, buff) -> sub_tensor
Add(sub_tensor, select_int) -> output
output: name='output' type=dtype('float32') shape=['batch', 'batch']
FAILED
Input mismatch, inputs[0]=(T1r2,int) but names=['x'], model=SignatureInt2, export='new-tracing'
SignatureListFixedLength#
code: yobx.torch.testing._model_eval_cases.SignatureListFixedLength
forward#
def forward(self, x, lx: list):
return torch.sigmoid(self.linear(x)) - self.buff + lx[0] * lx[1].sum(axis=1, keepdim=True)
yobx#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--p_linear_weight::T10' type=float32 shape=(1, 3) -- array([ 0.46800545, -0.1644351 , -0.00810799], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s2_1_3,p_linear_weight::T10)##p_linear_weight::T10/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.39386934], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--p_linear_weight::T10, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
dynamo-ir#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.18170342, -0.2575322 , -0.29987118], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([0.26654318], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='val_3' type=int64 shape=(1,) -- array([1])
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, buff) -> sub_2
ReduceSum(lx_1, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul_4
Add(sub_2, mul_4) -> add_15
output: name='add_15' type=dtype('float32') shape=['batch', 1]
tracing#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='_traced_m2.linear.bias' type=float32 shape=(1,) -- array([-0.2663221], dtype=float32)-- GraphBuilder.make_nodes/from_traced_m2.linear.bias##DynamoInterpret.get_attr.1/P(_traced_m2.linear.bias)
init: name='_traced_m2_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.get_attr.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--_sub_ime___traced_m2_linear_weight::T10' type=float32 shape=(1, 3) -- array([-0.34504578, -0.10411504, -0.2340521 ], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_sub_ime___traced_m2_linear_weight::T10,init7_s2_1_3)##_sub_ime___traced_m2_linear_weight::T10/GraphBuilder.constant_folding.from/fold(_traced_m2.linear.weight)##_traced_m2.linear.weight/GraphBuilder.make_nodes/from_traced_m2.linear.weight##DynamoInterpret.get_attr.1/P(_traced_m2.linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
Gemm(x, GemmTransposePattern--_sub_ime___traced_m2_linear_weight::T10, _traced_m2.linear.bias, transB=1) -> _sub_ime___traced_m2_linear_linear
Sigmoid(_sub_ime___traced_m2_linear_linear) -> sigmoid
Sub(sigmoid, _traced_m2_buff) -> sub
ReduceSum(lx_1, init7_s1_1, keepdims=1) -> sum_1
Mul(lx_0, sum_1) -> mul
Add(sub, mul) -> output
output: name='output' type=dtype('float32') shape=['batch', 1]
new-tracing#
FAILED
'Dim' object has no attribute 'items'
SignatureListFixedWithNone#
code: yobx.torch.testing._model_eval_cases.SignatureListFixedWithNone
forward#
def forward(self, lx):
x = lx[0]
if lx[1] is not None:
x += lx[1]
if lx[2] is not None:
x += lx[2]
return x
yobx#
FAILED
Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements
For more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
dynamo-ir#
FAILED
Failed to export the model with torch.export. This is step 1/3 of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
- Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the *torch.export* component and attach the full error stack as well as reproduction scripts.
## Exception summary
<class 'torch._dynamo.exc.UserError'>: Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements
For more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
(Refer to the full stack trace above for more information.)
tracing#
FAILED
Length mismatch between x (len=3) and dynamic_shapes (len=2); dynamic_shapes must have one entry per element of x, or be None to use no dynamic dimensions, dynamic_shapes=[{0: Dim('batch', min=0)}, {0: Dim('batch', min=0)}]
new-tracing#
FAILED
Length mismatch between arg (3) and the dynamic_shapes (2), name='lx'
SignatureListVariableLength#
code: yobx.torch.testing._model_eval_cases.SignatureListVariableLength
forward#
def forward(self, x, lx: list):
t = torch.cat(lx, dim=1).sum(dim=1, keepdim=True)
return torch.sigmoid(self.linear(x)) - self.buff + t
yobx#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#3[T1s8x1,T1s8x2,T1s8x3])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='b_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.placeholder.0
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--p_linear_weight::T10' type=float32 shape=(1, 3) -- array([ 0.07056267, -0.48215383, -0.01945801], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s2_1_3,p_linear_weight::T10)##p_linear_weight::T10/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.31997368], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Concat(lx_0, lx_1, axis=1) -> cat
ReduceSum(cat, init7_s1_1, keepdims=1) -> sum_1
Gemm(x, GemmTransposePattern--p_linear_weight::T10, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, b_buff) -> sub
Add(sub, sum_1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 1]
FAILED
diff.1
dynamo-ir#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#3[T1s8x1,T1s8x2,T1s8x3])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='linear.weight' type=float32 shape=(1, 3) -- array([ 0.1020776 , -0.04105373, -0.31296235], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.12330545], dtype=float32)
init: name='buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)
init: name='val_3' type=int64 shape=(1,) -- array([1])
Concat(lx_0, lx_1, axis=1) -> cat
ReduceSum(cat, val_3, noop_with_empty_axes=0, keepdims=1) -> sum_1
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Sub(sigmoid, buff) -> sub_4
Add(sub_4, sum_1) -> add_15
output: name='add_15' type=dtype('float32') shape=['batch', 1]
FAILED
diff.1
tracing#
inputs:
#2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#3[T1s8x1,T1s8x2,T1s8x3])]shapes:
dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='lx_0' type=dtype('float32') shape=['batch', 1]
input: name='lx_1' type=dtype('float32') shape=['batch', 2]
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='_traced_m2.linear.bias' type=float32 shape=(1,) -- array([-0.38783452], dtype=float32)-- GraphBuilder.make_nodes/from_traced_m2.linear.bias##DynamoInterpret.get_attr.1/P(_traced_m2.linear.bias)
init: name='_traced_m2_buff' type=float32 shape=(1,) -- array([0.5], dtype=float32)-- DynamoInterpret.get_attr.0
init: name='GemmTransposePattern--_sub_ime___traced_m2_linear_weight::T10' type=float32 shape=(1, 3) -- array([ 0.3161527 , 0.33454782, -0.29694074], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_sub_ime___traced_m2_linear_weight::T10,init7_s2_1_3)##_sub_ime___traced_m2_linear_weight::T10/GraphBuilder.constant_folding.from/fold(_traced_m2.linear.weight)##_traced_m2.linear.weight/GraphBuilder.make_nodes/from_traced_m2.linear.weight##DynamoInterpret.get_attr.1/P(_traced_m2.linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
Concat(lx_0, lx_1, axis=1) -> cat
ReduceSum(cat, init7_s1_1, keepdims=1) -> sum_1
Gemm(x, GemmTransposePattern--_sub_ime___traced_m2_linear_weight::T10, _traced_m2.linear.bias, transB=1) -> _sub_ime___traced_m2_linear_linear
Sigmoid(_sub_ime___traced_m2_linear_linear) -> sigmoid
Sub(sigmoid, _traced_m2_buff) -> sub
Add(sub, sum_1) -> output
output: name='output' type=dtype('float32') shape=['batch', 1]
FAILED
diff.1
new-tracing#
FAILED
'Dim' object has no attribute 'items'
SignatureShapeAsIndex#
code: yobx.torch.testing._model_eval_cases.SignatureShapeAsIndex
forward#
def forward(self, x, y):
t = torch.sigmoid(self.linear(x)) + x
return t[:, : y.shape[1]]
yobx#
inputs:
#1[(T1s4x3,T1s4x2)]shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(batch),1:Dim(length)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 'length']
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='GemmTransposePattern--p_linear_weight::T10' type=float32 shape=(1, 3) -- array([ 0.269283 , -0.04220374, 0.06395845], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s2_1_3,p_linear_weight::T10)##p_linear_weight::T10/GraphBuilder.constant_folding.from/fold(p_linear_weight)##p_linear_weight/DynamoInterpret.placeholder.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
init: name='linear.bias' type=float32 shape=(1,) -- array([0.05627105], dtype=float32)-- DynamoInterpret.placeholder.1/P(linear.bias)
Gemm(x, GemmTransposePattern--p_linear_weight::T10, linear.bias, transB=1) -> linear
Sigmoid(linear) -> sigmoid
Add(sigmoid, x) -> add
Shape(y, end=2, start=1) -> y::Shape1:2
Slice(add, init7_s1_0, y::Shape1:2, init7_s1_1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 'length']
dynamo-ir#
inputs:
#1[(T1s4x3,T1s4x2)]shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(batch),1:Dim(length)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 'length']
init: name='linear.weight' type=float32 shape=(1, 3) -- array([-0.11689394, 0.10962067, 0.47619072], dtype=float32)
init: name='linear.bias' type=float32 shape=(1,) -- array([-0.07751294], dtype=float32)
init: name='val_8' type=int64 shape=(1,) -- array([1])
init: name='val_1' type=int64 shape=(1,) -- array([0])
Gemm(x, linear.weight, linear.bias, beta=1.00, transB=1, alpha=1.00, transA=0) -> linear
Sigmoid(linear) -> sigmoid
Add(sigmoid, x) -> add_6
Shape(y, end=2, start=1) -> val_0
Slice(add_6, val_1, val_0, val_8, val_8) -> slice_1
output: name='slice_1' type=dtype('float32') shape=['batch', 'length']
tracing#
inputs:
#1[(T1s4x3,T1s4x2)]shapes:
dict(x:{0:Dim(batch)},y:{0:Dim(batch),1:Dim(length)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 3]
input: name='y' type=dtype('float32') shape=['batch', 'length']
init: name='linear.bias' type=float32 shape=(1,) -- array([0.37225145], dtype=float32)-- GraphBuilder.make_nodes/fromlinear.bias##DynamoInterpret.get_attr.1/P(linear.bias)
init: name='init7_s1_0' type=int64 shape=(1,) -- array([0]) -- Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape##Opset.make_node.1/Shape
init: name='init7_s1_1' type=int64 shape=(1,) -- array([1]) -- Opset.make_node.1/Shape
init: name='getitem_2_axis' type=int64 shape=(2,) -- array([0, 1]) -- DynamoInterpreter._getitem_slice.axis.1
init: name='getitem_2_start' type=int64 shape=(2,) -- array([0, 0]) -- DynamoInterpreter._getitem_slice.2
init: name='getitem_2_step' type=int64 shape=(2,) -- array([1, 1]) -- DynamoInterpreter._getitem_slice.3
init: name='GemmTransposePattern--_sub_ime__linear_weight::T10' type=float32 shape=(1, 3) -- array([-0.21513638, -0.32068107, 0.3226202 ], dtype=float32)-- GraphBuilder.constant_folding.from/fold(_sub_ime__linear_weight::T10,init7_s2_1_3)##_sub_ime__linear_weight::T10/GraphBuilder.constant_folding.from/fold(linear.weight)##linear.weight/GraphBuilder.make_nodes/fromlinear.weight##DynamoInterpret.get_attr.1/P(linear.weight)##init7_s2_1_3/TransposeEqualReshapePattern.apply.new_shape
Gemm(x, GemmTransposePattern--_sub_ime__linear_weight::T10, linear.bias, transB=1) -> _sub_ime__linear_linear
Sigmoid(_sub_ime__linear_linear) -> sigmoid
Add(sigmoid, x) -> add
Shape(add) -> getitem_2_shape
GatherElements(getitem_2_shape, init7_s1_0) -> getitem_2_end
Shape(y) -> size
Gather(size, init7_s1_1) -> _onx_gather_size2
Concat(getitem_2_end, _onx_gather_size2, axis=0) -> _onx_concat_getitem_2_end
Slice(add, getitem_2_start, _onx_concat_getitem_2_end, getitem_2_axis, getitem_2_step) -> output
output: name='output' type=dtype('float32') shape=['NEWDIM_slice', 'NEWDIM_slice1']
new-tracing#
FAILED
TracingInt('length') has no concrete integer value; pass a concrete int or check .value
TypeBFloat16#
code: yobx.torch.testing._model_eval_cases.TypeBFloat16
forward#
def forward(self, x):
xb = x.to(torch.bfloat16)
return (xb + xb).to(torch.float32)
yobx#
inputs:
#1[(T1s4x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
Add(x, x) -> add-x
Cast(add-x, to=16) -> add
Cast(add, to=1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch', 4]
dynamo-ir#
inputs:
#1[(T1s4x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['batch', 4]
Cast(x, to=16) -> _to_copy
Add(_to_copy, _to_copy) -> add_3
Cast(add_3, to=1) -> _to_copy_1
output: name='_to_copy_1' type=dtype('float32') shape=['batch', 4]
FAILED
[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(14) node with name 'node_add_3'
tracing#
inputs:
#1[(T1s4x4,)]shapes:
dict(x:{0:Dim(batch)})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch', 4]
Add(x, x) -> add-x
Cast(add-x, to=16) -> add
Cast(add, to=1) -> output
output: name='output' type=dtype('float32') shape=['batch', 4]
new-tracing#
FAILED
Cannot determine if it is a constant argument dtype(torch.bfloat16)
Vmap#
code: yobx.torch.testing._model_eval_cases.Vmap
forward#
def forward(self, x, y):
f = lambda x, y: x * y + 1 # noqa: E731
return torch.vmap(f)(x, y)
yobx#
inputs:
#1[(T1s3,T1s3)]shapes:
dict(x:{0:DYNAMIC},y:{0:DYNAMIC})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch_2']
input: name='y' type=dtype('float32') shape=['batch_3']
init: name='init1_s_' type=float32 shape=() -- array([1.], dtype=float32)-- shape_type_compute._cast_inputs.0
Shape(y, end=1, start=0) -> y::Shape:1
Squeeze(x) -> clone_default::Sq
Squeeze(y) -> clone_default_1::Sq
Mul(clone_default::Sq, clone_default_1::Sq) -> mul
Add(mul, init1_s_) -> add
Expand(add, y::Shape:1) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch_3']
dynamo-ir#
inputs:
#1[(T1s3,T1s3)]shapes:
dict(x:{0:DYNAMIC},y:{0:DYNAMIC})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['s17']
input: name='y' type=dtype('float32') shape=['s17']
init: name='_to_copy' type=float32 shape=() -- array([1.], dtype=float32)
Mul(x, y) -> mul
Add(mul, _to_copy) -> add_2
output: name='add_2' type=dtype('float32') shape=['s17']
tracing#
inputs:
#1[(T1s3,T1s3)]shapes:
dict(x:{0:DYNAMIC},y:{0:DYNAMIC})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch_2']
input: name='y' type=dtype('float32') shape=['batch_3']
init: name='init1_s_::RSh1' type=float32 shape=(1,) -- array([1.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init1_s_,init7_s1_1)##init1_s_/shape_type_compute._cast_inputs.1(add)##init7_s1_1/Opset.make_node.1/Shape
Mul(x, y) -> mul
Add(mul, init1_s_::RSh1) -> output
output: name='output' type=dtype('float32') shape=['DYN0^DYN1']
new-tracing#
inputs:
#1[(T1s3,T1s3)]shapes:
dict(x:{0:DYNAMIC},y:{0:DYNAMIC})
opset: domain='' version=21
input: name='x' type=dtype('float32') shape=['batch_2']
input: name='y' type=dtype('float32') shape=['batch_3']
init: name='param_1::RSh1' type=float32 shape=(1,) -- array([1.], dtype=float32)-- GraphBuilder.constant_folding.from/fold(init7_s1_1,param_1)##param_1/DynamoInterpret.placeholder.0##init7_s1_1/Opset.make_node.1/Shape
Mul(x, y) -> mul_tensor
Add(mul_tensor, param_1::RSh1) -> output
output: name='output' type=dtype('float32') shape=['batch_2']
VmapPython#
code: yobx.torch.testing._model_eval_cases.VmapPython
forward#
def forward(self, x, y):
f = lambda x, y: x * y + 1 # noqa: E731
return patched_vmap(f)(x, y)
yobx#
inputs:
#1[(T1s3,T1s3)]shapes:
dict(x:{0:DYNAMIC},y:{0:DYNAMIC})
opset: domain='' version=21
opset: domain='local_functions' version=1
input: name='x' type=dtype('float32') shape=['batch_2']
input: name='y' type=dtype('float32') shape=['batch_3']
init: name='init1_s_2_cst2init' type=float32 shape=() -- array([1.], dtype=float32)-- GraphBuilderPatternOptimization.make_initializer.1/Small
Scan(x, y, body=G1, num_scan_inputs=2, scan_input_directions=[0,0], scan_output_axes=[0], scan_output_directions=[0]) -> output_0
output: name='output_0' type=dtype('float32') shape=['batch_3']
----- subgraph ---- Scan - scan - att.body=G1 -- level=1 -- scan_0_movedim,scan_1_movedim_1 -> output_0
input: name='scan_0_movedim' type=dtype('float32') shape=None
input: name='scan_1_movedim_1' type=dtype('float32') shape=None
Mul(scan_0_movedim, scan_1_movedim_1) -> mul2
Add(mul2, init1_s_2_cst2init) -> output_0
output: name='output_0' type=dtype('float32') shape=None
dynamo-ir#
inputs:
#1[(T1s3,T1s3)]shapes:
dict(x:{0:DYNAMIC},y:{0:DYNAMIC})
opset: domain='' version=20
input: name='x' type=dtype('float32') shape=['s17']
input: name='y' type=dtype('float32') shape=['s17']
init: name='scalar_tensor_default' type=float32 shape=() -- array([1.], dtype=float32)
Scan(x, y, body=G1, num_scan_inputs=2, scan_input_directions=[0,0], scan_output_directions=[0]) -> getitem
output: name='getitem' type=dtype('float32') shape=['s17']
----- subgraph ---- Scan - node_scan__0 - att.body=G1 -- level=1 -- permute_scan_combine_graph_0__subgraph_in,permute_1_scan_combine_graph_0__subgraph_in -> add_scan_combine_graph_0
input: name='permute_scan_combine_graph_0__subgraph_in' type=dtype('float32') shape=None
input: name='permute_1_scan_combine_graph_0__subgraph_in' type=dtype('float32') shape=None
Mul(permute_scan_combine_graph_0__subgraph_in, permute_1_scan_combine_graph_0__subgraph_in) -> mul
Add(mul, scalar_tensor_default) -> add_scan_combine_graph_0
output: name='add_scan_combine_graph_0' type=dtype('float32') shape=None
tracing#
FAILED
symbolically traced variables cannot be used as inputs to control flow
new-tracing#
FAILED
list index out of range
Summary#
case |
dynamo-ir |
new-tracing |
tracing |
yobx |
|---|---|---|---|---|
FAIL |
FAIL |
|||
FAIL |
||||
FAIL |
||||
FAIL |
FAIL |
FAIL |
FAIL |
|
FAIL |
FAIL |
FAIL |
FAIL |
|
FAIL |
||||
FAIL |
||||
FAIL |
||||
FAIL |
||||
FAIL |
FAIL |
FAIL |
FAIL |
|
FAIL |
||||
FAIL |
FAIL |
FAIL |
||
FAIL |
||||
FAIL |
||||
FAIL |
||||
FAIL |
||||
FAIL |
FAIL |
|||
FAIL |
FAIL |
|||
FAIL |
||||
FAIL |
||||
FAIL |
||||
FAIL |
FAIL |
|||
FAIL |
FAIL |
FAIL |
FAIL |
|
FAIL |
FAIL |
FAIL |
||
FAIL |
FAIL |
FAIL |
FAIL |
|
FAIL |
FAIL |
FAIL |
FAIL |
|
FAIL |
||||
FAIL |
||||
FAIL |
||||
FAIL |
||||
FAIL |
||||
FAIL |
||||
FAIL |
||||
FAIL |
||||
FAIL |
||||
FAIL |
||||
FAIL |
||||
FAIL |
||||
FAIL |
FAIL |
FAIL |
||
FAIL |
FAIL |
FAIL |
||
FAIL |
FAIL |
|||
FAIL |
||||
FAIL |
||||
FAIL |
||||
FAIL |
||||
FAIL |
FAIL |
FAIL |
FAIL |
|
FAIL |
FAIL |
FAIL |
FAIL |
|
FAIL |
FAIL |
FAIL |
FAIL |
|
FAIL |
||||
FAIL |
FAIL |
FAIL |
FAIL |
|
FAIL |
FAIL |
FAIL |
FAIL |
|
FAIL |
||||
FAIL |
FAIL |
|||
FAIL |
FAIL |