Exported Programs with Dynamic Shapes¶
The following script shows the exported program for a number of short cases, exported with different options. This step happens before the conversion to ONNX.
<<<
import inspect
import textwrap
import pandas
from onnx_diagnostic.helpers import string_type
from onnx_diagnostic.torch_export_patches.eval import discover, run_exporter
from onnx_diagnostic.ext_test_case import unit_test_going

cases = discover()
print()
print(":ref:`Summary <led-summary-exported-program>`")
print()
sorted_cases = sorted(cases.items())
if unit_test_going():
    sorted_cases = sorted_cases[:3]
for name, cls_model in sorted_cases:
    print(f"* :ref:`{name} <led-model-case-export-{name}>`")
print()
print()

obs = []
for name, cls_model in sorted(cases.items()):
    print()
    print(f".. _led-model-case-export-{name}:")
    print()
    print(name)
    print("=" * len(name))
    print()
    print(
        f"code: :class:`onnx_diagnostic.torch_export_patches.eval.model_cases.{name}`"
    )
    print()
    print("forward")
    print("+++++++")
    print()
    print(".. code-block:: python")
    print()
    src = inspect.getsource(cls_model.forward)
    if src:
        print(textwrap.indent(textwrap.dedent(src), " "))
    else:
        print(" # code is missing")
    print()
    print()

    for exporter in (
        "export-strict",
        "export-nostrict",
        "export-nostrict-oblivious",
        "export-nostrict-decall",
        "export-tracing",
    ):
        expname = exporter.replace("export-", "")
        print()
        print(expname)
        print("+" * len(expname))
        print()
        res = run_exporter(exporter, cls_model, True, quiet=True)
        case_ref = f":ref:`{name} <led-model-case-export-{name}>`"
        expo = exporter.split("-", maxsplit=1)[-1]
        if "inputs" in res:
            print(f"* **inputs:** ``{string_type(res['inputs'], with_shape=True)}``")
        if "dynamic_shapes" in res:
            print(f"* **shapes:** ``{string_type(res['dynamic_shapes'])}``")
        print()
        print()
        if "exported" in res:
            print(".. code-block:: text")
            print()
            print(textwrap.indent(str(res["exported"].graph), " "))
            print()
            print()
            obs.append(dict(case=case_ref, error="", exporter=expo))
        else:
            print("**FAILED**")
            print()
            print(".. code-block:: text")
            print()
            err = str(res["error"])
            if err:
                print(textwrap.indent(err, " "))
            else:
                print(" # no error found for the failure")
            print()
            print()
            obs.append(dict(case=case_ref, error="FAIL", exporter=expo))

print()
print(".. _led-summary-exported-program:")
print()
print("Summary")
print("+++++++")
print()
df = pandas.DataFrame(obs)
piv = df.pivot(index="case", columns="exporter", values="error")
print(piv.to_markdown(tablefmt="rst"))
print()
>>>
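For reference, a minimal sketch of what each exporter option roughly amounts to, assuming a module ``model``, example ``inputs`` and ``dynamic_shapes`` as in the cases below (``run_exporter`` wraps this with patches and error handling; the ``oblivious`` variant additionally turns on size-oblivious reasoning for symbolic shapes and is left out of the sketch):

import torch

# strict / nostrict: torch.export with or without dynamo-based tracing
ep = torch.export.export(model, inputs, dynamic_shapes=dynamic_shapes, strict=True)   # export-strict
ep = torch.export.export(model, inputs, dynamic_shapes=dynamic_shapes, strict=False)  # export-nostrict

# nostrict-decall: additionally decompose into core ATen operators
ep = ep.run_decompositions()

# tracing: plain torch.fx symbolic tracing, no ExportedProgram
gm = torch.fx.symbolic_trace(model)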
AtenAsStrided¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.AtenAsStrided
forward¶
def forward(self, x):
y = torch.as_strided(x, (2, 2, 8, 4), (128, 8, 16, 1))
return y
strict¶
inputs: #1[(T1s2x2x8x8,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%as_strided : [num_users=1] = call_function[target=torch.ops.aten.as_strided.default](args = (%x, [2, 2, 8, 4], [128, 8, 16, 1]), kwargs = {})
return (as_strided,)
nostrict¶
inputs: #1[(T1s2x2x8x8,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%as_strided : [num_users=1] = call_function[target=torch.ops.aten.as_strided.default](args = (%x, [2, 2, 8, 4], [128, 8, 16, 1]), kwargs = {})
return (as_strided,)
nostrict-oblivious¶
inputs: #1[(T1s2x2x8x8,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%as_strided : [num_users=1] = call_function[target=torch.ops.aten.as_strided.default](args = (%x, [2, 2, 8, 4], [128, 8, 16, 1]), kwargs = {})
return (as_strided,)
nostrict-decall¶
inputs: #1[(T1s2x2x8x8,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%as_strided : [num_users=1] = call_function[target=torch.ops.aten.as_strided.default](args = (%x, [2, 2, 8, 4], [128, 8, 16, 1]), kwargs = {})
return (as_strided,)
tracing¶
inputs: #1[(T1s2x2x8x8,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=1] = placeholder[target=x]
%as_strided : [num_users=1] = call_function[target=torch.as_strided](args = (%x, (2, 2, 8, 4), (128, 8, 16, 1)), kwargs = {})
return as_strided
AtenInterpolate¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.AtenInterpolate
forward¶
def forward(self, x):
y = torch.nn.functional.interpolate(
x,
scale_factor=2.0,
mode="bilinear",
recompute_scale_factor=False,
)
return y
strict¶
inputs: #1[(T1s2x2x3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%upsample_bilinear2d : [num_users=1] = call_function[target=torch.ops.aten.upsample_bilinear2d.vec](args = (%x, None, False, [2.0, 2.0]), kwargs = {})
return (upsample_bilinear2d,)
nostrict¶
inputs: #1[(T1s2x2x3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%upsample_bilinear2d : [num_users=1] = call_function[target=torch.ops.aten.upsample_bilinear2d.vec](args = (%x, None, False, [2.0, 2.0]), kwargs = {})
return (upsample_bilinear2d,)
nostrict-oblivious¶
inputs: #1[(T1s2x2x3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%upsample_bilinear2d : [num_users=1] = call_function[target=torch.ops.aten.upsample_bilinear2d.vec](args = (%x, None, False, [2.0, 2.0]), kwargs = {})
return (upsample_bilinear2d,)
nostrict-decall¶
inputs: #1[(T1s2x2x3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%upsample_bilinear2d : [num_users=1] = call_function[target=torch.ops.aten.upsample_bilinear2d.vec](args = (%x, None, False, [2.0, 2.0]), kwargs = {})
return (upsample_bilinear2d,)
tracing¶
inputs: #1[(T1s2x2x3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=1] = placeholder[target=x]
%interpolate : [num_users=1] = call_function[target=torch.nn.functional.interpolate](args = (%x,), kwargs = {size: None, scale_factor: 2.0, mode: bilinear, align_corners: None, recompute_scale_factor: False, antialias: False})
return interpolate
AtenNonZero¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.AtenNonZero
forward¶
def forward(self, x):
y = torch.nonzero(x)
return y
strict¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%nonzero : [num_users=2] = call_function[target=torch.ops.aten.nonzero.default](args = (%x,), kwargs = {})
%sym_size_int_1 : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%nonzero, 0), kwargs = {})
%sym_constrain_range_for_size_default : [num_users=0] = call_function[target=torch.ops.aten.sym_constrain_range_for_size.default](args = (%sym_size_int_1,), kwargs = {})
%ge : [num_users=1] = call_function[target=operator.ge](args = (%sym_size_int_1, 0), kwargs = {})
%_assert_scalar_default : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%ge, Runtime assertion failed for expression u0 >= 0 on node 'ge'), kwargs = {})
return (nonzero,)
nostrict¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%nonzero : [num_users=2] = call_function[target=torch.ops.aten.nonzero.default](args = (%x,), kwargs = {})
%sym_size_int_1 : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%nonzero, 0), kwargs = {})
%sym_constrain_range_for_size_default : [num_users=0] = call_function[target=torch.ops.aten.sym_constrain_range_for_size.default](args = (%sym_size_int_1,), kwargs = {})
%ge : [num_users=1] = call_function[target=operator.ge](args = (%sym_size_int_1, 0), kwargs = {})
%_assert_scalar_default : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%ge, Runtime assertion failed for expression u0 >= 0 on node 'ge'), kwargs = {})
return (nonzero,)
nostrict-oblivious¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%nonzero : [num_users=2] = call_function[target=torch.ops.aten.nonzero.default](args = (%x,), kwargs = {})
%sym_size_int_1 : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%nonzero, 0), kwargs = {})
%sym_constrain_range_for_size_default : [num_users=0] = call_function[target=torch.ops.aten.sym_constrain_range_for_size.default](args = (%sym_size_int_1,), kwargs = {})
%ge : [num_users=1] = call_function[target=operator.ge](args = (%sym_size_int_1, 0), kwargs = {})
%_assert_scalar_default : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%ge, Runtime assertion failed for expression u0 >= 0 on node 'ge'), kwargs = {})
return (nonzero,)
nostrict-decall¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%nonzero : [num_users=2] = call_function[target=torch.ops.aten.nonzero.default](args = (%x,), kwargs = {})
%sym_size_int_2 : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%nonzero, 0), kwargs = {})
%sym_constrain_range_for_size_default : [num_users=0] = call_function[target=torch.ops.aten.sym_constrain_range_for_size.default](args = (%sym_size_int_2,), kwargs = {})
%ge_1 : [num_users=1] = call_function[target=operator.ge](args = (%sym_size_int_2, 0), kwargs = {})
%_assert_scalar_default : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%ge_1, Runtime assertion failed for expression u0 >= 0 on node 'ge_1'), kwargs = {})
return (nonzero,)
tracing¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=1] = placeholder[target=x]
%nonzero : [num_users=1] = call_function[target=torch.nonzero](args = (%x,), kwargs = {})
return nonzero
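The ``u0`` in the assertions above is an unbacked symbolic size: the number of rows returned by ``nonzero`` depends on the data, not on the input shape. A minimal sketch reproducing it (class name here is illustrative):

import torch

class NonZero(torch.nn.Module):
    def forward(self, x):
        return torch.nonzero(x)

# the output has shape (u0, 2) where u0 is data-dependent, so export
# emits sym_constrain_range_for_size plus the runtime assertion u0 >= 0
ep = torch.export.export(
    NonZero(), (torch.randn(3, 4),),
    dynamic_shapes=({0: torch.export.Dim("batch")},),
)
print(ep.graph)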
AtenNonZeroTuple¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.AtenNonZeroTuple
forward¶
def forward(self, x):
y = torch.nonzero(x, as_tuple=True)
return y[0], y[1]
strict¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%nonzero_numpy : [num_users=2] = call_function[target=torch.ops.aten.nonzero_numpy.default](args = (%x,), kwargs = {})
%getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%nonzero_numpy, 0), kwargs = {})
%sym_size_int_1 : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%getitem_2, 0), kwargs = {})
%sym_constrain_range_for_size_default : [num_users=0] = call_function[target=torch.ops.aten.sym_constrain_range_for_size.default](args = (%sym_size_int_1,), kwargs = {})
%ge : [num_users=1] = call_function[target=operator.ge](args = (%sym_size_int_1, 0), kwargs = {})
%_assert_scalar_default : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%ge, Runtime assertion failed for expression u0 >= 0 on node 'ge'), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%nonzero_numpy, 1), kwargs = {})
return (getitem_2, getitem_1)
nostrict¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%nonzero_numpy : [num_users=2] = call_function[target=torch.ops.aten.nonzero_numpy.default](args = (%x,), kwargs = {})
%getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%nonzero_numpy, 0), kwargs = {})
%sym_size_int_1 : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%getitem_2, 0), kwargs = {})
%sym_constrain_range_for_size_default : [num_users=0] = call_function[target=torch.ops.aten.sym_constrain_range_for_size.default](args = (%sym_size_int_1,), kwargs = {})
%ge : [num_users=1] = call_function[target=operator.ge](args = (%sym_size_int_1, 0), kwargs = {})
%_assert_scalar_default : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%ge, Runtime assertion failed for expression u0 >= 0 on node 'ge'), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%nonzero_numpy, 1), kwargs = {})
return (getitem_2, getitem_1)
nostrict-oblivious¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%nonzero_numpy : [num_users=2] = call_function[target=torch.ops.aten.nonzero_numpy.default](args = (%x,), kwargs = {})
%getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%nonzero_numpy, 0), kwargs = {})
%sym_size_int_1 : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%getitem_2, 0), kwargs = {})
%sym_constrain_range_for_size_default : [num_users=0] = call_function[target=torch.ops.aten.sym_constrain_range_for_size.default](args = (%sym_size_int_1,), kwargs = {})
%ge : [num_users=1] = call_function[target=operator.ge](args = (%sym_size_int_1, 0), kwargs = {})
%_assert_scalar_default : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%ge, Runtime assertion failed for expression u0 >= 0 on node 'ge'), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%nonzero_numpy, 1), kwargs = {})
return (getitem_2, getitem_1)
nostrict-decall¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%nonzero : [num_users=3] = call_function[target=torch.ops.aten.nonzero.default](args = (%x,), kwargs = {})
%sym_size_int_2 : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%nonzero, 0), kwargs = {})
%sym_constrain_range_for_size_default : [num_users=0] = call_function[target=torch.ops.aten.sym_constrain_range_for_size.default](args = (%sym_size_int_2,), kwargs = {})
%ge_2 : [num_users=1] = call_function[target=operator.ge](args = (%sym_size_int_2, 0), kwargs = {})
%_assert_scalar_default : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%ge_2, Runtime assertion failed for expression u0 >= 0 on node 'ge_2'), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%nonzero, 1, 0, 1), kwargs = {})
%slice_2 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%nonzero, 1, 1, 2), kwargs = {})
%squeeze : [num_users=1] = call_function[target=torch.ops.aten.squeeze.dims](args = (%slice_1, [1]), kwargs = {})
%squeeze_1 : [num_users=1] = call_function[target=torch.ops.aten.squeeze.dims](args = (%slice_2, [1]), kwargs = {})
return (squeeze, squeeze_1)
tracing¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=1] = placeholder[target=x]
%nonzero : [num_users=2] = call_function[target=torch.nonzero](args = (%x,), kwargs = {as_tuple: True})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%nonzero, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%nonzero, 1), kwargs = {})
return (getitem, getitem_1)
AtenRollPos¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.AtenRollPos
forward¶
def forward(self, x):
return torch.roll(x, 1, -1)
strict¶
inputs: #1[(T1s2x3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%roll : [num_users=1] = call_function[target=torch.ops.aten.roll.default](args = (%x, [1], [-1]), kwargs = {})
return (roll,)
nostrict¶
inputs: #1[(T1s2x3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%roll : [num_users=1] = call_function[target=torch.ops.aten.roll.default](args = (%x, [1], [-1]), kwargs = {})
return (roll,)
nostrict-oblivious¶
inputs: #1[(T1s2x3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%roll : [num_users=1] = call_function[target=torch.ops.aten.roll.default](args = (%x, [1], [-1]), kwargs = {})
return (roll,)
nostrict-decall¶
inputs: #1[(T1s2x3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%arange : [num_users=1] = call_function[target=torch.ops.aten.arange.start_step](args = (0, 4), kwargs = {layout: torch.strided, device: cpu, pin_memory: False})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arange, 3), kwargs = {})
%fmod : [num_users=1] = call_function[target=torch.ops.aten.fmod.Scalar](args = (%add, 4), kwargs = {})
%index_select : [num_users=1] = call_function[target=torch.ops.aten.index_select.default](args = (%x, 2, %fmod), kwargs = {})
return (index_select,)
tracing¶
inputs: #1[(T1s2x3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=1] = placeholder[target=x]
%roll : [num_users=1] = call_function[target=torch.roll](args = (%x, 1, -1), kwargs = {})
return roll
AtenRollRelu¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.AtenRollRelu
forward¶
def forward(self, x):
return torch.relu(torch.roll(x, -1, -1))
strict¶
inputs: #1[(T1s2x3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%roll : [num_users=1] = call_function[target=torch.ops.aten.roll.default](args = (%x, [-1], [-1]), kwargs = {})
%relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%roll,), kwargs = {})
return (relu,)
nostrict¶
inputs: #1[(T1s2x3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%roll : [num_users=1] = call_function[target=torch.ops.aten.roll.default](args = (%x, [-1], [-1]), kwargs = {})
%relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%roll,), kwargs = {})
return (relu,)
nostrict-oblivious¶
inputs: #1[(T1s2x3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%roll : [num_users=1] = call_function[target=torch.ops.aten.roll.default](args = (%x, [-1], [-1]), kwargs = {})
%relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%roll,), kwargs = {})
return (relu,)
nostrict-decall¶
inputs: #1[(T1s2x3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%arange : [num_users=1] = call_function[target=torch.ops.aten.arange.start_step](args = (0, 4), kwargs = {layout: torch.strided, device: cpu, pin_memory: False})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arange, 1), kwargs = {})
%fmod : [num_users=1] = call_function[target=torch.ops.aten.fmod.Scalar](args = (%add, 4), kwargs = {})
%index_select : [num_users=1] = call_function[target=torch.ops.aten.index_select.default](args = (%x, 2, %fmod), kwargs = {})
%relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%index_select,), kwargs = {})
return (relu,)
tracing¶
inputs: #1[(T1s2x3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=1] = placeholder[target=x]
%roll : [num_users=1] = call_function[target=torch.roll](args = (%x, -1, -1), kwargs = {})
%relu : [num_users=1] = call_function[target=torch.relu](args = (%roll,), kwargs = {})
return relu
BuildInIsInstance¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.BuildInIsInstance
forward¶
def forward(self, x, lx: list | torch.Tensor):
if isinstance(lx, list):
t = lx[0] * lx[1].sum(axis=1, keepdim=True)
return torch.sigmoid(self.linear(x)) - self.buff + t
return torch.sigmoid(self.linear(x)) - self.buff + lx
strict¶
inputs: #2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]
shapes: dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
graph():
%linear_weight : [num_users=1] = get_attr[target=linear.weight]
%linear_bias : [num_users=1] = get_attr[target=linear.bias]
%buff : [num_users=1] = get_attr[target=buff]
%x : [num_users=2] = placeholder[target=x]
%lx_0 : [num_users=2] = placeholder[target=lx_0]
%lx_1 : [num_users=2] = placeholder[target=lx_1]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %lx_0, %lx_1), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%lx_1, [1], True), kwargs = {})
%mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%lx_0, %sum_1), kwargs = {})
%linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %linear_weight, %linear_bias), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})
%sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %buff), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %mul), kwargs = {})
return (add,)
nostrict¶
inputs: #2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]
shapes: dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
graph():
%linear_weight : [num_users=1] = get_attr[target=linear.weight]
%linear_bias : [num_users=1] = get_attr[target=linear.bias]
%buff : [num_users=1] = get_attr[target=buff]
%x : [num_users=2] = placeholder[target=x]
%lx_0 : [num_users=2] = placeholder[target=lx_0]
%lx_1 : [num_users=2] = placeholder[target=lx_1]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %lx_0, %lx_1), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%lx_1, [1], True), kwargs = {})
%mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%lx_0, %sum_1), kwargs = {})
%linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %linear_weight, %linear_bias), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})
%sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %buff), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %mul), kwargs = {})
return (add,)
nostrict-oblivious¶
inputs: #2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]
shapes: dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
graph():
%linear_weight : [num_users=1] = get_attr[target=linear.weight]
%linear_bias : [num_users=1] = get_attr[target=linear.bias]
%buff : [num_users=1] = get_attr[target=buff]
%x : [num_users=2] = placeholder[target=x]
%lx_0 : [num_users=2] = placeholder[target=lx_0]
%lx_1 : [num_users=2] = placeholder[target=lx_1]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %lx_0, %lx_1), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%lx_1, [1], True), kwargs = {})
%mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%lx_0, %sum_1), kwargs = {})
%linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %linear_weight, %linear_bias), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})
%sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %buff), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %mul), kwargs = {})
return (add,)
nostrict-decall¶
inputs: #2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]
shapes: dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
graph():
%linear_weight : [num_users=1] = get_attr[target=linear.weight]
%linear_bias : [num_users=1] = get_attr[target=linear.bias]
%buff : [num_users=1] = get_attr[target=buff]
%x : [num_users=2] = placeholder[target=x]
%lx_0 : [num_users=2] = placeholder[target=lx_0]
%lx_1 : [num_users=2] = placeholder[target=lx_1]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %lx_0, %lx_1), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%lx_1, [1], True), kwargs = {})
%mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%lx_0, %sum_1), kwargs = {})
%permute : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%linear_weight, [1, 0]), kwargs = {})
%addmm : [num_users=1] = call_function[target=torch.ops.aten.addmm.default](args = (%linear_bias, %x, %permute), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm,), kwargs = {})
%sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %buff), kwargs = {})
%add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_4, %mul_1), kwargs = {})
return (add_15,)
tracing¶
Traceback (most recent call last):
  File "~/vv/this312/lib/python3.12/site-packages/torch/fx/graph_module.py", line 442, in __call__
  File "~/vv/this312/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1783, in _wrapped_call_impl
  File "~/vv/this312/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1794, in _call_impl
  File "<eval_with_key>.607 from ~/github/onnx-diagnostic/onnx_diagnostic/torch_export_patches/eval/model_cases.py:756 in forward", line 9, in forward
    add = sub + lx;  sub = lx = None
          ~~~~^~~~
TypeError: unsupported operand type(s) for +: 'Tensor' and 'list'

Call using an FX-traced Module, line 9 of the traced Module's generated forward function:
    sub = sigmoid - buff;  sigmoid = buff = None
    add = sub + lx;  sub = lx = None
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
    return add
FAILED
unsupported operand type(s) for +: 'Tensor' and 'list'
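The tracing failure is expected: torch.fx runs ``forward`` once with ``Proxy`` objects, and ``isinstance(lx, list)`` evaluates to False for a ``Proxy``, so the traced graph hard-codes the tensor branch and later breaks on list inputs. A minimal sketch of the effect, assuming plain ``torch.fx``:

import torch

class M(torch.nn.Module):
    def forward(self, x, lx):
        if isinstance(lx, list):      # decided once, at trace time
            return x + lx[0]
        return x + lx

gm = torch.fx.symbolic_trace(M())
print(gm.code)                        # only the tensor branch survives
gm(torch.randn(2), [torch.randn(2)])  # TypeError: Tensor + list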
BuildInLen¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.BuildInLen
forward¶
def forward(self, x, lx: list):
t = lx[0] * lx[1].sum(axis=1, keepdim=True)
if len(lx) > 2:
t = t + lx[2].sum(axis=1, keepdim=True)
return torch.sigmoid(self.linear(x)) - self.buff + t
strict¶
FAILED
Trying to flatten user inputs with exported input tree spec:
TreeSpec(tuple, None, [TreeSpec(tuple, None, [*,
TreeSpec(list, None, [*,
*])]),
TreeSpec(dict, [], [])])
but actually got inputs with tree spec of:
TreeSpec(tuple, None, [TreeSpec(tuple, None, [*,
TreeSpec(list, None, [*,
*,
*])]),
TreeSpec(dict, [], [])]).
Please check that the inputs have the same number and type of args and kwargs as the ones you used when tracing.
nostrict¶
FAILED
Trying to flatten user inputs with exported input tree spec:
TreeSpec(tuple, None, [TreeSpec(tuple, None, [*,
TreeSpec(list, None, [*,
*])]),
TreeSpec(dict, [], [])])
but actually got inputs with tree spec of:
TreeSpec(tuple, None, [TreeSpec(tuple, None, [*,
TreeSpec(list, None, [*,
*,
*])]),
TreeSpec(dict, [], [])]).
Please check that the inputs have the same number and type of args and kwargs as the ones you used when tracing.
nostrict-oblivious¶
FAILED
Trying to flatten user inputs with exported input tree spec:
TreeSpec(tuple, None, [TreeSpec(tuple, None, [*,
TreeSpec(list, None, [*,
*])]),
TreeSpec(dict, [], [])])
but actually got inputs with tree spec of:
TreeSpec(tuple, None, [TreeSpec(tuple, None, [*,
TreeSpec(list, None, [*,
*,
*])]),
TreeSpec(dict, [], [])]).
Please check that the inputs have the same number and type of args and kwargs as the ones you used when tracing.
nostrict-decall¶
FAILED
Trying to flatten user inputs with exported input tree spec:
TreeSpec(tuple, None, [TreeSpec(tuple, None, [*,
TreeSpec(list, None, [*,
*])]),
TreeSpec(dict, [], [])])
but actually got inputs with tree spec of:
TreeSpec(tuple, None, [TreeSpec(tuple, None, [*,
TreeSpec(list, None, [*,
*,
*])]),
TreeSpec(dict, [], [])]).
Please check that the inputs have the same number and type of args and kwargs as the ones you used when tracing.
tracing¶
FAILED
len(.) expects an integer, len needs to be replaced. You should use _len.
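``torch.export`` fares no better here, for a different reason: ``len(lx)`` is evaluated on the concrete example inputs, so the exported program is specialized to one list length and its input TreeSpec rejects the other. A minimal sketch of the specialization (shapes here are illustrative):

import torch

class M(torch.nn.Module):
    def forward(self, x, lx):
        t = lx[0] * lx[1].sum(axis=1, keepdim=True)
        if len(lx) > 2:               # evaluated on the example inputs
            t = t + lx[2].sum(axis=1, keepdim=True)
        return x + t

ep = torch.export.export(
    M(), (torch.randn(4, 3), [torch.randn(4, 1), torch.randn(4, 3)])
)
# a 3-element list no longer matches the recorded TreeSpec and raises
# the error shown above:
ep.module()(
    torch.randn(4, 3),
    [torch.randn(4, 1), torch.randn(4, 3), torch.randn(4, 3)],
)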
ComplexPolar¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ComplexPolar
forward¶
def forward(self, x, angle):
return torch.polar(x, angle)
strict¶
inputs: #1[(T1s4x4,T1s4x4)]
shapes: dict(x:{0:Dim(batch)},angle:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%angle : [num_users=2] = placeholder[target=angle]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %angle), kwargs = {})
%polar : [num_users=1] = call_function[target=torch.ops.aten.polar.default](args = (%x, %angle), kwargs = {})
return (polar,)
nostrict¶
inputs: #1[(T1s4x4,T1s4x4)]
shapes: dict(x:{0:Dim(batch)},angle:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%angle : [num_users=2] = placeholder[target=angle]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %angle), kwargs = {})
%polar : [num_users=1] = call_function[target=torch.ops.aten.polar.default](args = (%x, %angle), kwargs = {})
return (polar,)
nostrict-oblivious¶
inputs: #1[(T1s4x4,T1s4x4)]
shapes: dict(x:{0:Dim(batch)},angle:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%angle : [num_users=2] = placeholder[target=angle]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %angle), kwargs = {})
%polar : [num_users=1] = call_function[target=torch.ops.aten.polar.default](args = (%x, %angle), kwargs = {})
return (polar,)
nostrict-decall¶
inputs: #1[(T1s4x4,T1s4x4)]
shapes: dict(x:{0:Dim(batch)},angle:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%angle : [num_users=2] = placeholder[target=angle]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %angle), kwargs = {})
%polar : [num_users=1] = call_function[target=torch.ops.aten.polar.default](args = (%x, %angle), kwargs = {})
return (polar,)
tracing¶
inputs: #1[(T1s4x4,T1s4x4)]
shapes: dict(x:{0:Dim(batch)},angle:{0:Dim(batch)})
graph():
%x : [num_users=1] = placeholder[target=x]
%angle : [num_users=1] = placeholder[target=angle]
%polar : [num_users=1] = call_function[target=torch.polar](args = (%x, %angle), kwargs = {})
return polar
ControlFlowCond¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ControlFlowCond
forward¶
def forward(self, x):
def true_fn(x):
return torch.sin(x)
def false_fn(x):
return torch.cos(x)
return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
strict¶
inputs: #1[(T1s5x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x,)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
return (getitem,)
nostrict¶
inputs: #1[(T1s5x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x,)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
return (getitem,)
nostrict-oblivious¶
inputs: #1[(T1s5x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x,)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
return (getitem,)
nostrict-decall¶
inputs: #1[(T1s5x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%x, []), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x,)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
return (getitem,)
tracing¶
inputs: #1[(T1s5x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%sum_1 : [num_users=1] = call_method[target=sum](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=operator.gt](args = (%sum_1, 0), kwargs = {})
%_cb_cond_true_fn_0 : [num_users=1] = get_attr[target=_cb_cond_true_fn_0]
%_cb_cond_false_fn_0 : [num_users=1] = get_attr[target=_cb_cond_false_fn_0]
%condcc : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %_cb_cond_true_fn_0, %_cb_cond_false_fn_0, [%x]), kwargs = {})
return condcc
ControlFlowCond2Inputs¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ControlFlowCond2Inputs
forward¶
def forward(self, x, y):
def true_fn(x, y):
return torch.sin(x), torch.cos(x) + y
def false_fn(x, y):
return torch.cos(x), torch.sin(x) + y
return torch.cond(x.sum() > 0, true_fn, false_fn, [x, y])
strict¶
~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/variables/user_defined.py:1815: FutureWarning: isinstance(treespec, LeafSpec) is deprecated, use isinstance(treespec, TreeSpec) and treespec.is_leaf() instead.
inputs: #1[(T1s5x3,T1s5x3)]
shapes: dict(x:{0:Dim(batch)},y:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=2] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x, %y)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 1), kwargs = {})
return (getitem, getitem_1)
nostrict¶
inputs: #1[(T1s5x3,T1s5x3)]
shapes: dict(x:{0:Dim(batch)},y:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=2] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x, %y)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 1), kwargs = {})
return (getitem, getitem_1)
nostrict-oblivious¶
inputs: #1[(T1s5x3,T1s5x3)]
shapes: dict(x:{0:Dim(batch)},y:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=2] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x, %y)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 1), kwargs = {})
return (getitem, getitem_1)
nostrict-decall¶
inputs: #1[(T1s5x3,T1s5x3)]
shapes: dict(x:{0:Dim(batch)},y:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%x, []), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=2] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x, %y)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 1), kwargs = {})
return (getitem, getitem_1)
tracing¶
inputs: #1[(T1s5x3,T1s5x3)]
shapes: dict(x:{0:Dim(batch)},y:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%y : [num_users=1] = placeholder[target=y]
%sum_1 : [num_users=1] = call_method[target=sum](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=operator.gt](args = (%sum_1, 0), kwargs = {})
%_cb_cond_true_fn_0 : [num_users=1] = get_attr[target=_cb_cond_true_fn_0]
%_cb_cond_false_fn_0 : [num_users=1] = get_attr[target=_cb_cond_false_fn_0]
%condcc : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %_cb_cond_true_fn_0, %_cb_cond_false_fn_0, [%x, %y]), kwargs = {})
return condcc
ControlFlowCond2Outputs¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ControlFlowCond2Outputs
forward¶
def forward(self, x):
def true_fn(x):
return torch.sin(x), torch.cos(x)
def false_fn(x):
return torch.cos(x), torch.sin(x)
return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
strict¶
~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/variables/user_defined.py:1815: FutureWarning: isinstance(treespec, LeafSpec) is deprecated, use isinstance(treespec, TreeSpec) and treespec.is_leaf() instead.
inputs: #1[(T1s5x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=2] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x,)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 1), kwargs = {})
return (getitem, getitem_1)
nostrict¶
inputs: #1[(T1s5x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=2] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x,)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 1), kwargs = {})
return (getitem, getitem_1)
nostrict-oblivious¶
inputs: #1[(T1s5x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=2] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x,)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 1), kwargs = {})
return (getitem, getitem_1)
nostrict-decall¶
inputs: #1[(T1s5x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%x, []), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=2] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x,)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 1), kwargs = {})
return (getitem, getitem_1)
tracing¶
inputs: #1[(T1s5x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%sum_1 : [num_users=1] = call_method[target=sum](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=operator.gt](args = (%sum_1, 0), kwargs = {})
%_cb_cond_true_fn_0 : [num_users=1] = get_attr[target=_cb_cond_true_fn_0]
%_cb_cond_false_fn_0 : [num_users=1] = get_attr[target=_cb_cond_false_fn_0]
%condcc : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %_cb_cond_true_fn_0, %_cb_cond_false_fn_0, [%x]), kwargs = {})
return condcc
ControlFlowCondConstant¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ControlFlowCondConstant
forward¶
def forward(self, x):
def true_fn(x):
return torch.sin(x) - torch.ones(x.shape, dtype=x.dtype)
def false_fn(x):
return torch.cos(x) + torch.ones((1, 1024), dtype=x.dtype)
return torch.cond(x.sum() > 0, true_fn, false_fn, [x])
strict¶
inputs: #1[(T1s1024x1024,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x,)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
return (getitem,)
nostrict¶
inputs: #1[(T1s1024x1024,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x,)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
return (getitem,)
nostrict-oblivious¶
inputs: #1[(T1s1024x1024,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x,)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
return (getitem,)
nostrict-decall¶
inputs: #1[(T1s1024x1024,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%x, []), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x,)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
return (getitem,)
tracing¶
inputs: #1[(T1s1024x1024,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%sum_1 : [num_users=1] = call_method[target=sum](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=operator.gt](args = (%sum_1, 0), kwargs = {})
%_cb_cond_true_fn_0 : [num_users=1] = get_attr[target=_cb_cond_true_fn_0]
%_cb_cond_false_fn_0 : [num_users=1] = get_attr[target=_cb_cond_false_fn_0]
%condcc : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %_cb_cond_true_fn_0, %_cb_cond_false_fn_0, [%x]), kwargs = {})
return condcc
ControlFlowCondIdentity_153832¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ControlFlowCondIdentity_153832
forward¶
def forward(self, x, y):
def branch_cond_then_1(x):
x = torch.abs(x) + 1
return x
def branch_cond_else_1(x):
return x # fails but succeeds with x.clone()
x = torch.cond(x.sum() > 0, branch_cond_then_1, branch_cond_else_1, [x])
return x + y
strict¶
FAILED
Cond doesn't work unless it is captured completely with torch.compile. Got Encountered aliasing during higher order op tracing
Explanation: Higher order ops do not support aliasing. Found in cond
Hint: Replace `return input` with `return input.clone()` to avoid aliasing.
Hint: Consider using the debug context to change user code to avoid aliasing.
Hint: Please open an issue.
Developer debug context: Input-to-output aliasing detected at nodes l_flat_args_0_ and l_flat_args_0_ in
graph():
%l_flat_args_0_ : torch._subclasses.fake_tensor.FakeTensor [num_users=1] = placeholder[target=l_flat_args_0_]
return (l_flat_args_0_,)
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0040.html
nostrict¶
FAILED
Cond doesn't work unless it is captured completely with torch.compile. Got Encountered aliasing during higher order op tracing
Explanation: Higher order ops do not support aliasing. Found in cond
Hint: Replace `return input` with `return input.clone()` to avoid aliasing.
Hint: Consider using the debug context to change user code to avoid aliasing.
Hint: Please open an issue.
Developer debug context: Input-to-output aliasing detected at nodes l_args_3_0_ and l_args_3_0_ in
graph():
%l_args_3_0_ : torch._subclasses.fake_tensor.FakeTensor [num_users=1] = placeholder[target=l_args_3_0_]
return (l_args_3_0_,)
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0040.html
from user code:
File "~/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/cond.py", line 216, in _cond_op_wrapper
return cond_op(*args, **kwargs)
File "~/vv/this312/lib/python3.12/site-packages/torch/_export/non_strict_utils.py", line 1106, in __torch_function__
return func(*args, **kwargs)
Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"
nostrict-oblivious¶
FAILED
Cond doesn't work unless it is captured completely with torch.compile. Got Encountered aliasing during higher order op tracing
Explanation: Higher order ops do not support aliasing. Found in cond
Hint: Replace `return input` with `return input.clone()` to avoid aliasing.
Hint: Consider using the debug context to change user code to avoid aliasing.
Hint: Please open an issue.
Developer debug context: Input-to-output aliasing detected at nodes l_args_3_0_ and l_args_3_0_ in
graph():
%l_args_3_0_ : torch._subclasses.fake_tensor.FakeTensor [num_users=1] = placeholder[target=l_args_3_0_]
return (l_args_3_0_,)
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0040.html
from user code:
File "~/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/cond.py", line 216, in _cond_op_wrapper
return cond_op(*args, **kwargs)
File "~/vv/this312/lib/python3.12/site-packages/torch/_export/non_strict_utils.py", line 1106, in __torch_function__
return func(*args, **kwargs)
Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"
nostrict-decall¶
FAILED
Cond doesn't work unless it is captured completely with torch.compile. Got Encountered aliasing during higher order op tracing
Explanation: Higher order ops do not support aliasing. Found in cond
Hint: Replace `return input` with `return input.clone()` to avoid aliasing.
Hint: Consider using the debug context to change user code to avoid aliasing.
Hint: Please open an issue.
Developer debug context: Input-to-output aliasing detected at nodes l_args_3_0_ and l_args_3_0_ in
graph():
%l_args_3_0_ : torch._subclasses.fake_tensor.FakeTensor [num_users=1] = placeholder[target=l_args_3_0_]
return (l_args_3_0_,)
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0040.html
from user code:
File "~/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/cond.py", line 216, in _cond_op_wrapper
return cond_op(*args, **kwargs)
File "~/vv/this312/lib/python3.12/site-packages/torch/_export/non_strict_utils.py", line 1106, in __torch_function__
return func(*args, **kwargs)
Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"
tracing¶
FAILED
Cond doesn't work unless it is captured completely with torch.compile. Got Encountered aliasing during higher order op tracing
Explanation: Higher order ops do not support aliasing. Found in cond
Hint: Replace `return input` with `return input.clone()` to avoid aliasing.
Hint: Consider using the debug context to change user code to avoid aliasing.
Hint: Please open an issue.
Developer debug context: Input-to-output aliasing detected at nodes l_args_3_0_ and l_args_3_0_ in
graph():
%l_args_3_0_ : torch._subclasses.fake_tensor.FakeTensor [num_users=1] = placeholder[target=l_args_3_0_]
return (l_args_3_0_,)
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0040.html
from user code:
File "~/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/cond.py", line 216, in _cond_op_wrapper
return cond_op(*args, **kwargs)
Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"
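As the hints in the errors above suggest, returning the branch input unchanged aliases input and output, which higher-order ops reject; the comment in the model itself notes that ``x.clone()`` succeeds. A minimal sketch of the repaired model, following that hint:

import torch

class Fixed(torch.nn.Module):
    def forward(self, x, y):
        def branch_cond_then_1(x):
            return torch.abs(x) + 1

        def branch_cond_else_1(x):
            return x.clone()  # clone() breaks the input-to-output aliasing

        x = torch.cond(x.sum() > 0, branch_cond_then_1, branch_cond_else_1, [x])
        return x + y

ep = torch.export.export(Fixed(), (torch.randn(3, 4), torch.randn(3, 4)))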
ControlFlowCondNestedModule¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ControlFlowCondNestedModule
forward¶
def forward(self, x):
def true_fn(x):
return self.submodule(x)
def false_fn(x):
return x - self.weight
y = torch.cond(x.sum() > 0, true_fn, false_fn, [x])
return y
strict¶
inputs: #1[(T7s2,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%submodule_weight : [num_users=1] = get_attr[target=submodule.weight]
%weight : [num_users=1] = get_attr[target=weight]
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x, %submodule_weight, %weight)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
return (getitem,)
nostrict¶
inputs: #1[(T7s2,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%weight : [num_users=1] = get_attr[target=weight]
%submodule_weight : [num_users=1] = get_attr[target=submodule.weight]
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x, %submodule_weight, %weight)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
return (getitem,)
nostrict-oblivious¶
inputs: #1[(T7s2,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%weight : [num_users=1] = get_attr[target=weight]
%submodule_weight : [num_users=1] = get_attr[target=submodule.weight]
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x, %submodule_weight, %weight)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
return (getitem,)
nostrict-decall¶
inputs: #1[(T7s2,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%weight : [num_users=1] = get_attr[target=weight]
%submodule_weight : [num_users=1] = get_attr[target=submodule.weight]
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%x, []), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x, %submodule_weight, %weight)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
return (getitem,)
tracing¶
inputs: #1[(T7s2,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%sum_1 : [num_users=1] = call_method[target=sum](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=operator.gt](args = (%sum_1, 0), kwargs = {})
%_cb_cond_true_fn_0 : [num_users=1] = get_attr[target=_cb_cond_true_fn_0]
%_cb_cond_false_fn_0 : [num_users=1] = get_attr[target=_cb_cond_false_fn_0]
%condcc : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %_cb_cond_true_fn_0, %_cb_cond_false_fn_0, [%x]), kwargs = {})
return condcc
ControlFlowCondNonZero¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ControlFlowCondNonZero
forward¶
def forward(self, input_ids, image_features, vocab_size):
    def then_branch(input_ids, image_features, vocab_size):
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])
        condition = (input_ids < 0) & (input_ids > -int(1e9))
        positions = torch.nonzero(condition, as_tuple=True)
        input_ids = input_ids.clamp_min(0).clamp_max(vocab_size)
        return (input_ids, positions[0], positions[1])

    def else_branch(input_ids, image_features, vocab_size):
        r = torch.where(torch.zeros((1, 1), dtype=torch.bool))
        return (input_ids, r[0], r[1])

    a, b, c = torch.cond(
        image_features.numel() > 0,
        then_branch,
        else_branch,
        [input_ids, image_features, vocab_size],
    )
    return a, b, c
strict¶
FAILED
Cond doesn't work unless it is captured completely with torch.compile. Got Encountered aliasing during higher order op tracing
Explanation: Higher order ops do not support aliasing. Found in cond
Hint: Replace `return input` with `return input.clone()` to avoid aliasing.
Hint: Consider using the debug context to change user code to avoid aliasing.
Hint: Please open an issue.
Developer debug context: Output-to-output aliasing detected at nodes child and child_1 in
graph():
%l_flat_args_0_ : torch._subclasses.fake_tensor.FakeTensor [num_users=2] = placeholder[target=l_flat_args_0_]
%size : [num_users=2] = call_method[target=size](args = (%l_flat_args_0_,), kwargs = {})
%getitem : [num_users=0] = call_function[target=operator.getitem](args = (%size, 0), kwargs = {})
%getitem_1 : [num_users=0] = call_function[target=operator.getitem](args = (%size, 1), kwargs = {})
%input_ids : [num_users=3] = call_method[target=view](args = (%l_flat_args_0_, -1, 12), kwargs = {})
%lt : [num_users=1] = call_function[target=operator.lt](args = (%input_ids, 0), kwargs = {})
%gt : [num_users=1] = call_function[target=operator.gt](args = (%input_ids, -1000000000), kwargs = {})
%condition : [num_users=1] = call_function[target=operator.and_](args = (%lt, %gt), kwargs = {})
%nonzero : [num_users=2] = call_function[target=torch.nonzero](args = (%condition,), kwargs = {as_tuple: True})
%child : [num_users=1] = call_function[target=operator.getitem](args = (%nonzero, 0), kwargs = {})
%child_1 : [num_users=1] = call_function[target=operator.getitem](args = (%nonzero, 1), kwargs = {})
%clamp_min : [num_users=1] = call_method[target=clamp_min](args = (%input_ids, 0), kwargs = {})
%input_ids_1 : [num_users=1] = call_method[target=clamp_max](args = (%clamp_min, 1025), kwargs = {})
return (input_ids_1, child, child_1)
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0040.html
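Here the aliasing is between two outputs: positions[0] and positions[1] are produced by the same nonzero call and share storage. Following the hint, a sketch of the workaround is to return cloned tensors from the branch (untested, not the case's actual source):

import torch

def then_branch(input_ids, image_features, vocab_size):
    input_ids = input_ids.view(-1, input_ids.size()[-1])
    condition = (input_ids < 0) & (input_ids > -int(1e9))
    positions = torch.nonzero(condition, as_tuple=True)
    input_ids = input_ids.clamp_min(0).clamp_max(vocab_size)
    # clone: the two index tensors come from one nonzero result and
    # would otherwise share storage (output-to-output aliasing)
    return (input_ids, positions[0].clone(), positions[1].clone())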
nostrict¶
FAILED
Expect operands to be a tuple of possibly nested dict/list/tuple that only consists of tensor leaves, but got [FakeTensor(..., size=(s72, 12), dtype=torch.int64), FakeTensor(..., size=(s28, s11)), 1025].
nostrict-oblivious¶
FAILED
Expect operands to be a tuple of possibly nested dict/list/tuple that only consists of tensor leaves, but got [FakeTensor(..., size=(s72, 12), dtype=torch.int64), FakeTensor(..., size=(s28, s11)), 1025].
nostrict-decall¶
FAILED
Expect operands to be a tuple of possibly nested dict/list/tuple that only consists of tensor leaves, but got [FakeTensor(..., size=(s72, 12), dtype=torch.int64), FakeTensor(..., size=(s28, s11)), 1025].
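All three nonstrict variants fail on the same constraint: the operand list passed to torch.cond may only contain tensor leaves, and vocab_size arrives as the Python int 1025. A sketch of one workaround, capturing the scalar by closure rather than passing it as an operand (hypothetical rewrite, untested; the method body is shown as a free function):

import torch

def forward(input_ids, image_features, vocab_size):
    # vocab_size is a plain int: close over it so the operand list
    # handed to torch.cond contains only tensors
    def then_branch(input_ids, image_features):
        ids = input_ids.view(-1, input_ids.size()[-1])
        condition = (ids < 0) & (ids > -int(1e9))
        positions = torch.nonzero(condition, as_tuple=True)
        ids = ids.clamp_min(0).clamp_max(vocab_size)
        return (ids, positions[0].clone(), positions[1].clone())

    def else_branch(input_ids, image_features):
        r = torch.where(torch.zeros((1, 1), dtype=torch.bool))
        return (input_ids.clone(), r[0], r[1])

    return torch.cond(
        image_features.numel() > 0,
        then_branch,
        else_branch,
        [input_ids, image_features],
    )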
tracing¶
inputs: #2[(T7s2x12,T1s2x16,int),(T7s2x12,T1s2x0,int)]
shapes: ({0:Dim(batch)},{0:Dim(batch),1:Dim(seq_length)},None)
graph():
%input_ids : [num_users=1] = placeholder[target=input_ids]
%image_features : [num_users=2] = placeholder[target=image_features]
%vocab_size : [num_users=1] = placeholder[target=vocab_size]
%numel : [num_users=1] = call_method[target=numel](args = (%image_features,), kwargs = {})
%gt : [num_users=1] = call_function[target=operator.gt](args = (%numel, 0), kwargs = {})
%_cb_cond_then_branch_0 : [num_users=1] = get_attr[target=_cb_cond_then_branch_0]
%_cb_cond_else_branch_0 : [num_users=1] = get_attr[target=_cb_cond_else_branch_0]
%condcc : [num_users=3] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %_cb_cond_then_branch_0, %_cb_cond_else_branch_0, [%input_ids, %image_features, %vocab_size]), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%condcc, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%condcc, 1), kwargs = {})
%getitem_2 : [num_users=1] = call_function[target=operator.getitem](args = (%condcc, 2), kwargs = {})
return (getitem, getitem_1, getitem_2)
ControlFlowNestCond¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ControlFlowNestCond
forward¶
def forward(self, x):
    def true_fn2(x):
        def true_fn1(x):
            return torch.sin(x)

        def false_fn1(x):
            return torch.cos(x)

        return torch.cond(x.sum() < 0, true_fn1, false_fn1, [x])

    def false_fn2(x):
        return -x

    return torch.cond(x.sum() > 0, true_fn2, false_fn2, [x])
strict¶
inputs: #1[(T1s5x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x,)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
return (getitem,)
nostrict¶
inputs: #1[(T1s5x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x,)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
return (getitem,)
nostrict-oblivious¶
inputs: #1[(T1s5x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x,)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
return (getitem,)
nostrict-decall¶
inputs: #1[(T1s5x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%x, []), kwargs = {})
%gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {})
%true_graph_0 : [num_users=1] = get_attr[target=true_graph_0]
%false_graph_0 : [num_users=1] = get_attr[target=false_graph_0]
%cond : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %true_graph_0, %false_graph_0, (%x,)), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%cond, 0), kwargs = {})
return (getitem,)
tracing¶
inputs: #1[(T1s5x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%sum_1 : [num_users=1] = call_method[target=sum](args = (%x,), kwargs = {})
%gt : [num_users=1] = call_function[target=operator.gt](args = (%sum_1, 0), kwargs = {})
%_cb_cond_true_fn2_0 : [num_users=1] = get_attr[target=_cb_cond_true_fn2_0]
%_cb_cond_false_fn2_0 : [num_users=1] = get_attr[target=_cb_cond_false_fn2_0]
%condcc : [num_users=1] = call_function[target=torch.ops.higher_order.cond](args = (%gt, %_cb_cond_true_fn2_0, %_cb_cond_false_fn2_0, [%x]), kwargs = {})
return condcc
ControlFlowScan¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ControlFlowScan
forward¶
def forward(self, x):
    init = torch.zeros_like(x[0])
    carry, _out = torch.ops.higher_order.scan(
        ControlFlowScan.add, [init], [x], additional_inputs=[]
    )
    return carry
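ControlFlowScan.add is not reproduced here; for context, the combine function maps (carry, x_t) to (next_carry, y_t), and scan returns the final carry plus the stacked per-step outputs. A hypothetical stand-in illustrating the contract (the real add lives in model_cases.py):

import torch

def combine(carry, xt):
    nxt = carry + xt
    # return a fresh tensor for the per-step output; returning `nxt` twice
    # risks the aliasing failure reported under nostrict-decall below
    return nxt, nxt.clone()

init = torch.zeros(3)
xs = torch.arange(12, dtype=torch.float32).reshape(4, 3)
carry, out = torch.ops.higher_order.scan(combine, [init], [xs], additional_inputs=[])
print(carry.shape, out.shape)  # expected: (3,) and (4, 3)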
strict¶
FAILED
scan must be captured completely with torch.compile. Got torch.scan: improper combine_fn
Explanation: Expected combine_fn to be wrapped as functools.partial in scan user-facing api or a graph module if we're re-exporting but got <class 'function'>.
Hint: This graph break may be difficult to debug. Please report an issue to PyTorch for assistance.
Developer debug context: UserFunctionVariable()
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0342.html
nostrict¶
inputs: #1[(T1s3x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%select : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%x, 0, 0), kwargs = {})
%zeros_like : [num_users=1] = call_function[target=torch.ops.aten.zeros_like.default](args = (%select,), kwargs = {pin_memory: False})
%scan_combine_graph_0 : [num_users=1] = get_attr[target=scan_combine_graph_0]
%scan : [num_users=2] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like], [%x], ()), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 0), kwargs = {})
%getitem_1 : [num_users=0] = call_function[target=operator.getitem](args = (%scan, 1), kwargs = {})
return (getitem,)
nostrict-oblivious¶
inputs: #1[(T1s3x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%select : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%x, 0, 0), kwargs = {})
%zeros_like : [num_users=1] = call_function[target=torch.ops.aten.zeros_like.default](args = (%select,), kwargs = {pin_memory: False})
%scan_combine_graph_0 : [num_users=1] = get_attr[target=scan_combine_graph_0]
%scan : [num_users=2] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like], [%x], ()), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 0), kwargs = {})
%getitem_1 : [num_users=0] = call_function[target=operator.getitem](args = (%scan, 1), kwargs = {})
return (getitem,)
nostrict-decall¶
FAILED
scan might be aliasing the input or the output!
While executing %scan : [num_users=2] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like], [%x], ()), kwargs = {})
Original traceback:
File "~/github/onnx-diagnostic/onnx_diagnostic/torch_export_patches/eval/model_cases.py", line 384, in forward
carry, _out = torch.ops.higher_order.scan(
Use tlparse to see full graph. (https://github.com/pytorch/tlparse?tab=readme-ov-file#tlparse-parse-structured-pt2-logs)
tracing¶
FAILED
Unable to symbolically trace HigherOrderOperators
ControlFlowScan2Carried¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ControlFlowScan2Carried
forward¶
def forward(self, x):
    init1 = torch.zeros_like(x[0])
    init2 = torch.ones_like(x[0])
    carry1, carry2, out1, out2 = torch.ops.higher_order.scan(
        ControlFlowScan2Carried.add,
        [init1, init2],
        [x, x * 2],
        # dim=0, # 01/31/2025, not supported anymore
        additional_inputs=[],
    )
    return carry1, carry2, out1, out2
strict¶
FAILED
scan must be captured completely with torch.compile. Got torch.scan: improper combine_fn
Explanation: Expected combine_fn to be wrapped as functools.partial in scan user-facing api or a graph module if we're re-exporting but got <class 'function'>.
Hint: This graph break may be difficult to debug. Please report an issue to PyTorch for assistance.
Developer debug context: UserFunctionVariable()
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0342.html
nostrict¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=5] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%select : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%x, 0, 0), kwargs = {})
%zeros_like : [num_users=1] = call_function[target=torch.ops.aten.zeros_like.default](args = (%select,), kwargs = {pin_memory: False})
%select_1 : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%x, 0, 0), kwargs = {})
%ones_like : [num_users=1] = call_function[target=torch.ops.aten.ones_like.default](args = (%select_1,), kwargs = {pin_memory: False})
%mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%x, 2), kwargs = {})
%scan_combine_graph_0 : [num_users=1] = get_attr[target=scan_combine_graph_0]
%scan : [num_users=4] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like, %ones_like], [%x, %mul], ()), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 1), kwargs = {})
%getitem_2 : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 2), kwargs = {})
%getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 3), kwargs = {})
return (getitem, getitem_1, getitem_2, getitem_3)
nostrict-oblivious¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=5] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%select : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%x, 0, 0), kwargs = {})
%zeros_like : [num_users=1] = call_function[target=torch.ops.aten.zeros_like.default](args = (%select,), kwargs = {pin_memory: False})
%select_1 : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%x, 0, 0), kwargs = {})
%ones_like : [num_users=1] = call_function[target=torch.ops.aten.ones_like.default](args = (%select_1,), kwargs = {pin_memory: False})
%mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%x, 2), kwargs = {})
%scan_combine_graph_0 : [num_users=1] = get_attr[target=scan_combine_graph_0]
%scan : [num_users=4] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like, %ones_like], [%x, %mul], ()), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 1), kwargs = {})
%getitem_2 : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 2), kwargs = {})
%getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 3), kwargs = {})
return (getitem, getitem_1, getitem_2, getitem_3)
nostrict-decall¶
FAILED
scan might be aliasing the input or the output!
While executing %scan : [num_users=4] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%zeros_like, %ones_like], [%x, %mul], ()), kwargs = {})
Original traceback:
File "~/github/onnx-diagnostic/onnx_diagnostic/torch_export_patches/eval/model_cases.py", line 403, in forward
carry1, carry2, out1, out2 = torch.ops.higher_order.scan(
Use tlparse to see full graph. (https://github.com/pytorch/tlparse?tab=readme-ov-file#tlparse-parse-structured-pt2-logs)
tracing¶
FAILED
Unable to symbolically trace HigherOrderOperators
ControlFlowScanCDist¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ControlFlowScanCDist
forward¶
def forward(self, x):
    _carry, out = torch.ops.higher_order.scan(
        ControlFlowScanCDist.dist,
        [x],
        [x],
        # dim=0, # 01/31/2025, not supported anymore
        additional_inputs=[],
    )
    return out
strict¶
FAILED
scan must be captured completely with torch.compile. Got torch.scan: improper combine_fn
Explanation: Expected combine_fn to be wrapped as functools.partial in scan user-facing api or a graph module if we're re-exporting but got <class 'function'>.
Hint: This graph break may be difficult to debug. Please report an issue to PyTorch for assistance.
Developer debug context: UserFunctionVariable()
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0342.html
nostrict¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%scan_combine_graph_0 : [num_users=1] = get_attr[target=scan_combine_graph_0]
%scan : [num_users=2] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%x], [%x], ()), kwargs = {})
%getitem : [num_users=0] = call_function[target=operator.getitem](args = (%scan, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 1), kwargs = {})
return (getitem_1,)
nostrict-oblivious¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%scan_combine_graph_0 : [num_users=1] = get_attr[target=scan_combine_graph_0]
%scan : [num_users=2] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%x], [%x], ()), kwargs = {})
%getitem : [num_users=0] = call_function[target=operator.getitem](args = (%scan, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 1), kwargs = {})
return (getitem_1,)
nostrict-decall¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%scan_combine_graph_0 : [num_users=1] = get_attr[target=scan_combine_graph_0]
%scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%x], [%x], ()), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 1), kwargs = {})
return (getitem_1,)
tracing¶
FAILED
Unable to symbolically trace HigherOrderOperators
ControlFlowScanCDist2¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ControlFlowScanCDist2
forward¶
def forward(self, x):
    z = torch.tensor([0], dtype=torch.float32)
    y = x.clone()
    out = torch.ops.higher_order.scan(
        ControlFlowScanCDist2.dist,
        [z],
        [x],
        # dim=0, # 01/31/2025, not supported anymore
        additional_inputs=[y],
    )
    return out[1]
strict¶
FAILED
scan must be captured completely with torch.compile. Got torch.scan: improper combine_fn
Explanation: Expected combine_fn to be wrapped as functools.partial in scan user-facing api or a graph module if we're re-exporting but got <class 'function'>.
Hint: This graph break may be difficult to debug. Please report an issue to PyTorch for assistance.
Developer debug context: UserFunctionVariable()
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0342.html
nostrict¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%lift_fresh_copy : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%lifted_tensor_0,), kwargs = {})
%detach_ : [num_users=1] = call_function[target=torch.ops.aten.detach_.default](args = (%lift_fresh_copy,), kwargs = {})
%clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%x,), kwargs = {})
%scan_combine_graph_0 : [num_users=1] = get_attr[target=scan_combine_graph_0]
%scan : [num_users=2] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%detach_], [%x], (%clone,)), kwargs = {})
%getitem : [num_users=0] = call_function[target=operator.getitem](args = (%scan, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 1), kwargs = {})
return (getitem_1,)
nostrict-oblivious¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%lift_fresh_copy : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%lifted_tensor_0,), kwargs = {})
%detach_ : [num_users=1] = call_function[target=torch.ops.aten.detach_.default](args = (%lift_fresh_copy,), kwargs = {})
%clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%x,), kwargs = {})
%scan_combine_graph_0 : [num_users=1] = get_attr[target=scan_combine_graph_0]
%scan : [num_users=2] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%detach_], [%x], (%clone,)), kwargs = {})
%getitem : [num_users=0] = call_function[target=operator.getitem](args = (%scan, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 1), kwargs = {})
return (getitem_1,)
nostrict-decall¶
inputs: #1[(T1s3x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%lifted_tensor_0,), kwargs = {})
%clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%x,), kwargs = {})
%scan_combine_graph_0 : [num_users=1] = get_attr[target=scan_combine_graph_0]
%scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%clone], [%x], (%clone_1,)), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 1), kwargs = {})
return (getitem_1,)
tracing¶
FAILED
(CustomProxy(clone),) can only be of (<class 'torch.Tensor'>, <class 'int'>, <class 'torch.SymInt'>) but got (<class 'experimental_experiment.torch_interpreter.tracing.CustomProxy'>,)
ControlFlowScanCDistXY¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ControlFlowScanCDistXY
forward¶
def forward(self, x, y):
    _carry, out = torch.ops.higher_order.scan(
        ControlFlowScanCDistXY.dist,
        [y],
        [x],
        # dim=0, # 01/31/2025, not supported anymore
        additional_inputs=[],
    )
    return out
strict¶
FAILED
scan must be captured completely with torch.compile. Got torch.scan: improper combine_fn
Explanation: Expected combine_fn to be wrapped as functools.partial in scan user-facing api or a graph module if we're re-exporting but got <class 'function'>.
Hint: This graph break may be difficult to debug. Please report an issue to PyTorch for assistance.
Developer debug context: UserFunctionVariable()
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0342.html
nostrict¶
inputs: #2[(T1s3x4,T1s5x4),(T1s13x14,T1s15x14)]
shapes: dict(x:{0:Dim(x_rows),1:Dim(dim)},y:{0:Dim(y_rows),1:Dim(dim)})
graph():
%x : [num_users=2] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%scan_combine_graph_0 : [num_users=1] = get_attr[target=scan_combine_graph_0]
%scan : [num_users=2] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%y], [%x], ()), kwargs = {})
%getitem : [num_users=0] = call_function[target=operator.getitem](args = (%scan, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 1), kwargs = {})
return (getitem_1,)
nostrict-oblivious¶
inputs: #2[(T1s3x4,T1s5x4),(T1s13x14,T1s15x14)]
shapes: dict(x:{0:Dim(x_rows),1:Dim(dim)},y:{0:Dim(y_rows),1:Dim(dim)})
graph():
%x : [num_users=2] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%scan_combine_graph_0 : [num_users=1] = get_attr[target=scan_combine_graph_0]
%scan : [num_users=2] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%y], [%x], ()), kwargs = {})
%getitem : [num_users=0] = call_function[target=operator.getitem](args = (%scan, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 1), kwargs = {})
return (getitem_1,)
nostrict-decall¶
inputs: #2[(T1s3x4,T1s5x4),(T1s13x14,T1s15x14)]
shapes: dict(x:{0:Dim(x_rows),1:Dim(dim)},y:{0:Dim(y_rows),1:Dim(dim)})
graph():
%x : [num_users=2] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%scan_combine_graph_0 : [num_users=1] = get_attr[target=scan_combine_graph_0]
%scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [%y], [%x], ()), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 1), kwargs = {})
return (getitem_1,)
tracing¶
FAILED
Unable to symbolically trace HigherOrderOperators
ControlFlowScanDecomposition_151564¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ControlFlowScanDecomposition_151564
forward¶
def forward(self, images, position):
    return self.select_when_exporting(self.dummy_loop, self.dummy_loop_with_scan)(
        images, position
    )
strict¶
FAILED
scan must be captured completely with torch.compile. Got torch.scan: improper combine_fn
Explanation: Expected combine_fn to be wrapped as functools.partial in scan user-facing api or a graph module if we're re-exporting but got <class 'function'>.
Hint: This graph break may be difficult to debug. Please report an issue to PyTorch for assistance.
Developer debug context: NestedUserFunctionVariable()
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0342.html
nostrict¶
inputs: #1[(T1s5x6,T7s5)]
shapes: dict(images:{0:DYNAMIC,1:DYNAMIC},position:{0:DYNAMIC})
FAILED
Runtime assertion failed for expression u0 >= 2 on node 'ge'
nostrict-oblivious¶
inputs: #1[(T1s5x6,T7s5)]
shapes: dict(images:{0:DYNAMIC,1:DYNAMIC},position:{0:DYNAMIC})
FAILED
Runtime assertion failed for expression u0 >= 2 on node 'ge'
nostrict-decall¶
inputs: #1[(T1s5x6,T7s5)]
shapes: dict(images:{0:DYNAMIC,1:DYNAMIC},position:{0:DYNAMIC})
FAILED
Runtime assertion failed for expression u4 >= 2 on node 'ge_4'
tracing¶
FAILED
'CustomProxy' object cannot be interpreted as an integer
ControlFlowScanInplace_153705¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ControlFlowScanInplace_153705
forward¶
def forward(self, x, y):
    def loop_body_1(z, iv, x, y):
        z = z.clone()
        i = iv.item()
        z[i, :] = ((x[i, :] - y) ** 2).sum(dim=-1)
        return [z, iv]

    z = torch.empty((x.shape[0], y.shape[0]))
    r = torch.ops.higher_order.scan(
        loop_body_1, [z], [torch.arange(x.shape[0], dtype=torch.int64)], [x, y]
    )
    return r[0]
strict¶
FAILED
scan must be captured completely with torch.compile. Got torch.scan: improper combine_fn
Explanation: Expected combine_fn to be wrapped as functools.partial in scan user-facing api or a graph module if we're re-exporting but got <class 'function'>.
Hint: This graph break may be difficult to debug. Please report an issue to PyTorch for assistance.
Developer debug context: NestedUserFunctionVariable()
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0342.html
nostrict¶
FAILED
only integers, slices (`:`), ellipsis (`...`), None and long or byte Variables are valid indices (got SymInt)
nostrict-oblivious¶
FAILED
only integers, slices (`:`), ellipsis (`...`), None and long or byte Variables are valid indices (got SymInt)
nostrict-decall¶
FAILED
only integers, slices (`:`), ellipsis (`...`), None and long or byte Variables are valid indices (got SymInt)
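The three nonstrict failures all come from z[i, :] = ... with i = iv.item(): a SymInt is not accepted as a Python subscript while tracing. A sketch of a functional rewrite using tensor-index ops instead of subscript assignment (hypothetical, untested):

import torch

def loop_body_1(z, iv, x, y):
    idx = iv.reshape(1)
    # (x[i, :] - y) ** 2 summed over the last dim, without SymInt subscripts
    row = ((x.index_select(0, idx) - y) ** 2).sum(dim=-1)
    # functional replacement for `z[i, :] = row`
    z = z.index_copy(0, idx, row.unsqueeze(0))
    return [z, iv.clone()]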
tracing¶
FAILED
(CustomProxy(x), CustomProxy(y)) can only be of (<class 'torch.Tensor'>, <class 'int'>, <class 'torch.SymInt'>) but got (<class 'experimental_experiment.torch_interpreter.tracing.CustomProxy'>, <class 'experimental_experiment.torch_interpreter.tracing.CustomProxy'>)
ControlFlowWhileDec¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ControlFlowWhileDec
forward¶
def forward(self, ci, a, b):
    def cond_fn(i, x, y):
        return i > 0

    def body_fn(i, x, y):
        return i - 1, x + y, y - x

    return torch._higher_order_ops.while_loop(cond_fn, body_fn, [ci, a, b])
strict¶
FAILED
Constraints violated (L['a'].size()[1])! For more information, run with TORCH_LOGS="+dynamic".
- You marked L['a'].size()[1] as dynamic but your code specialized it to be a constant (3). If you're using mark_dynamic, either remove it or use maybe_mark_dynamic. If you're using Dim.DYNAMIC, replace it with either Dim.STATIC or Dim.AUTO.
User stack:
File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/functional_export.py", line 214, in forward
res = self._export_root(*args, **kwargs)
File "~/github/onnx-diagnostic/onnx_diagnostic/torch_export_patches/eval/model_cases.py", line 581, in forward
return torch._higher_order_ops.while_loop(cond_fn, body_fn, [ci, a, b])
File "~/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/while_loop.py", line 221, in while_loop
return while_loop_op(flat_cond_fn, flat_body_fn, tuple(flat_inputs), tuple())
File "~/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/while_loop.py", line 218, in flat_body_fn
return body_fn(*carried, *additional)
File "~/github/onnx-diagnostic/onnx_diagnostic/torch_export_patches/eval/model_cases.py", line 579, in body_fn
return i - 1, x + y, y - x
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
nostrict¶
FAILED
Constraints violated (L['a'].size()[1])! For more information, run with TORCH_LOGS="+dynamic".
- You marked L['a'].size()[1] as dynamic but your code specialized it to be a constant (3). If you're using mark_dynamic, either remove it or use maybe_mark_dynamic. If you're using Dim.DYNAMIC, replace it with either Dim.STATIC or Dim.AUTO.
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
nostrict-oblivious¶
FAILED
Constraints violated (L['a'].size()[1])! For more information, run with TORCH_LOGS="+dynamic".
- You marked L['a'].size()[1] as dynamic but your code specialized it to be a constant (3). If you're using mark_dynamic, either remove it or use maybe_mark_dynamic. If you're using Dim.DYNAMIC, replace it with either Dim.STATIC or Dim.AUTO.
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
nostrict-decall¶
FAILED
Constraints violated (L['a'].size()[1])! For more information, run with TORCH_LOGS="+dynamic".
- You marked L['a'].size()[1] as dynamic but your code specialized it to be a constant (3). If you're using mark_dynamic, either remove it or use maybe_mark_dynamic. If you're using Dim.DYNAMIC, replace it with either Dim.STATIC or Dim.AUTO.
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
tracing¶
FAILED
[CustomProxy(ci), CustomProxy(a), CustomProxy(b)] can only be of (<class 'torch.Tensor'>, <class 'int'>, <class 'torch.SymInt'>) but got (<class 'experimental_experiment.torch_interpreter.tracing.CustomProxy'>, <class 'experimental_experiment.torch_interpreter.tracing.CustomProxy'>, <class 'experimental_experiment.torch_interpreter.tracing.CustomProxy'>)
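Every mode fails with the same constraint violation: dimension 1 of a is marked dynamic, but the loop body specializes it to 3. Following the message's own suggestion, a sketch using Dim.AUTO, which lets export specialize such dimensions instead of erroring (hypothetical inputs; model stands for the case instance):

import torch

ci, a, b = torch.tensor(3), torch.rand(4, 3), torch.rand(4, 3)  # assumed shapes
ep = torch.export.export(
    model,
    (ci, a, b),
    dynamic_shapes={
        "ci": None,  # scalar counter stays static
        "a": {0: torch.export.Dim.AUTO, 1: torch.export.Dim.AUTO},
        "b": {0: torch.export.Dim.AUTO, 1: torch.export.Dim.AUTO},
    },
)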
ControlFlowWhileInc¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ControlFlowWhileInc
forward¶
def forward(self, ci, a, b):
    def cond_fn(i, x, y):
        return i < x.size(0)

    def body_fn(i, x, y):
        return i + 1, x + y, y - x

    return torch._higher_order_ops.while_loop(cond_fn, body_fn, [ci, a, b])
strict¶
FAILED
Constraints violated (L['a'].size()[1])! For more information, run with TORCH_LOGS="+dynamic".
- You marked L['a'].size()[1] as dynamic but your code specialized it to be a constant (3). If you're using mark_dynamic, either remove it or use maybe_mark_dynamic. If you're using Dim.DYNAMIC, replace it with either Dim.STATIC or Dim.AUTO.
User stack:
File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/functional_export.py", line 214, in forward
res = self._export_root(*args, **kwargs)
File "~/github/onnx-diagnostic/onnx_diagnostic/torch_export_patches/eval/model_cases.py", line 595, in forward
return torch._higher_order_ops.while_loop(cond_fn, body_fn, [ci, a, b])
File "~/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/while_loop.py", line 221, in while_loop
return while_loop_op(flat_cond_fn, flat_body_fn, tuple(flat_inputs), tuple())
File "~/vv/this312/lib/python3.12/site-packages/torch/_higher_order_ops/while_loop.py", line 218, in flat_body_fn
return body_fn(*carried, *additional)
File "~/github/onnx-diagnostic/onnx_diagnostic/torch_export_patches/eval/model_cases.py", line 593, in body_fn
return i + 1, x + y, y - x
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
nostrict¶
FAILED
Constraints violated (L['a'].size()[1])! For more information, run with TORCH_LOGS="+dynamic".
- You marked L['a'].size()[1] as dynamic but your code specialized it to be a constant (3). If you're using mark_dynamic, either remove it or use maybe_mark_dynamic. If you're using Dim.DYNAMIC, replace it with either Dim.STATIC or Dim.AUTO.
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
nostrict-oblivious¶
FAILED
Constraints violated (L['a'].size()[1])! For more information, run with TORCH_LOGS="+dynamic".
- You marked L['a'].size()[1] as dynamic but your code specialized it to be a constant (3). If you're using mark_dynamic, either remove it or use maybe_mark_dynamic. If you're using Dim.DYNAMIC, replace it with either Dim.STATIC or Dim.AUTO.
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
nostrict-decall¶
FAILED
Constraints violated (L['a'].size()[1])! For more information, run with TORCH_LOGS="+dynamic".
- You marked L['a'].size()[1] as dynamic but your code specialized it to be a constant (3). If you're using mark_dynamic, either remove it or use maybe_mark_dynamic. If you're using Dim.DYNAMIC, replace it with either Dim.STATIC or Dim.AUTO.
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
tracing¶
FAILED
[CustomProxy(ci), CustomProxy(a), CustomProxy(b)] can only be of (<class 'torch.Tensor'>, <class 'int'>, <class 'torch.SymInt'>) but got (<class 'experimental_experiment.torch_interpreter.tracing.CustomProxy'>, <class 'experimental_experiment.torch_interpreter.tracing.CustomProxy'>, <class 'experimental_experiment.torch_interpreter.tracing.CustomProxy'>)
CreateFromShape¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.CreateFromShape
forward¶
def forward(self, x):
    y = torch.ones((x.shape[0], x.shape[1] + 1))
    return y
strict¶
inputs: #2[(T1s4x4,),(T1s5x5,)]
shapes: dict(x:{0:Dim(dx),1:Dim(dy)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sym_size_int_2 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 0), kwargs = {})
%sym_size_int_3 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sym_size_int_3, 1), kwargs = {})
%ones : [num_users=1] = call_function[target=torch.ops.aten.ones.default](args = ([%sym_size_int_2, %add],), kwargs = {device: cpu, pin_memory: False})
return (ones,)
nostrict¶
inputs: #2[(T1s4x4,),(T1s5x5,)]
shapes: dict(x:{0:Dim(dx),1:Dim(dy)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sym_size_int_2 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 0), kwargs = {})
%sym_size_int_3 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sym_size_int_3, 1), kwargs = {})
%ones : [num_users=1] = call_function[target=torch.ops.aten.ones.default](args = ([%sym_size_int_2, %add],), kwargs = {device: cpu, pin_memory: False})
return (ones,)
nostrict-oblivious¶
inputs: #2[(T1s4x4,),(T1s5x5,)]
shapes: dict(x:{0:Dim(dx),1:Dim(dy)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sym_size_int_2 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 0), kwargs = {})
%sym_size_int_3 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sym_size_int_3, 1), kwargs = {})
%ones : [num_users=1] = call_function[target=torch.ops.aten.ones.default](args = ([%sym_size_int_2, %add],), kwargs = {device: cpu, pin_memory: False})
return (ones,)
nostrict-decall¶
inputs: #2[(T1s4x4,),(T1s5x5,)]
shapes: dict(x:{0:Dim(dx),1:Dim(dy)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sym_size_int_2 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 0), kwargs = {})
%sym_size_int_3 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sym_size_int_3, 1), kwargs = {})
%full : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([%sym_size_int_2, %add], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
return (full,)
tracing¶
inputs: #2[(T1s4x4,),(T1s5x5,)]
shapes: dict(x:{0:Dim(dx),1:Dim(dy)})
graph():
%x : [num_users=2] = placeholder[target=x]
%getattr_1 : [num_users=1] = call_function[target=builtins.getattr](args = (%x, shape), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%getattr_1, 0), kwargs = {})
%getattr_2 : [num_users=1] = call_function[target=builtins.getattr](args = (%x, shape), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%getattr_2, 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%getitem_1, 1), kwargs = {})
%ones : [num_users=1] = call_function[target=torch.ones](args = ((%getitem, %add),), kwargs = {})
return ones
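All five modes succeed here. For reference, a sketch of the export call that yields graphs like those above (assuming model is a CreateFromShape instance; dx and dy are the dimension names shown in the shapes lines):

import torch

dx, dy = torch.export.Dim("dx"), torch.export.Dim("dy")
ep = torch.export.export(
    model,
    (torch.rand(4, 4),),  # matches the documented input T1s4x4
    dynamic_shapes={"x": {0: dx, 1: dy}},
)
print(ep.graph)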
CreateFromShapeThroughFunction¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.CreateFromShapeThroughFunction
forward¶
def forward(self, x):
    dy1 = CreateFromShapeThroughFunction.add_one(x.shape[1])
    y = torch.ones((x.shape[0], dy1))
    return y
strict¶
inputs: #1[(T1s4x4,)]
shapes: dict(x:{0:Dim(dx),1:Dim(dy)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sym_size_int_2 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 0), kwargs = {})
%sym_size_int_3 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sym_size_int_3, 1), kwargs = {})
%ones : [num_users=1] = call_function[target=torch.ops.aten.ones.default](args = ([%sym_size_int_2, %add],), kwargs = {device: cpu, pin_memory: False})
return (ones,)
nostrict¶
inputs: #1[(T1s4x4,)]
shapes: dict(x:{0:Dim(dx),1:Dim(dy)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sym_size_int_2 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 0), kwargs = {})
%sym_size_int_3 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sym_size_int_3, 1), kwargs = {})
%ones : [num_users=1] = call_function[target=torch.ops.aten.ones.default](args = ([%sym_size_int_2, %add],), kwargs = {device: cpu, pin_memory: False})
return (ones,)
nostrict-oblivious¶
inputs: #1[(T1s4x4,)]
shapes: dict(x:{0:Dim(dx),1:Dim(dy)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sym_size_int_2 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 0), kwargs = {})
%sym_size_int_3 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sym_size_int_3, 1), kwargs = {})
%ones : [num_users=1] = call_function[target=torch.ops.aten.ones.default](args = ([%sym_size_int_2, %add],), kwargs = {device: cpu, pin_memory: False})
return (ones,)
nostrict-decall¶
inputs: #1[(T1s4x4,)]
shapes: dict(x:{0:Dim(dx),1:Dim(dy)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sym_size_int_2 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 0), kwargs = {})
%sym_size_int_3 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sym_size_int_3, 1), kwargs = {})
%full : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([%sym_size_int_2, %add], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
return (full,)
tracing¶
inputs: #1[(T1s4x4,)]
shapes: dict(x:{0:Dim(dx),1:Dim(dy)})
graph():
%x : [num_users=2] = placeholder[target=x]
%getattr_1 : [num_users=1] = call_function[target=builtins.getattr](args = (%x, shape), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%getattr_1, 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%getitem, 1), kwargs = {})
%getattr_2 : [num_users=1] = call_function[target=builtins.getattr](args = (%x, shape), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%getattr_2, 0), kwargs = {})
%ones : [num_users=1] = call_function[target=torch.ones](args = ((%getitem_1, %add),), kwargs = {})
return ones
CropLastDimensionWithTensorContent¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.CropLastDimensionWithTensorContent
forward¶
def forward(self, x, shape):
    return x[..., : shape[0]]
strict¶
inputs: #2[(T1s3x4x4,T7s1),(T1s6x4x4,T7s1)]
shapes: dict(x:{0:Dim(batch)},shape:{})
graph():
%x : [num_users=2] = placeholder[target=x]
%shape : [num_users=2] = placeholder[target=shape]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %shape), kwargs = {})
%select : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%shape, 0, 0), kwargs = {})
%item : [num_users=1] = call_function[target=torch.ops.aten.item.default](args = (%select,), kwargs = {})
%slice_1 : [num_users=2] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 2, 0, %item), kwargs = {})
%sym_size_int_1 : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%slice_1, 2), kwargs = {})
%ge : [num_users=1] = call_function[target=operator.ge](args = (%sym_size_int_1, 0), kwargs = {})
%_assert_scalar_default : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%ge, Runtime assertion failed for expression u1 >= 0 on node 'ge'), kwargs = {})
%le_1 : [num_users=1] = call_function[target=operator.le](args = (%sym_size_int_1, 4), kwargs = {})
%_assert_scalar_default_1 : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%le_1, Runtime assertion failed for expression u1 <= 4 on node 'le_1'), kwargs = {})
return (slice_1,)
nostrict¶
FAILED
Could not extract specialized integer from data-dependent expression u0 (unhinted: u0). (Size-like symbols: none)
Caused by: (_export/non_strict_utils.py:1121 in __torch_function__)
For more information, run with TORCH_LOGS="dynamic"
For extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"
If you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1
For more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing
For C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1
The following call raised this error:
File "~/github/onnx-diagnostic/onnx_diagnostic/torch_export_patches/eval/model_cases.py", line 843, in forward
return x[..., : shape[0]]
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
nostrict-oblivious¶
FAILED
Could not extract specialized integer from data-dependent expression u0 (unhinted: u0). (Size-like symbols: none)
Caused by: (_export/non_strict_utils.py:1121 in __torch_function__)
For more information, run with TORCH_LOGS="dynamic"
For extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"
If you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1
For more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing
For C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1
The following call raised this error:
File "~/github/onnx-diagnostic/onnx_diagnostic/torch_export_patches/eval/model_cases.py", line 843, in forward
return x[..., : shape[0]]
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
nostrict-decall¶
FAILED
Could not extract specialized integer from data-dependent expression u0 (unhinted: u0). (Size-like symbols: none)
Caused by: (_export/non_strict_utils.py:1121 in __torch_function__)
For more information, run with TORCH_LOGS="dynamic"
For extended logs when we create symbols, also add TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u0"
If you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1
For more debugging help, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing
For C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1
The following call raised this error:
File "~/github/onnx-diagnostic/onnx_diagnostic/torch_export_patches/eval/model_cases.py", line 843, in forward
return x[..., : shape[0]]
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
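The nonstrict failures all trace back to the data-dependent slice bound shape[0]. A sketch of the usual mitigation, extracting the scalar and bounding it with torch._check, mirroring the assertions the strict graph inserts above (untested whether this suffices here):

import torch

def forward(x, shape):  # method body shown as a free function
    n = shape[0].item()             # the data-dependent value u0
    torch._check(n >= 0)            # the bounds asserted in the strict graph
    torch._check(n <= x.shape[-1])
    return x[..., :n]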
tracing¶
inputs: #2[(T1s3x4x4,T7s1),(T1s6x4x4,T7s1)]
shapes: dict(x:{0:Dim(batch)},shape:{})
graph():
%x : [num_users=1] = placeholder[target=x]
%shape : [num_users=1] = placeholder[target=shape]
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%shape, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%x, (Ellipsis, slice(None, getitem, None))), kwargs = {})
return getitem_1
CropLastDimensionWithTensorShape¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.CropLastDimensionWithTensorShape
forward¶
def forward(self, x, y):
    return x[..., : y.shape[0]]
strict¶
inputs: #2[(T1s3x4x4,T1s2),(T1s6x4x4,T1s3)]
shapes: dict(x:{0:Dim(batch)},y:{0:Dim(crop)})
graph():
%x : [num_users=2] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%sym_size_int_2 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%y, 0), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 2, 0, %sym_size_int_2), kwargs = {})
return (slice_1,)
nostrict¶
inputs: #2[(T1s3x4x4,T1s2),(T1s6x4x4,T1s3)]
shapes: dict(x:{0:Dim(batch)},y:{0:Dim(crop)})
graph():
%x : [num_users=2] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%sym_size_int_2 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%y, 0), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 2, None, %sym_size_int_2), kwargs = {})
return (slice_1,)
nostrict-oblivious¶
inputs: #2[(T1s3x4x4,T1s2),(T1s6x4x4,T1s3)]
shapes: dict(x:{0:Dim(batch)},y:{0:Dim(crop)})
graph():
%x : [num_users=2] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%sym_size_int_2 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%y, 0), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 2, None, %sym_size_int_2), kwargs = {})
return (slice_1,)
nostrict-decall¶
inputs: #2[(T1s3x4x4,T1s2),(T1s6x4x4,T1s3)]
shapes: dict(x:{0:Dim(batch)},y:{0:Dim(crop)})
graph():
%x : [num_users=2] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%sym_size_int_4 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%y, 0), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 2, None, %sym_size_int_4), kwargs = {})
return (slice_1,)
tracing¶
inputs: #2[(T1s3x4x4,T1s2),(T1s6x4x4,T1s3)]
shapes: dict(x:{0:Dim(batch)},y:{0:Dim(crop)})
graph():
%x : [num_users=1] = placeholder[target=x]
%y : [num_users=1] = placeholder[target=y]
%getattr_1 : [num_users=1] = call_function[target=builtins.getattr](args = (%y, shape), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%getattr_1, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%x, (Ellipsis, slice(None, getitem, None))), kwargs = {})
return getitem_1
ExportWithDimension0¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ExportWithDimension0
forward¶
def forward(self, x):
return x @ torch.arange(x.shape[1], dtype=torch.float32).reshape((-1, 1))
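Only nostrict-oblivious succeeds here because the sample input has a first dimension of 0: torch.export 0/1-specializes such dimensions, which conflicts with the user-specified Dim.DYNAMIC unless size-oblivious reasoning is enabled. ExportWithDimension1 below shows the same behaviour with a size-1 sample. A sketch of the failure and the workaround, assuming the oblivious mode maps to the private backed_size_oblivious flag in torch.fx.experimental._config:
<<<
import torch
import torch.fx.experimental._config as fx_config

class ExportWithDimension0(torch.nn.Module):
    def forward(self, x):
        return x @ torch.arange(x.shape[1], dtype=torch.float32).reshape((-1, 1))

x = torch.randn(0, 3)  # zero-sized first dimension triggers 0/1 specialization
ds = {"x": {0: torch.export.Dim.DYNAMIC, 1: torch.export.Dim.DYNAMIC}}
try:
    torch.export.export(ExportWithDimension0(), (x,), dynamic_shapes=ds)
except Exception as e:
    print(e)  # Dim.DYNAMIC conflicts with the 0/1-specialized hint

# assumption: the "oblivious" variant corresponds to this private flag
with fx_config.patch(backed_size_oblivious=True):
    ep = torch.export.export(ExportWithDimension0(), (x,), dynamic_shapes=ds)
>>>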
strict¶
FAILED
Found the following conflicts between user-specified ranges and inferred ranges from model tracing:
- Received user-specified dim hint Dim.DYNAMIC(min=None, max=None), but export 0/1 specialized due to hint of 0 for dimension inputs['x'].shape[0].
nostrict¶
FAILED
Found the following conflicts between user-specified ranges and inferred ranges from model tracing:
- Received user-specified dim hint Dim.DYNAMIC(min=None, max=None), but export 0/1 specialized due to hint of 0 for dimension inputs['x'].shape[0].
nostrict-oblivious¶
inputs: #1[(T1s0x3,)]
shapes: dict(x:{0:DYNAMIC,1:DYNAMIC})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sym_size_int_2 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 1), kwargs = {})
%arange : [num_users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_int_2,), kwargs = {dtype: torch.float32, device: cpu, pin_memory: False})
%reshape : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%arange, [-1, 1]), kwargs = {})
%matmul : [num_users=1] = call_function[target=torch.ops.aten.matmul.default](args = (%x, %reshape), kwargs = {})
return (matmul,)
nostrict-decall¶
FAILED
Found the following conflicts between user-specified ranges and inferred ranges from model tracing:
- Received user-specified dim hint Dim.DYNAMIC(min=None, max=None), but export 0/1 specialized due to hint of 0 for dimension inputs['x'].shape[0].
tracing¶
inputs: #1[(T1s0x3,)]
shapes: dict(x:{0:DYNAMIC,1:DYNAMIC})
graph():
%x : [num_users=2] = placeholder[target=x]
%getattr_1 : [num_users=1] = call_function[target=builtins.getattr](args = (%x, shape), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%getattr_1, 1), kwargs = {})
%arange : [num_users=1] = call_function[target=torch.arange](args = (%getitem,), kwargs = {dtype: torch.float32})
%reshape : [num_users=1] = call_method[target=reshape](args = (%arange, (-1, 1)), kwargs = {})
%matmul : [num_users=1] = call_function[target=operator.matmul](args = (%x, %reshape), kwargs = {})
return matmul
ExportWithDimension1¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.ExportWithDimension1
forward¶
def forward(self, x):
return x @ torch.arange(x.shape[1], dtype=torch.float32).reshape((-1, 1))
strict¶
FAILED
Found the following conflicts between user-specified ranges and inferred ranges from model tracing:
- Received user-specified dim hint Dim.DYNAMIC(min=None, max=None), but export 0/1 specialized due to hint of 1 for dimension inputs['x'].shape[0].
nostrict¶
FAILED
Found the following conflicts between user-specified ranges and inferred ranges from model tracing:
- Received user-specified dim hint Dim.DYNAMIC(min=None, max=None), but export 0/1 specialized due to hint of 1 for dimension inputs['x'].shape[0].
nostrict-oblivious¶
inputs: #1[(T1s1x3,)]
shapes: dict(x:{0:DYNAMIC,1:DYNAMIC})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sym_size_int_2 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 1), kwargs = {})
%arange : [num_users=1] = call_function[target=torch.ops.aten.arange.default](args = (%sym_size_int_2,), kwargs = {dtype: torch.float32, device: cpu, pin_memory: False})
%reshape : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%arange, [-1, 1]), kwargs = {})
%matmul : [num_users=1] = call_function[target=torch.ops.aten.matmul.default](args = (%x, %reshape), kwargs = {})
return (matmul,)
nostrict-decall¶
FAILED
Found the following conflicts between user-specified ranges and inferred ranges from model tracing:
- Received user-specified dim hint Dim.DYNAMIC(min=None, max=None), but export 0/1 specialized due to hint of 1 for dimension inputs['x'].shape[0].
tracing¶
inputs: #1[(T1s1x3,)]
shapes: dict(x:{0:DYNAMIC,1:DYNAMIC})
graph():
%x : [num_users=2] = placeholder[target=x]
%getattr_1 : [num_users=1] = call_function[target=builtins.getattr](args = (%x, shape), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%getattr_1, 1), kwargs = {})
%arange : [num_users=1] = call_function[target=torch.arange](args = (%getitem,), kwargs = {dtype: torch.float32})
%reshape : [num_users=1] = call_method[target=reshape](args = (%arange, (-1, 1)), kwargs = {})
%matmul : [num_users=1] = call_function[target=operator.matmul](args = (%x, %reshape), kwargs = {})
return matmul
InplaceAdd¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.InplaceAdd
forward¶
def forward(self, x):
x += self.bias
return x
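In strict and nostrict modes the mutation is kept as aten.add_.Tensor; running decompositions (the decall variant) functionalizes it into an out-of-place aten.add.Tensor followed by aten.copy_ back into the input. The same pattern repeats in InplaceAdd2, InplaceAdd_Mul and InplaceCloneAdd_ below. A sketch, assuming a plain buffer for bias since its initialisation is not shown:
<<<
import torch

class InplaceAdd(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.register_buffer("bias", torch.ones(1, 4))  # assumed initialisation

    def forward(self, x):
        x += self.bias
        return x

ep = torch.export.export(
    InplaceAdd(), (torch.randn(3, 4),),
    dynamic_shapes={"x": {0: torch.export.Dim("batch")}},
)
print(ep.graph)                       # keeps aten.add_.Tensor
print(ep.run_decompositions().graph)  # aten.add.Tensor followed by aten.copy_
>>>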
strict¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%bias : [num_users=1] = get_attr[target=bias]
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%x, %bias), kwargs = {})
return (add_,)
nostrict¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%bias : [num_users=1] = get_attr[target=bias]
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%x, %bias), kwargs = {})
return (add_,)
nostrict-oblivious¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%bias : [num_users=1] = get_attr[target=bias]
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%x, %bias), kwargs = {})
return (add_,)
nostrict-decall¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%bias : [num_users=1] = get_attr[target=bias]
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %bias), kwargs = {})
%copy__default : [num_users=1] = call_function[target=torch.ops.aten.copy_.default](args = (%x, %add_3), kwargs = {})
return (copy__default,)
tracing¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=1] = placeholder[target=x]
%bias : [num_users=1] = get_attr[target=bias]
%add : [num_users=1] = call_function[target=operator.add](args = (%x, %bias), kwargs = {})
return add
InplaceAdd2¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.InplaceAdd2
forward¶
def forward(self, x):
x.add_(self.bias)
return x
strict¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%bias : [num_users=1] = get_attr[target=bias]
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%x, %bias), kwargs = {})
return (add_,)
nostrict¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%bias : [num_users=1] = get_attr[target=bias]
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%x, %bias), kwargs = {})
return (add_,)
nostrict-oblivious¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%bias : [num_users=1] = get_attr[target=bias]
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%x, %bias), kwargs = {})
return (add_,)
nostrict-decall¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%bias : [num_users=1] = get_attr[target=bias]
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %bias), kwargs = {})
%copy__default : [num_users=1] = call_function[target=torch.ops.aten.copy_.default](args = (%x, %add_3), kwargs = {})
return (copy__default,)
tracing¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=1] = placeholder[target=x]
%bias : [num_users=1] = get_attr[target=bias]
%add_ : [num_users=1] = call_method[target=add_](args = (%x, %bias), kwargs = {})
return add_
InplaceAdd_Mul¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.InplaceAdd_Mul
forward¶
def forward(self, x):
x.add_(self.bias)
return x * 2
strict¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%bias : [num_users=1] = get_attr[target=bias]
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%x, %bias), kwargs = {})
%mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_, 2), kwargs = {})
return (mul,)
nostrict¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%bias : [num_users=1] = get_attr[target=bias]
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%x, %bias), kwargs = {})
%mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_, 2), kwargs = {})
return (mul,)
nostrict-oblivious¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%bias : [num_users=1] = get_attr[target=bias]
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%x, %bias), kwargs = {})
%mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_, 2), kwargs = {})
return (mul,)
nostrict-decall¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%bias : [num_users=1] = get_attr[target=bias]
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %bias), kwargs = {})
%mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, 2), kwargs = {})
%copy__default : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%x, %add_3), kwargs = {})
return (mul_4,)
tracing¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=1] = placeholder[target=x]
%bias : [num_users=1] = get_attr[target=bias]
%add_ : [num_users=1] = call_method[target=add_](args = (%x, %bias), kwargs = {})
%mul : [num_users=1] = call_function[target=operator.mul](args = (%add_, 2), kwargs = {})
return mul
InplaceCloneAdd_¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.InplaceCloneAdd_
forward¶
def forward(self, x):
x = x.clone()
x.add_(self.bias)
return x
strict¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%bias : [num_users=1] = get_attr[target=bias]
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%x,), kwargs = {})
%add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%clone, %bias), kwargs = {})
return (add_,)
nostrict¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%bias : [num_users=1] = get_attr[target=bias]
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%x,), kwargs = {})
%add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%clone, %bias), kwargs = {})
return (add_,)
nostrict-oblivious¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%bias : [num_users=1] = get_attr[target=bias]
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%x,), kwargs = {})
%add_ : [num_users=1] = call_function[target=torch.ops.aten.add_.Tensor](args = (%clone, %bias), kwargs = {})
return (add_,)
nostrict-decall¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%bias : [num_users=1] = get_attr[target=bias]
%x : [num_users=2] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%x,), kwargs = {})
%add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%clone, %bias), kwargs = {})
return (add_6,)
tracing¶
inputs: #2[(T1s3x4,),(T1s5x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=1] = placeholder[target=x]
%clone : [num_users=1] = call_method[target=clone](args = (%x,), kwargs = {})
%bias : [num_users=1] = get_attr[target=bias]
%add_ : [num_users=1] = call_method[target=add_](args = (%clone, %bias), kwargs = {})
return add_
InplaceSetItemEllipsis_1¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.InplaceSetItemEllipsis_1
forward¶
def forward(self, index, update):
copy = self.params.clone()
copy[..., index] = update
return copy
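Every export mode fails with the same message because the dynamic-shapes spec reuses one Dim(batch) for dimension 0 of both index and update, while the samples have sizes 4 and 8192: reusing a Dim asserts the two axes are equal. A sketch of the mismatch, with self.params assumed to be a registered buffer of an assumed shape:
<<<
import torch

class InplaceSetItemEllipsis(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.register_buffer("params", torch.zeros(1, 8192, 6))  # assumed shape

    def forward(self, index, update):
        copy = self.params.clone()
        copy[..., index] = update
        return copy

index, update = torch.tensor([0, 1, 2, 3]), torch.randn(8192, 4)
batch = torch.export.Dim("batch")
ds = {"index": {0: batch}, "update": {0: batch, 1: torch.export.Dim.DYNAMIC}}
try:
    torch.export.export(InplaceSetItemEllipsis(), (index, update), dynamic_shapes=ds)
except Exception as e:
    print(e)  # update.size()[0] = 8192 is not equal to index.size()[0] = 4
>>>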
strict¶
FAILED
L['update'].size()[0] = 8192 is not equal to L['index'].size()[0] = 4
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
nostrict¶
FAILED
L['update'].size()[0] = 8192 is not equal to L['index'].size()[0] = 4
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
nostrict-oblivious¶
FAILED
L['update'].size()[0] = 8192 is not equal to L['index'].size()[0] = 4
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
nostrict-decall¶
FAILED
L['update'].size()[0] = 8192 is not equal to L['index'].size()[0] = 4
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
tracing¶
inputs: #1[(T7s4,T1s8192x4)]
shapes: dict(index:{0:Dim(batch)},update:{0:Dim(batch),1:DYNAMIC})
graph():
%index : [num_users=1] = placeholder[target=index]
%update : [num_users=1] = placeholder[target=update]
%_tensor_constant0 : [num_users=1] = get_attr[target=_tensor_constant0]
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%_tensor_constant0, (Ellipsis, %index), %update), kwargs = {})
return setitem
InplaceSetItemEllipsis_2¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.InplaceSetItemEllipsis_2
forward¶
def forward(self, index, update):
copy = self.params.clone()
copy[..., index] = update
return copy
strict¶
FAILED
L['update'].size()[0] = 8192 is not equal to L['index'].size()[0] = 4
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
nostrict¶
FAILED
L['update'].size()[0] = 8192 is not equal to L['index'].size()[0] = 4
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
nostrict-oblivious¶
FAILED
L['update'].size()[0] = 8192 is not equal to L['index'].size()[0] = 4
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
nostrict-decall¶
FAILED
L['update'].size()[0] = 8192 is not equal to L['index'].size()[0] = 4
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
tracing¶
inputs: #1[(T7s4,T1s8192x4)]
shapes: dict(index:{0:Dim(batch)},update:{0:Dim(batch),1:DYNAMIC})
graph():
%index : [num_users=1] = placeholder[target=index]
%update : [num_users=1] = placeholder[target=update]
%_tensor_constant0 : [num_users=1] = get_attr[target=_tensor_constant0]
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%_tensor_constant0, (Ellipsis, %index), %update), kwargs = {})
return setitem
InplaceSetItemMask¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.InplaceSetItemMask
forward¶
def forward(self, x):
mask = x.to(bool)
x[mask] = 2
return x
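The boolean-mask assignment exports cleanly: the Python constant 2 is lifted to a tensor (lift_fresh_copy) and the assignment becomes aten.index_put_ with the mask as index; decompositions turn it into aten.index_put plus aten.copy_. A minimal sketch:
<<<
import torch

class InplaceSetItemMask(torch.nn.Module):
    def forward(self, x):
        mask = x.to(bool)
        x[mask] = 2
        return x

ep = torch.export.export(
    InplaceSetItemMask(), (torch.randn(2, 3, 3),),
    dynamic_shapes={"x": {0: torch.export.Dim("batch")}},
)
print(ep.graph)  # lift_fresh_copy of the constant, then aten.index_put_
>>>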
strict¶
inputs: #2[(T1s2x3x3,),(T1s3x3x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=4] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%_assert_tensor_metadata_default : [num_users=0] = call_function[target=torch.ops.aten._assert_tensor_metadata.default](args = (%x,), kwargs = {dtype: torch.float32, device: cpu, layout: torch.strided})
%to : [num_users=1] = call_function[target=torch.ops.aten.to.dtype](args = (%x, torch.bool), kwargs = {})
%lift_fresh_copy : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%lifted_tensor_0,), kwargs = {})
%index_put_ : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%x, [%to], %lift_fresh_copy), kwargs = {})
return (index_put_,)
nostrict¶
inputs: #2[(T1s2x3x3,),(T1s3x3x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=4] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%_assert_tensor_metadata_default : [num_users=0] = call_function[target=torch.ops.aten._assert_tensor_metadata.default](args = (%x,), kwargs = {dtype: torch.float32, device: cpu, layout: torch.strided})
%to : [num_users=1] = call_function[target=torch.ops.aten.to.dtype](args = (%x, torch.bool), kwargs = {})
%lift_fresh_copy : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%lifted_tensor_0,), kwargs = {})
%index_put_ : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%x, [%to], %lift_fresh_copy), kwargs = {})
return (index_put_,)
nostrict-oblivious¶
inputs: #2[(T1s2x3x3,),(T1s3x3x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=4] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%_assert_tensor_metadata_default : [num_users=0] = call_function[target=torch.ops.aten._assert_tensor_metadata.default](args = (%x,), kwargs = {dtype: torch.float32, device: cpu, layout: torch.strided})
%to : [num_users=1] = call_function[target=torch.ops.aten.to.dtype](args = (%x, torch.bool), kwargs = {})
%lift_fresh_copy : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%lifted_tensor_0,), kwargs = {})
%index_put_ : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%x, [%to], %lift_fresh_copy), kwargs = {})
return (index_put_,)
nostrict-decall¶
inputs: #2[(T1s2x3x3,),(T1s3x3x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=5] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%_assert_tensor_metadata : [num_users=0] = call_function[target=torch.ops.aten._assert_tensor_metadata.default](args = (%x, None, None, torch.float32), kwargs = {device: cpu, layout: torch.strided})
%_to_copy : [num_users=1] = call_function[target=torch.ops.aten._to_copy.default](args = (%x,), kwargs = {dtype: torch.bool})
%clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%lifted_tensor_0,), kwargs = {})
%index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put.default](args = (%x, [%_to_copy], %clone), kwargs = {})
%copy__default : [num_users=1] = call_function[target=torch.ops.aten.copy_.default](args = (%x, %index_put), kwargs = {})
return (copy__default,)
tracing¶
inputs: #2[(T1s2x3x3,),(T1s3x3x3,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=2] = placeholder[target=x]
%to : [num_users=1] = call_method[target=to](args = (%x, torch.bool), kwargs = {})
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%x, %to, 2), kwargs = {})
return setitem
InplaceSetItemSquare¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.InplaceSetItemSquare
forward¶
def forward(self, x):
x[:2, :3] = 1
return x
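The slice assignment traces to aten.fill_ on a chained view; after decompositions the mutation is rewritten as aten.slice_scatter nodes plus a final aten.copy_ into the input, which is why nostrict-decall looks so different from the other modes. A sketch comparing both forms:
<<<
import torch

class InplaceSetItemSquare(torch.nn.Module):
    def forward(self, x):
        x[:2, :3] = 1
        return x

ep = torch.export.export(
    InplaceSetItemSquare(), (torch.randn(5, 5),),
    dynamic_shapes={"x": {0: torch.export.Dim("batch")}},
)
print(ep.graph)                       # aten.fill_ on the sliced view
print(ep.run_decompositions().graph)  # aten.slice_scatter + aten.copy_
>>>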
strict¶
inputs: #2[(T1s5x5,),(T1s7x5,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%lift_fresh_copy : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%lifted_tensor_0,), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 2), kwargs = {})
%slice_2 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%slice_1, 1, 0, 3), kwargs = {})
%fill_ : [num_users=0] = call_function[target=torch.ops.aten.fill_.Tensor](args = (%slice_2, %lift_fresh_copy), kwargs = {})
return (x,)
nostrict¶
inputs: #2[(T1s5x5,),(T1s7x5,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%lift_fresh_copy : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%lifted_tensor_0,), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 2), kwargs = {})
%slice_2 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%slice_1, 1, 0, 3), kwargs = {})
%fill_ : [num_users=0] = call_function[target=torch.ops.aten.fill_.Tensor](args = (%slice_2, %lift_fresh_copy), kwargs = {})
return (x,)
nostrict-oblivious¶
inputs: #2[(T1s5x5,),(T1s7x5,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=4] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sym_size_int_1 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 0), kwargs = {})
%lift_fresh_copy : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%lifted_tensor_0,), kwargs = {})
%slice_1 : [num_users=2] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 2), kwargs = {})
%sym_size_int_2 : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%slice_1, 0), kwargs = {})
%ge : [num_users=1] = call_function[target=operator.ge](args = (%sym_size_int_2, 0), kwargs = {})
%_assert_scalar_default : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%ge, Runtime assertion failed for expression u0 >= 0 on node 'ge'), kwargs = {})
%le_1 : [num_users=1] = call_function[target=operator.le](args = (%sym_size_int_2, %sym_size_int_1), kwargs = {})
%_assert_scalar_default_1 : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%le_1, Runtime assertion failed for expression u0 <= s77 on node 'le_1'), kwargs = {})
%slice_2 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%slice_1, 1, 0, 3), kwargs = {})
%fill_ : [num_users=0] = call_function[target=torch.ops.aten.fill_.Tensor](args = (%slice_2, %lift_fresh_copy), kwargs = {})
return (x,)
nostrict-decall¶
inputs: #2[(T1s5x5,),(T1s7x5,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=5] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%lifted_tensor_0,), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 2), kwargs = {})
%slice_2 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%slice_1, 1, 0, 3), kwargs = {})
%copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_2, %clone), kwargs = {})
%slice_3 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 2), kwargs = {})
%slice_scatter : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_3, %copy, 1, 0, 3), kwargs = {})
%slice_scatter_1 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%x, %slice_scatter, 0, 0, 2), kwargs = {})
%copy__default : [num_users=1] = call_function[target=torch.ops.aten.copy_.default](args = (%x, %slice_scatter_1), kwargs = {})
return (copy__default,)
tracing¶
inputs: #2[(T1s5x5,),(T1s7x5,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=1] = placeholder[target=x]
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%x, (slice(None, 2, None), slice(None, 3, None)), 1), kwargs = {})
return setitem
InplaceSetItemSquareAdd¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.InplaceSetItemSquareAdd
forward¶
def forward(self, x):
x[:2, :3] = 1
return x + 2
strict¶
inputs: #2[(T1s5x5,),(T1s7x5,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%lift_fresh_copy : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%lifted_tensor_0,), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 2), kwargs = {})
%slice_2 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%slice_1, 1, 0, 3), kwargs = {})
%fill_ : [num_users=0] = call_function[target=torch.ops.aten.fill_.Tensor](args = (%slice_2, %lift_fresh_copy), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, 2), kwargs = {})
return (add,)
nostrict¶
inputs: #2[(T1s5x5,),(T1s7x5,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%lift_fresh_copy : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%lifted_tensor_0,), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 2), kwargs = {})
%slice_2 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%slice_1, 1, 0, 3), kwargs = {})
%fill_ : [num_users=0] = call_function[target=torch.ops.aten.fill_.Tensor](args = (%slice_2, %lift_fresh_copy), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, 2), kwargs = {})
return (add,)
nostrict-oblivious¶
inputs: #2[(T1s5x5,),(T1s7x5,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=4] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sym_size_int_1 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 0), kwargs = {})
%lift_fresh_copy : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%lifted_tensor_0,), kwargs = {})
%slice_1 : [num_users=2] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 2), kwargs = {})
%sym_size_int_2 : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%slice_1, 0), kwargs = {})
%ge : [num_users=1] = call_function[target=operator.ge](args = (%sym_size_int_2, 0), kwargs = {})
%_assert_scalar_default : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%ge, Runtime assertion failed for expression u0 >= 0 on node 'ge'), kwargs = {})
%le_1 : [num_users=1] = call_function[target=operator.le](args = (%sym_size_int_2, %sym_size_int_1), kwargs = {})
%_assert_scalar_default_1 : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%le_1, Runtime assertion failed for expression u0 <= s77 on node 'le_1'), kwargs = {})
%slice_2 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%slice_1, 1, 0, 3), kwargs = {})
%fill_ : [num_users=0] = call_function[target=torch.ops.aten.fill_.Tensor](args = (%slice_2, %lift_fresh_copy), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, 2), kwargs = {})
return (add,)
nostrict-decall¶
inputs: #2[(T1s5x5,),(T1s7x5,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=5] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%lifted_tensor_0,), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 2), kwargs = {})
%slice_2 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%slice_1, 1, 0, 3), kwargs = {})
%copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_2, %clone), kwargs = {})
%slice_3 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 2), kwargs = {})
%slice_scatter : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_3, %copy, 1, 0, 3), kwargs = {})
%slice_scatter_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%x, %slice_scatter, 0, 0, 2), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_scatter_1, 2), kwargs = {})
%copy__default : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%x, %slice_scatter_1), kwargs = {})
return (add,)
tracing¶
inputs: #2[(T1s5x5,),(T1s7x5,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=1] = placeholder[target=x]
%setitem : [num_users=1] = call_function[target=operator.setitem](args = (%x, (slice(None, 2, None), slice(None, 3, None)), 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%setitem, 2), kwargs = {})
return add
InplaceSetItemSquareAdd2¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.InplaceSetItemSquareAdd2
forward¶
def forward(self, x):
x[:2, :3] = 1
return x + 2, x + 3
strict¶
inputs: #2[(T1s5x5,),(T1s7x5,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=4] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%lift_fresh_copy : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%lifted_tensor_0,), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 2), kwargs = {})
%slice_2 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%slice_1, 1, 0, 3), kwargs = {})
%fill_ : [num_users=0] = call_function[target=torch.ops.aten.fill_.Tensor](args = (%slice_2, %lift_fresh_copy), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, 2), kwargs = {})
%add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, 3), kwargs = {})
return (add, add_1)
nostrict¶
inputs: #2[(T1s5x5,),(T1s7x5,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=4] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%lift_fresh_copy : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%lifted_tensor_0,), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 2), kwargs = {})
%slice_2 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%slice_1, 1, 0, 3), kwargs = {})
%fill_ : [num_users=0] = call_function[target=torch.ops.aten.fill_.Tensor](args = (%slice_2, %lift_fresh_copy), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, 2), kwargs = {})
%add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, 3), kwargs = {})
return (add, add_1)
nostrict-oblivious¶
inputs: #2[(T1s5x5,),(T1s7x5,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=5] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%sym_size_int_1 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%x, 0), kwargs = {})
%lift_fresh_copy : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%lifted_tensor_0,), kwargs = {})
%slice_1 : [num_users=2] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 2), kwargs = {})
%sym_size_int_2 : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%slice_1, 0), kwargs = {})
%ge : [num_users=1] = call_function[target=operator.ge](args = (%sym_size_int_2, 0), kwargs = {})
%_assert_scalar_default : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%ge, Runtime assertion failed for expression u0 >= 0 on node 'ge'), kwargs = {})
%le_1 : [num_users=1] = call_function[target=operator.le](args = (%sym_size_int_2, %sym_size_int_1), kwargs = {})
%_assert_scalar_default_1 : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%le_1, Runtime assertion failed for expression u0 <= s77 on node 'le_1'), kwargs = {})
%slice_2 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%slice_1, 1, 0, 3), kwargs = {})
%fill_ : [num_users=0] = call_function[target=torch.ops.aten.fill_.Tensor](args = (%slice_2, %lift_fresh_copy), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, 2), kwargs = {})
%add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, 3), kwargs = {})
return (add, add_1)
nostrict-decall¶
inputs: #2[(T1s5x5,),(T1s7x5,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
%x : [num_users=5] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%lifted_tensor_0,), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 2), kwargs = {})
%slice_2 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%slice_1, 1, 0, 3), kwargs = {})
%copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_2, %clone), kwargs = {})
%slice_3 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 2), kwargs = {})
%slice_scatter : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_3, %copy, 1, 0, 3), kwargs = {})
%slice_scatter_1 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%x, %slice_scatter, 0, 0, 2), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_scatter_1, 2), kwargs = {})
%add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_scatter_1, 3), kwargs = {})
%copy__default : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%x, %slice_scatter_1), kwargs = {})
return (add, add_4)
tracing¶
inputs: #2[(T1s5x5,),(T1s7x5,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=1] = placeholder[target=x]
%setitem : [num_users=2] = call_function[target=operator.setitem](args = (%x, (slice(None, 2, None), slice(None, 3, None)), 1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%setitem, 2), kwargs = {})
%add_1 : [num_users=1] = call_function[target=operator.add](args = (%setitem, 3), kwargs = {})
return (add, add_1)
SignatureFloat1¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.SignatureFloat1
forward¶
def forward(self, x, alpha: float = 2.0):
return torch.sigmoid(self.linear(x)) - self.buff * alpha
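torch.export specializes non-tensor scalars: the value of alpha seen at export time is burned into the guards, so validating against a second input set with a different alpha fails with the guard message below; SignatureInt1 shows the same behaviour for an int. A reduced sketch (the linear layer and buffer are dropped here):
<<<
import torch

class SignatureFloat1(torch.nn.Module):  # reduced model, assumption
    def forward(self, x, alpha: float = 2.0):
        return torch.sigmoid(x) * alpha

ep = torch.export.export(SignatureFloat1(), (torch.randn(4, 3), 1.5))
ep.module()(torch.randn(4, 3), 1.5)      # fine
try:
    ep.module()(torch.randn(4, 3), 2.0)  # a different scalar trips the guard
except Exception as e:
    print(e)  # Guard failed: alpha == 1.5
>>>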
strict¶
FAILED
Guard failed: alpha == 1.5
nostrict¶
FAILED
Guard failed: alpha == 1.5
nostrict-oblivious¶
FAILED
Guard failed: alpha == 1.5
nostrict-decall¶
FAILED
Guard failed: alpha == 1.5
tracing¶
inputs: #2[(T1s4x3,float),(T1s8x3,float)]
shapes: ({0:Dim(batch)},None)
graph():
%x : [num_users=1] = placeholder[target=x]
%alpha : float [num_users=1] = placeholder[target=alpha](default=2.0)
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%mul : [num_users=1] = call_method[target=mul](args = (%buff, %alpha), kwargs = {})
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %mul), kwargs = {})
return sub
SignatureInt1¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.SignatureInt1
forward¶
def forward(self, x, i: int = 2):
return torch.sigmoid(self.linear(x)) - self.buff + x[:, i : i + 1]
strict¶
FAILED
Guard failed: i == 1
nostrict¶
FAILED
Guard failed: i == 1
nostrict-oblivious¶
FAILED
Guard failed: i == 1
nostrict-decall¶
FAILED
Guard failed: i == 1
tracing¶
inputs: #2[(T1s4x3,int),(T1s8x3,int)]
shapes: ({0:Dim(batch)},None)
graph():
%x : [num_users=2] = placeholder[target=x]
%i : int [num_users=2] = placeholder[target=i](default=2)
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %buff), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%i, 1), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%x, (slice(None, None, None), slice(i, add, None))), kwargs = {})
%add_1 : [num_users=1] = call_function[target=operator.add](args = (%sub, %getitem), kwargs = {})
return add_1
SignatureInt2¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.SignatureInt2
forward¶
def forward(self, x, i: int = 2):
return torch.sigmoid(self.linear(x)) - self.buff + x[:, i]
strict¶
inputs: #1[(T1s4x3,int)]
shapes: dict(x:{0:Dim(batch)},i:None)
graph():
%linear_weight : [num_users=1] = get_attr[target=linear.weight]
%linear_bias : [num_users=1] = get_attr[target=linear.bias]
%buff : [num_users=1] = get_attr[target=buff]
%x : [num_users=3] = placeholder[target=x]
%i : [num_users=1] = placeholder[target=i]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %i), kwargs = {})
%linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %linear_weight, %linear_bias), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})
%sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %buff), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 9223372036854775807), kwargs = {})
%select : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%slice_1, 1, 1), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %select), kwargs = {})
return (add,)
nostrict¶
inputs: #1[(T1s4x3,int)]
shapes: dict(x:{0:Dim(batch)},i:None)
graph():
%linear_weight : [num_users=1] = get_attr[target=linear.weight]
%linear_bias : [num_users=1] = get_attr[target=linear.bias]
%buff : [num_users=1] = get_attr[target=buff]
%x : [num_users=3] = placeholder[target=x]
%i : [num_users=1] = placeholder[target=i]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %i), kwargs = {})
%linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %linear_weight, %linear_bias), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})
%sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %buff), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 9223372036854775807), kwargs = {})
%select : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%slice_1, 1, 1), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %select), kwargs = {})
return (add,)
nostrict-oblivious¶
inputs: #1[(T1s4x3,int)]
shapes: dict(x:{0:Dim(batch)},i:None)
graph():
%linear_weight : [num_users=1] = get_attr[target=linear.weight]
%linear_bias : [num_users=1] = get_attr[target=linear.bias]
%buff : [num_users=1] = get_attr[target=buff]
%x : [num_users=3] = placeholder[target=x]
%i : [num_users=1] = placeholder[target=i]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %i), kwargs = {})
%linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %linear_weight, %linear_bias), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})
%sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %buff), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 9223372036854775807), kwargs = {})
%select : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%slice_1, 1, 1), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %select), kwargs = {})
return (add,)
nostrict-decall¶
inputs: #1[(T1s4x3,int)]
shapes: dict(x:{0:Dim(batch)},i:None)
graph():
%linear_weight : [num_users=1] = get_attr[target=linear.weight]
%linear_bias : [num_users=1] = get_attr[target=linear.bias]
%buff : [num_users=1] = get_attr[target=buff]
%x : [num_users=3] = placeholder[target=x]
%i : [num_users=1] = placeholder[target=i]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %i), kwargs = {})
%permute : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%linear_weight, [1, 0]), kwargs = {})
%addmm : [num_users=1] = call_function[target=torch.ops.aten.addmm.default](args = (%linear_bias, %x, %permute), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm,), kwargs = {})
%sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %buff), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%x, 0, 0, 9223372036854775807), kwargs = {})
%select : [num_users=1] = call_function[target=torch.ops.aten.select.int](args = (%slice_1, 1, 1), kwargs = {})
%add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, %select), kwargs = {})
return (add_14,)
tracing¶
inputs: #1[(T1s4x3,int)]
shapes: dict(x:{0:Dim(batch)},i:None)
graph():
%x : [num_users=2] = placeholder[target=x]
%i : int [num_users=1] = placeholder[target=i](default=2)
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %buff), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%x, (slice(None, None, None), %i)), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sub, %getitem), kwargs = {})
return add
SignatureListFixedLength¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.SignatureListFixedLength
forward¶
def forward(self, x, lx: list):
return (
torch.sigmoid(self.linear(x)) - self.buff + lx[0] * lx[1].sum(axis=1, keepdim=True)
)
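The list input is flattened by pytree into lx_0 and lx_1 placeholders, and dynamic_shapes mirrors the nesting with one spec per list element. A sketch of the spec (model reduced to the list handling):
<<<
import torch

class TakesList(torch.nn.Module):  # reduced model, assumption
    def forward(self, x, lx: list):
        return x.sum() + lx[0] * lx[1].sum(axis=1, keepdim=True)

batch = torch.export.Dim("batch")
ds = {"x": {0: batch}, "lx": [{0: batch}, {0: batch}]}
ep = torch.export.export(
    TakesList(),
    (torch.randn(4, 3), [torch.randn(4, 1), torch.randn(4, 2)]),
    dynamic_shapes=ds,
)
print(ep.graph)  # lx becomes the placeholders lx_0 and lx_1
>>>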
strict¶
inputs: #2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]
shapes: dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
graph():
%linear_weight : [num_users=1] = get_attr[target=linear.weight]
%linear_bias : [num_users=1] = get_attr[target=linear.bias]
%buff : [num_users=1] = get_attr[target=buff]
%x : [num_users=2] = placeholder[target=x]
%lx_0 : [num_users=2] = placeholder[target=lx_0]
%lx_1 : [num_users=2] = placeholder[target=lx_1]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %lx_0, %lx_1), kwargs = {})
%linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %linear_weight, %linear_bias), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})
%sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %buff), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%lx_1, [1], True), kwargs = {})
%mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%lx_0, %sum_1), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %mul), kwargs = {})
return (add,)
nostrict¶
inputs: #2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]
shapes: dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
graph():
%linear_weight : [num_users=1] = get_attr[target=linear.weight]
%linear_bias : [num_users=1] = get_attr[target=linear.bias]
%buff : [num_users=1] = get_attr[target=buff]
%x : [num_users=2] = placeholder[target=x]
%lx_0 : [num_users=2] = placeholder[target=lx_0]
%lx_1 : [num_users=2] = placeholder[target=lx_1]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %lx_0, %lx_1), kwargs = {})
%linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %linear_weight, %linear_bias), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})
%sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %buff), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%lx_1, [1], True), kwargs = {})
%mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%lx_0, %sum_1), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %mul), kwargs = {})
return (add,)
nostrict-oblivious¶
inputs: #2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]
shapes: dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
graph():
%linear_weight : [num_users=1] = get_attr[target=linear.weight]
%linear_bias : [num_users=1] = get_attr[target=linear.bias]
%buff : [num_users=1] = get_attr[target=buff]
%x : [num_users=2] = placeholder[target=x]
%lx_0 : [num_users=2] = placeholder[target=lx_0]
%lx_1 : [num_users=2] = placeholder[target=lx_1]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %lx_0, %lx_1), kwargs = {})
%linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %linear_weight, %linear_bias), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})
%sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %buff), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%lx_1, [1], True), kwargs = {})
%mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%lx_0, %sum_1), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %mul), kwargs = {})
return (add,)
nostrict-decall¶
inputs: #2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]
shapes: dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
graph():
%linear_weight : [num_users=1] = get_attr[target=linear.weight]
%linear_bias : [num_users=1] = get_attr[target=linear.bias]
%buff : [num_users=1] = get_attr[target=buff]
%x : [num_users=2] = placeholder[target=x]
%lx_0 : [num_users=2] = placeholder[target=lx_0]
%lx_1 : [num_users=2] = placeholder[target=lx_1]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %lx_0, %lx_1), kwargs = {})
%permute : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%linear_weight, [1, 0]), kwargs = {})
%addmm : [num_users=1] = call_function[target=torch.ops.aten.addmm.default](args = (%linear_bias, %x, %permute), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm,), kwargs = {})
%sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %buff), kwargs = {})
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%lx_1, [1], True), kwargs = {})
%mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%lx_0, %sum_1), kwargs = {})
%add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, %mul_4), kwargs = {})
return (add_15,)
tracing¶
inputs: #2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#2[T1s8x1,T1s8x2])]
shapes: dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])
graph():
%x : [num_users=1] = placeholder[target=x]
%lx : list [num_users=2] = placeholder[target=lx]
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %buff), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%lx, 0), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%lx, 1), kwargs = {})
%sum_1 : [num_users=1] = call_method[target=sum](args = (%getitem_1,), kwargs = {axis: 1, keepdim: True})
%mul : [num_users=1] = call_function[target=operator.mul](args = (%getitem, %sum_1), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sub, %mul), kwargs = {})
return add
SignatureListFixedWithNone¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.SignatureListFixedWithNone
forward¶
def forward(self, lx):
x = lx[0]
if lx[1] is not None:
x += lx[1]
if lx[2] is not None:
x += lx[2]
return x
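All four export modes report the same structural mismatch below: the list carries three entries (one of them None) while dynamic_shapes['lx'] only carries two. The spec must mirror the input pytree entry for entry, with None for the non-tensor slot. A sketch of the two specs (which slot holds None in the actual case is an assumption):
<<<
import torch

batch = torch.export.Dim("batch")
bad = {"lx": [{0: batch}, {0: batch}]}         # 2 specs for a 3-element list
good = {"lx": [{0: batch}, None, {0: batch}]}  # one spec per element
>>>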
strict¶
FAILED
Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements
For more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
nostrict¶
FAILED
Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements
For more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
nostrict-oblivious¶
FAILED
Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements
For more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
nostrict-decall¶
FAILED
Detected mismatch between the structure of `inputs` and `dynamic_shapes`: `inputs['lx']` has 3 elements, but `dynamic_shapes['lx']` has 2 elements
For more information about this error, see: https://pytorch.org/docs/main/generated/exportdb/index.html#dynamic-shapes-validation
The error above occurred when calling torch.export.export. If you would like to view some more information about this error, and get a list of all other errors that may occur in your export call, you can replace your `export()` call with `draft_export()`.
tracing¶
FAILED
Unable to clone type <class 'NoneType'>, x=None into numpy
SignatureListVariableLength¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.SignatureListVariableLength
forward¶
def forward(self, x, lx: list):
t = torch.cat(lx, dim=1).sum(axis=1, keepdim=True)
return torch.sigmoid(self.linear(x)) - self.buff + t
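torch.export also freezes the pytree structure of the inputs: a program traced with a 2-element list rejects a 3-element list at call time, which is the TreeSpec mismatch reported below when the exported program is validated against the second input set. A reduced sketch:
<<<
import torch

class CatList(torch.nn.Module):  # reduced model, assumption
    def forward(self, x, lx: list):
        return x + torch.cat(lx, dim=1).sum(axis=1, keepdim=True)

ep = torch.export.export(
    CatList(), (torch.randn(4, 3), [torch.randn(4, 1), torch.randn(4, 2)])
)
try:
    ep.module()(
        torch.randn(4, 3),
        [torch.randn(4, 1), torch.randn(4, 2), torch.randn(4, 3)],
    )
except Exception as e:
    print(e)  # tree spec traced with 2 list elements, called with 3
>>>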
strict¶
FAILED
Trying to flatten user inputs with exported input tree spec:
TreeSpec(tuple, None, [TreeSpec(tuple, None, [*,
TreeSpec(list, None, [*,
*])]),
TreeSpec(dict, [], [])])
but actually got inputs with tree spec of:
TreeSpec(tuple, None, [TreeSpec(tuple, None, [*,
TreeSpec(list, None, [*,
*,
*])]),
TreeSpec(dict, [], [])]).
Please check that the inputs have the same number and type of args and kwargs as the ones you used when tracing.
nostrict¶
FAILED
Trying to flatten user inputs with exported input tree spec:
TreeSpec(tuple, None, [TreeSpec(tuple, None, [*,
TreeSpec(list, None, [*,
*])]),
TreeSpec(dict, [], [])])
but actually got inputs with tree spec of:
TreeSpec(tuple, None, [TreeSpec(tuple, None, [*,
TreeSpec(list, None, [*,
*,
*])]),
TreeSpec(dict, [], [])]).
Please check that the inputs have the same number and type of args and kwargs as the ones you used when tracing.
nostrict-oblivious¶
FAILED
Trying to flatten user inputs with exported input tree spec:
TreeSpec(tuple, None, [TreeSpec(tuple, None, [*,
TreeSpec(list, None, [*,
*])]),
TreeSpec(dict, [], [])])
but actually got inputs with tree spec of:
TreeSpec(tuple, None, [TreeSpec(tuple, None, [*,
TreeSpec(list, None, [*,
*,
*])]),
TreeSpec(dict, [], [])]).
Please check that the inputs have the same number and type of args and kwargs as the ones you used when tracing.
nostrict-decall¶
FAILED
Trying to flatten user inputs with exported input tree spec:
TreeSpec(tuple, None, [TreeSpec(tuple, None, [*,
TreeSpec(list, None, [*,
*])]),
TreeSpec(dict, [], [])])
but actually got inputs with tree spec of:
TreeSpec(tuple, None, [TreeSpec(tuple, None, [*,
TreeSpec(list, None, [*,
*,
*])]),
TreeSpec(dict, [], [])]).
Please check that the inputs have the same number and type of args and kwargs as the ones you used when tracing.
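The failures above are structural rather than shape-related: `torch.export` specializes on the pytree layout of the example inputs, so a program traced with a two-element `lx` rejects a three-element one at call time. A hedged sketch reproducing the mismatch with a hypothetical module:
<<<
import torch

class Model(torch.nn.Module):
    def forward(self, x, lx: list):
        return x + torch.cat(lx, dim=1).sum(dim=1, keepdim=True)

ep = torch.export.export(
    Model(), (torch.rand(4, 3), [torch.rand(4, 1), torch.rand(4, 2)])
)
# Re-running with a third list element raises the tree-spec error shown above:
# ep.module()(torch.rand(4, 3),
#             [torch.rand(4, 1), torch.rand(4, 2), torch.rand(4, 3)])
>>>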
tracing¶
inputs: #2[(T1s4x3,#2[T1s4x1,T1s4x2]),(T1s8x3,#3[T1s8x1,T1s8x2,T1s8x3])]
shapes: dict(x:{0:Dim(batch)},lx:#2[{0:Dim(batch)},{0:Dim(batch)}])

graph():
%x : [num_users=1] = placeholder[target=x]
%lx : list [num_users=1] = placeholder[target=lx]
%cat : [num_users=1] = call_function[target=torch.cat](args = (%lx, 1), kwargs = {})
%sum_1 : [num_users=1] = call_method[target=sum](args = (%cat,), kwargs = {axis: 1, keepdim: True})
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%buff : [num_users=1] = get_attr[target=buff]
%sub : [num_users=1] = call_function[target=operator.sub](args = (%sigmoid, %buff), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sub, %sum_1), kwargs = {})
return add
SignatureShapeAsIndex¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.SignatureShapeAsIndex
forward¶
def forward(self, x, y):
t = torch.sigmoid(self.linear(x)) + x
return t[:, : y.shape[1]]
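Slicing with a bound taken from another tensor's shape stays symbolic: the exporters below materialize `y.shape[1]` as an `aten.sym_size.int` node feeding `aten.slice.Tensor`. A minimal sketch, assuming the same input shapes as this case:
<<<
import torch

class Model(torch.nn.Module):
    def forward(self, x, y):
        return x[:, : y.shape[1]]

batch = torch.export.Dim("batch")
length = torch.export.Dim("length")
ep = torch.export.export(
    Model(),
    (torch.rand(4, 3), torch.rand(4, 2)),
    dynamic_shapes={"x": {0: batch}, "y": {0: batch, 1: length}},
)
print(ep.graph)  # shows sym_size.int on y feeding slice.Tensor
>>>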
strict¶
inputs: #1[(T1s4x3,T1s4x2)]
shapes: dict(x:{0:Dim(batch)},y:{0:Dim(batch),1:Dim(length)})
graph():
%linear_weight : [num_users=1] = get_attr[target=linear.weight]
%linear_bias : [num_users=1] = get_attr[target=linear.bias]
%x : [num_users=3] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%sym_size_int_4 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%y, 1), kwargs = {})
%linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %linear_weight, %linear_bias), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, %x), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%add, 1, 0, %sym_size_int_4), kwargs = {})
return (slice_1,)
nostrict¶
inputs: #1[(T1s4x3,T1s4x2)]
shapes: dict(x:{0:Dim(batch)},y:{0:Dim(batch),1:Dim(length)})
graph():
%linear_weight : [num_users=1] = get_attr[target=linear.weight]
%linear_bias : [num_users=1] = get_attr[target=linear.bias]
%buff : [num_users=0] = get_attr[target=buff]
%x : [num_users=3] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%sym_size_int_3 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%y, 1), kwargs = {})
%linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %linear_weight, %linear_bias), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, %x), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%add, 1, None, %sym_size_int_3), kwargs = {})
return (slice_1,)
nostrict-oblivious¶
inputs: #1[(T1s4x3,T1s4x2)]
shapes: dict(x:{0:Dim(batch)},y:{0:Dim(batch),1:Dim(length)})
graph():
%linear_weight : [num_users=1] = get_attr[target=linear.weight]
%linear_bias : [num_users=1] = get_attr[target=linear.bias]
%buff : [num_users=0] = get_attr[target=buff]
%x : [num_users=3] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%sym_size_int_3 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%y, 1), kwargs = {})
%linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %linear_weight, %linear_bias), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%linear,), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, %x), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%add, 1, None, %sym_size_int_3), kwargs = {})
return (slice_1,)
nostrict-decall¶
inputs: #1[(T1s4x3,T1s4x2)]
shapes: dict(x:{0:Dim(batch)},y:{0:Dim(batch),1:Dim(length)})
graph():
%linear_weight : [num_users=1] = get_attr[target=linear.weight]
%linear_bias : [num_users=1] = get_attr[target=linear.bias]
%buff : [num_users=0] = get_attr[target=buff]
%x : [num_users=3] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%sym_size_int_6 : [num_users=1] = call_function[target=torch.ops.aten.sym_size.int](args = (%y, 1), kwargs = {})
%permute : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%linear_weight, [1, 0]), kwargs = {})
%addmm : [num_users=1] = call_function[target=torch.ops.aten.addmm.default](args = (%linear_bias, %x, %permute), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm,), kwargs = {})
%add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, %x), kwargs = {})
%slice_1 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%add_6, 1, None, %sym_size_int_6), kwargs = {})
return (slice_1,)
tracing¶
inputs: #1[(T1s4x3,T1s4x2)]
shapes: dict(x:{0:Dim(batch)},y:{0:Dim(batch),1:Dim(length)})
graph():
%x : [num_users=2] = placeholder[target=x]
%y : [num_users=1] = placeholder[target=y]
%linear : [num_users=1] = call_module[target=linear](args = (%x,), kwargs = {})
%sigmoid : [num_users=1] = call_function[target=torch.sigmoid](args = (%linear,), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%sigmoid, %x), kwargs = {})
%getattr_1 : [num_users=1] = call_function[target=builtins.getattr](args = (%y, shape), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%getattr_1, 1), kwargs = {})
%getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%add, (slice(None, None, None), slice(None, getitem, None))), kwargs = {})
return getitem_1
TypeBFloat16¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.TypeBFloat16
forward¶
def forward(self, x):
xb = x.to(torch.bfloat16)
return (xb + xb).to(torch.float32)
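The round trip through bfloat16 exports cleanly; the graphs below simply guard each cast with `aten._assert_tensor_metadata` nodes before the `aten.to`/`aten._to_copy` conversions. A minimal repro sketch, assuming a 4x4 float32 input as in this case:
<<<
import torch

class Model(torch.nn.Module):
    def forward(self, x):
        xb = x.to(torch.bfloat16)
        return (xb + xb).to(torch.float32)

batch = torch.export.Dim("batch")
ep = torch.export.export(
    Model(), (torch.rand(4, 4),), dynamic_shapes={"x": {0: batch}}
)
print(ep.graph)  # each cast is preceded by an _assert_tensor_metadata guard
>>>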
strict¶
inputs: #1[(T1s4x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%_assert_tensor_metadata_default : [num_users=0] = call_function[target=torch.ops.aten._assert_tensor_metadata.default](args = (%x,), kwargs = {dtype: torch.float32, device: cpu, layout: torch.strided})
%to : [num_users=1] = call_function[target=torch.ops.aten.to.dtype](args = (%x, torch.bfloat16), kwargs = {})
%add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%to, %to), kwargs = {})
%_assert_tensor_metadata_default_1 : [num_users=0] = call_function[target=torch.ops.aten._assert_tensor_metadata.default](args = (%add,), kwargs = {dtype: torch.bfloat16, device: cpu, layout: torch.strided})
%to_1 : [num_users=1] = call_function[target=torch.ops.aten.to.dtype](args = (%add, torch.float32), kwargs = {})
return (to_1,)
nostrict¶
inputs: #1[(T1s4x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%_assert_tensor_metadata_default : [num_users=0] = call_function[target=torch.ops.aten._assert_tensor_metadata.default](args = (%x,), kwargs = {dtype: torch.float32, device: cpu, layout: torch.strided})
%to : [num_users=1] = call_function[target=torch.ops.aten.to.dtype](args = (%x, torch.bfloat16), kwargs = {})
%add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%to, %to), kwargs = {})
%_assert_tensor_metadata_default_1 : [num_users=0] = call_function[target=torch.ops.aten._assert_tensor_metadata.default](args = (%add,), kwargs = {dtype: torch.bfloat16, device: cpu, layout: torch.strided})
%to_1 : [num_users=1] = call_function[target=torch.ops.aten.to.dtype](args = (%add, torch.float32), kwargs = {})
return (to_1,)
nostrict-oblivious¶
inputs: #1[(T1s4x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%_assert_tensor_metadata_default : [num_users=0] = call_function[target=torch.ops.aten._assert_tensor_metadata.default](args = (%x,), kwargs = {dtype: torch.float32, device: cpu, layout: torch.strided})
%to : [num_users=1] = call_function[target=torch.ops.aten.to.dtype](args = (%x, torch.bfloat16), kwargs = {})
%add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%to, %to), kwargs = {})
%_assert_tensor_metadata_default_1 : [num_users=0] = call_function[target=torch.ops.aten._assert_tensor_metadata.default](args = (%add,), kwargs = {dtype: torch.bfloat16, device: cpu, layout: torch.strided})
%to_1 : [num_users=1] = call_function[target=torch.ops.aten.to.dtype](args = (%add, torch.float32), kwargs = {})
return (to_1,)
nostrict-decall¶
inputs: #1[(T1s4x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=3] = placeholder[target=x]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
%_assert_tensor_metadata : [num_users=0] = call_function[target=torch.ops.aten._assert_tensor_metadata.default](args = (%x, None, None, torch.float32), kwargs = {device: cpu, layout: torch.strided})
%_to_copy : [num_users=1] = call_function[target=torch.ops.aten._to_copy.default](args = (%x,), kwargs = {dtype: torch.bfloat16})
%add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_to_copy, %_to_copy), kwargs = {})
%_assert_tensor_metadata_1 : [num_users=0] = call_function[target=torch.ops.aten._assert_tensor_metadata.default](args = (%add_3, None, None, torch.bfloat16), kwargs = {device: cpu, layout: torch.strided})
%_to_copy_1 : [num_users=1] = call_function[target=torch.ops.aten._to_copy.default](args = (%add_3,), kwargs = {dtype: torch.float32})
return (_to_copy_1,)
tracing¶
inputs: #1[(T1s4x4,)]
shapes: dict(x:{0:Dim(batch)})
graph():
%x : [num_users=1] = placeholder[target=x]
%to : [num_users=1] = call_method[target=to](args = (%x, torch.bfloat16), kwargs = {})
%add : [num_users=1] = call_function[target=operator.add](args = (%to, %to), kwargs = {})
%to_1 : [num_users=1] = call_method[target=to](args = (%add, torch.float32), kwargs = {})
return to_1
Vmap¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.Vmap
forward¶
def forward(self, x, y):
f = lambda x, y: x * y + 1 # noqa: E731
return torch.vmap(f)(x, y)
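`torch.vmap` maps the lambda over dimension 0 of both inputs, so for 1-D tensors the call is just an elementwise `x * y + 1`. A quick eager-mode check:
<<<
import torch

f = lambda x, y: x * y + 1  # noqa: E731
x, y = torch.rand(3), torch.rand(3)
# vmap batches f over dim 0 of both x and y
assert torch.allclose(torch.vmap(f)(x, y), x * y + 1)
>>>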
strict¶
inputs: #1[(T1s3,T1s3)]
shapes: dict(x:{0:DYNAMIC},y:{0:DYNAMIC})
graph():
%x : [num_users=2] = placeholder[target=x]
%y : [num_users=3] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%sym_size_int_2 : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%y, 0), kwargs = {})
%lazy_load_decompositions : [num_users=0] = call_function[target=torch._functorch.predispatch.lazy_load_decompositions](args = (), kwargs = {})
%_vmap_increment_nesting : [num_users=0] = call_function[target=torch._functorch.predispatch._vmap_increment_nesting](args = (%sym_size_int_2, error), kwargs = {})
%_add_batch_dim : [num_users=1] = call_function[target=torch._functorch.predispatch._add_batch_dim](args = (%x, 0, 1), kwargs = {})
%_add_batch_dim_1 : [num_users=1] = call_function[target=torch._functorch.predispatch._add_batch_dim](args = (%y, 0, 1), kwargs = {})
%mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_add_batch_dim, %_add_batch_dim_1), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1), kwargs = {})
%_remove_batch_dim : [num_users=1] = call_function[target=torch._functorch.predispatch._remove_batch_dim](args = (%add, 1, %sym_size_int_2, 0), kwargs = {})
%_vmap_decrement_nesting : [num_users=0] = call_function[target=torch._functorch.predispatch._vmap_decrement_nesting](args = (), kwargs = {})
return (_remove_batch_dim,)
nostrict¶
inputs: #1[(T1s3,T1s3)]
shapes: dict(x:{0:DYNAMIC},y:{0:DYNAMIC})
graph():
%x : [num_users=2] = placeholder[target=x]
%y : [num_users=3] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%sym_size_int_3 : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%y, 0), kwargs = {})
%lazy_load_decompositions : [num_users=0] = call_function[target=torch._functorch.predispatch.lazy_load_decompositions](args = (), kwargs = {})
%_vmap_increment_nesting : [num_users=0] = call_function[target=torch._functorch.predispatch._vmap_increment_nesting](args = (%sym_size_int_3, error), kwargs = {})
%_add_batch_dim : [num_users=1] = call_function[target=torch._functorch.predispatch._add_batch_dim](args = (%x, 0, 1), kwargs = {})
%_add_batch_dim_1 : [num_users=1] = call_function[target=torch._functorch.predispatch._add_batch_dim](args = (%y, 0, 1), kwargs = {})
%mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_add_batch_dim, %_add_batch_dim_1), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1), kwargs = {})
%_remove_batch_dim : [num_users=1] = call_function[target=torch._functorch.predispatch._remove_batch_dim](args = (%add, 1, %sym_size_int_3, 0), kwargs = {})
%_vmap_decrement_nesting : [num_users=0] = call_function[target=torch._functorch.predispatch._vmap_decrement_nesting](args = (), kwargs = {})
return (_remove_batch_dim,)
nostrict-oblivious¶
inputs: #1[(T1s3,T1s3)]
shapes: dict(x:{0:DYNAMIC},y:{0:DYNAMIC})
graph():
%x : [num_users=2] = placeholder[target=x]
%y : [num_users=3] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%sym_size_int_3 : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%y, 0), kwargs = {})
%lazy_load_decompositions : [num_users=0] = call_function[target=torch._functorch.predispatch.lazy_load_decompositions](args = (), kwargs = {})
%_vmap_increment_nesting : [num_users=0] = call_function[target=torch._functorch.predispatch._vmap_increment_nesting](args = (%sym_size_int_3, error), kwargs = {})
%_add_batch_dim : [num_users=1] = call_function[target=torch._functorch.predispatch._add_batch_dim](args = (%x, 0, 1), kwargs = {})
%_add_batch_dim_1 : [num_users=1] = call_function[target=torch._functorch.predispatch._add_batch_dim](args = (%y, 0, 1), kwargs = {})
%mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_add_batch_dim, %_add_batch_dim_1), kwargs = {})
%add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1), kwargs = {})
%_remove_batch_dim : [num_users=1] = call_function[target=torch._functorch.predispatch._remove_batch_dim](args = (%add, 1, %sym_size_int_3, 0), kwargs = {})
%_vmap_decrement_nesting : [num_users=0] = call_function[target=torch._functorch.predispatch._vmap_decrement_nesting](args = (), kwargs = {})
return (_remove_batch_dim,)
nostrict-decall¶
inputs: #1[(T1s3,T1s3)]
shapes: dict(x:{0:DYNAMIC},y:{0:DYNAMIC})
graph():
%x : [num_users=2] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%x, %y), kwargs = {})
%_to_copy : [num_users=1] = call_function[target=torch.ops.aten._to_copy.default](args = (1,), kwargs = {dtype: torch.float32})
%add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %_to_copy), kwargs = {})
return (add_2,)
tracing¶
FAILED
vmap(<lambda>, in_dims=0, ...)(<inputs>): Got in_dim=0 for an input but the input is of type <class 'experimental_experiment.torch_interpreter.tracing.CustomProxy'>. We cannot vmap over non-Tensor arguments, please use None as the respective in_dim
VmapPython¶
code: onnx_diagnostic.torch_export_patches.eval.model_cases.VmapPython
forward¶
def forward(self, x, y):
f = lambda x, y: x * y + 1 # noqa: E731
return patched_vmap(f)(x, y)
strict¶
FAILED
Can't extract message from torch._check()
Explanation: The second argument of torch._check() must be a function defined within the torch.compile region that does not reference a non-local variable.
Hint: Make sure the message function is defined in the torch.compile region.
Hint: Remove any closure variables, e.g. remove references to closure variable `x` in `lambda: f'{x} failed check'`
Hint: It may be possible to write Dynamo tracing rules for this code. Please report an issue to PyTorch if you encounter this graph break often and it is causing performance issues.
Developer debug context: NestedUserFunctionVariable()
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0288.html
from user code:
File "~/vv/this312/lib/python3.12/site-packages/torch/_dynamo/functional_export.py", line 214, in forward
res = self._export_root(*args, **kwargs)
File "~/github/onnx-diagnostic/onnx_diagnostic/torch_export_patches/eval/model_cases.py", line 905, in forward
return patched_vmap(f)(x, y)
File "~/github/onnx-diagnostic/onnx_diagnostic/torch_export_patches/patches/patch_torch.py", line 751, in wrapped
torch._check(
Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"
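The graph break comes from Dynamo's restriction on `torch._check`: inside a compiled region, the optional message argument must be a callable that captures no closure variables. A hedged sketch of the constraint, with hypothetical values:
<<<
import torch

n = 5
# Inside torch.compile, a message lambda that closes over `n` graph-breaks:
#     torch._check(n > 0, lambda: f"bad n={n}")
# A closure-free message callable is accepted:
torch._check(n > 0, lambda: "n must be positive")
>>>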
nostrict¶
inputs: #1[(T1s3,T1s3)]
shapes: dict(x:{0:DYNAMIC},y:{0:DYNAMIC})
graph():
%x : [num_users=2] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%movedim : [num_users=1] = call_function[target=torch.ops.aten.movedim.int](args = (%x, 0, 0), kwargs = {})
%movedim_1 : [num_users=1] = call_function[target=torch.ops.aten.movedim.int](args = (%y, 0, 0), kwargs = {})
%scan_combine_graph_0 : [num_users=1] = get_attr[target=scan_combine_graph_0]
%scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [], [%movedim, %movedim_1], ()), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 0), kwargs = {})
return (getitem,)
nostrict-oblivious¶
inputs: #1[(T1s3,T1s3)]
shapes: dict(x:{0:DYNAMIC},y:{0:DYNAMIC})
graph():
%x : [num_users=2] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%movedim : [num_users=1] = call_function[target=torch.ops.aten.movedim.int](args = (%x, 0, 0), kwargs = {})
%movedim_1 : [num_users=1] = call_function[target=torch.ops.aten.movedim.int](args = (%y, 0, 0), kwargs = {})
%scan_combine_graph_0 : [num_users=1] = get_attr[target=scan_combine_graph_0]
%scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [], [%movedim, %movedim_1], ()), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 0), kwargs = {})
return (getitem,)
nostrict-decall¶
inputs: #1[(T1s3,T1s3)]
shapes: dict(x:{0:DYNAMIC},y:{0:DYNAMIC})
graph():
%x : [num_users=2] = placeholder[target=x]
%y : [num_users=2] = placeholder[target=y]
%_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x, %y), kwargs = {})
%permute : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%x, [0]), kwargs = {})
%permute_1 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%y, [0]), kwargs = {})
%scan_combine_graph_0 : [num_users=1] = get_attr[target=scan_combine_graph_0]
%scan : [num_users=1] = call_function[target=torch.ops.higher_order.scan](args = (%scan_combine_graph_0, [], [%permute, %permute_1], ()), kwargs = {})
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%scan, 0), kwargs = {})
return (getitem,)
tracing¶
FAILED
symbolically traced variables cannot be used as inputs to control flow
Summary¶
case | nostrict | nostrict-decall | nostrict-oblivious | strict | tracing
[summary pivot table: one row per case, with FAIL marking each exporter that failed and an empty cell marking each that succeeded; the case-name column did not survive the page export, so the individual rows cannot be matched back to their cases and are omitted here]