Comparing the four ONNX translation APIs#
translate converts an
onnx.ModelProto into Python source code that, when executed,
recreates the same model. Four output APIs are available:
"onnx"— usesonnx.helper(oh.make_node,oh.make_graph, …) viaInnerEmitter."onnx-short"— same as"onnx"but replaces large initializers with random values to keep the snippet compact, viaInnerEmitterShortInitializer."light"— fluentstart(…).vin(…).…chain, viaLightEmitter."builder"—GraphBuilder-based function wrapper, viaBuilderEmitter.
This example builds a small model, translates it with every API, shows the
generated code, and verifies that the "onnx" snippet can be re-executed to
reproduce the original model.
import numpy as np
import onnx
import onnx.helper as oh
import onnx.numpy_helper as onh
from yobx.translate import translate, translate_header
Build the model#
We use Z = Relu(X @ W + b) as a running example:
a single Gemm followed by Relu.
TFLOAT = onnx.TensorProto.FLOAT
INT64 = onnx.TensorProto.INT64
W = onh.from_array(np.random.randn(8, 5).astype(np.float32), name="W")
b = onh.from_array(np.random.randn(5).astype(np.float32), name="b")
model = oh.make_model(
oh.make_graph(
[oh.make_node("Gemm", ["X", "W", "b"], ["T"]), oh.make_node("Relu", ["T"], ["Z"])],
"gemm_relu",
[oh.make_tensor_value_info("X", TFLOAT, [None, 8])],
[oh.make_tensor_value_info("Z", TFLOAT, [None, 5])],
[W, b],
),
opset_imports=[oh.make_opsetid("", 17)],
ir_version=9,
)
print(f"Model: {len(model.graph.node)} node(s), {len(model.graph.initializer)} initializer(s)")
Model: 2 node(s), 2 initializer(s)
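Before translating anything, we can sanity-check that the model really computes Relu(X @ W + b). A minimal sketch using onnx.reference.ReferenceEvaluator (available in recent onnx releases):

# Quick check: the reference evaluator should agree with plain numpy.
from onnx.reference import ReferenceEvaluator

x = np.random.randn(3, 8).astype(np.float32)
got = ReferenceEvaluator(model).run(None, {"X": x})[0]
expected = np.maximum(x @ onh.to_array(W) + onh.to_array(b), 0.0)
assert np.allclose(got, expected, atol=1e-5)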
1. "onnx" API — full initializer values#
The generated code uses onnx.helper.make_node(),
onnx.helper.make_graph(), and onnx.helper.make_model().
Every initializer is serialised as an exact np.array(…) literal.
code_onnx = translate(model, api="onnx")
print("=== api='onnx' ===")
print(code_onnx)
=== api='onnx' ===
opset_imports = [
oh.make_opsetid('', 17),
]
inputs = []
outputs = []
nodes = []
initializers = []
sparse_initializers = []
functions = []
initializers.append(
onh.from_array(
np.array([[0.09113407135009766, -1.9420450925827026, 0.29207441210746765, -0.10008358955383301, 1.1141226291656494], [-0.13821659982204437, -1.373523473739624, 1.2217375040054321, -0.563983142375946, 2.0154261589050293], [-0.2249707281589508, 0.8632537722587585, -0.44700318574905396, -1.6374095678329468, -1.8711106777191162], [0.4297427535057068, 1.4997031688690186, -0.3691343665122986, 0.05789622291922569, -0.10762215405702591], [-1.0069392919540405, 1.942077398300171, 0.05949755385518074, 0.3610239326953888, -0.19343183934688568], [-1.5105663537979126, 0.5791450142860413, -0.47895529866218567, -0.38696902990341187, -0.717380702495575], [0.040528614073991776, -1.2248902320861816, 0.6879969835281372, 1.190011978149414, 0.7228995561599731], [-0.25929516553878784, 0.16251587867736816, 0.7109960317611694, -0.12187383323907852, -0.9744327664375305]], dtype=np.float32),
name='W'
)
)
initializers.append(
onh.from_array(
np.array([2.4226601123809814, 1.3718501329421997, 0.5806605219841003, 1.3703954219818115, -1.2084741592407227], dtype=np.float32),
name='b'
)
)
inputs.append(oh.make_tensor_value_info('X', onnx.TensorProto.FLOAT, shape=(None, 8)))
nodes.append(
oh.make_node(
'Gemm',
['X', 'W', 'b'],
['T']
)
)
nodes.append(
oh.make_node(
'Relu',
['T'],
['Z']
)
)
outputs.append(oh.make_tensor_value_info('Z', onnx.TensorProto.FLOAT, shape=(None, 5)))
graph = oh.make_graph(
nodes,
'gemm_relu',
inputs,
outputs,
initializers,
sparse_initializer=sparse_initializers,
)
model = oh.make_model(
graph,
functions=functions,
opset_imports=opset_imports,
ir_version=9,
)
2. "onnx-short" API — large initializers replaced by random values#
Identical to "onnx" except that initializers with more than 16 elements
are replaced by np.random.randn(…) / np.random.randint(…) calls.
This keeps the snippet readable when dealing with large weight tensors.
code_short = translate(model, api="onnx-short")
print("=== api='onnx-short' ===")
print(code_short)
=== api='onnx-short' ===
opset_imports = [
oh.make_opsetid('', 17),
]
inputs = []
outputs = []
nodes = []
initializers = []
sparse_initializers = []
functions = []
value = np.random.randn(8, 5).astype(np.float32)
initializers.append(
onh.from_array(
np.array(value, dtype=np.float32),
name='W'
)
)
initializers.append(
onh.from_array(
np.array([2.4226601123809814, 1.3718501329421997, 0.5806605219841003, 1.3703954219818115, -1.2084741592407227], dtype=np.float32),
name='b'
)
)
inputs.append(oh.make_tensor_value_info('X', onnx.TensorProto.FLOAT, shape=(None, 8)))
nodes.append(
oh.make_node(
'Gemm',
['X', 'W', 'b'],
['T']
)
)
nodes.append(
oh.make_node(
'Relu',
['T'],
['Z']
)
)
outputs.append(oh.make_tensor_value_info('Z', onnx.TensorProto.FLOAT, shape=(None, 5)))
graph = oh.make_graph(
nodes,
'gemm_relu',
inputs,
outputs,
initializers,
sparse_initializer=sparse_initializers,
)
model = oh.make_model(
graph,
functions=functions,
opset_imports=opset_imports,
ir_version=9,
)
Size comparison between the two onnx variants:
print(f"\nFull code length : {len(code_onnx):>6} characters")
print(f"Short code length : {len(code_short):>6} characters")
Full code length : 1906 characters
Short code length : 1112 characters
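A one-line ratio makes the saving concrete:

# Relative size of the compact snippet (here roughly 58%).
print(f"onnx-short / onnx size ratio: {len(code_short) / len(code_onnx):.0%}")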
3. "light" API — fluent chain#
The output is a single method-chain expression (start(…).vin(…).…).
code_light = translate(model, api="light")
print("=== api='light' ===")
print(code_light)
=== api='light' ===
(
start(opset=17)
.cst(np.array([[0.09113407135009766, -1.9420450925827026, 0.29207441210746765, -0.10008358955383301, 1.1141226291656494], [-0.13821659982204437, -1.373523473739624, 1.2217375040054321, -0.563983142375946, 2.0154261589050293], [-0.2249707281589508, 0.8632537722587585, -0.44700318574905396, -1.6374095678329468, -1.8711106777191162], [0.4297427535057068, 1.4997031688690186, -0.3691343665122986, 0.05789622291922569, -0.10762215405702591], [-1.0069392919540405, 1.942077398300171, 0.05949755385518074, 0.3610239326953888, -0.19343183934688568], [-1.5105663537979126, 0.5791450142860413, -0.47895529866218567, -0.38696902990341187, -0.717380702495575], [0.040528614073991776, -1.2248902320861816, 0.6879969835281372, 1.190011978149414, 0.7228995561599731], [-0.25929516553878784, 0.16251587867736816, 0.7109960317611694, -0.12187383323907852, -0.9744327664375305]], dtype=np.float32))
.rename('W')
.cst(np.array([2.4226601123809814, 1.3718501329421997, 0.5806605219841003, 1.3703954219818115, -1.2084741592407227], dtype=np.float32))
.rename('b')
.vin('X', elem_type=onnx.TensorProto.FLOAT, shape=(None, 8))
.bring('X', 'W', 'b')
.Gemm()
.rename('T')
.bring('T')
.Relu()
.rename('Z')
.bring('Z')
.vout(elem_type=onnx.TensorProto.FLOAT, shape=(None, 5))
.to_onnx()
)
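Like the "onnx" variant, the fluent snippet could in principle be re-executed. A hedged sketch, assuming translate_header also accepts api="light" and that the chain evaluates to a ModelProto via to_onnx():

# Sketch only: translate_header is assumed to return the imports the
# "light" API needs; the snippet itself is a single expression.
header_light = translate_header("light")
ns_light: dict = {}
exec(compile(header_light + "\nmodel_light = " + code_light, "<light>", "exec"), ns_light)  # noqa: S102
assert isinstance(ns_light["model_light"], onnx.ModelProto)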
4. "builder" API — GraphBuilder#
The output uses GraphBuilder to wrap the graph nodes in a Python function.
code_builder = translate(model, api="builder")
print("=== api='builder' ===")
print(code_builder)
=== api='builder' ===
def gemm_relu(
op: "GraphBuilder",
X: "FLOAT[None, 8]",
):
W = np.array([[0.09113407135009766, -1.9420450925827026, 0.29207441210746765, -0.10008358955383301, 1.1141226291656494], [-0.13821659982204437, -1.373523473739624, 1.2217375040054321, -0.563983142375946, 2.0154261589050293], [-0.2249707281589508, 0.8632537722587585, -0.44700318574905396, -1.6374095678329468, -1.8711106777191162], [0.4297427535057068, 1.4997031688690186, -0.3691343665122986, 0.05789622291922569, -0.10762215405702591], [-1.0069392919540405, 1.942077398300171, 0.05949755385518074, 0.3610239326953888, -0.19343183934688568], [-1.5105663537979126, 0.5791450142860413, -0.47895529866218567, -0.38696902990341187, -0.717380702495575], [0.040528614073991776, -1.2248902320861816, 0.6879969835281372, 1.190011978149414, 0.7228995561599731], [-0.25929516553878784, 0.16251587867736816, 0.7109960317611694, -0.12187383323907852, -0.9744327664375305]], dtype=np.float32)
b = np.array([2.4226601123809814, 1.3718501329421997, 0.5806605219841003, 1.3703954219818115, -1.2084741592407227], dtype=np.float32)
T = op.Gemm(X, W, b, outputs=['T'])
Z = op.Relu(T, outputs=['Z'])
op.Identity(Z, outputs=["Z"])
return Z
g = GraphBuilder({'': 17}, ir_version=9)
g.make_tensor_input("X", onnx.TensorProto.FLOAT, (None, 8))
gemm_relu(g.op, "X")
g.make_tensor_output("Z", onnx.TensorProto.FLOAT, (None, 5), is_dimension=False, indexed=False)
model = g.to_onnx()
Round-trip verification#
The "onnx" snippet is fully self-contained and executable.
Running it should recreate a model with the same graph structure.
header = translate_header("onnx")
full_code = header + "\n" + code_onnx
ns: dict = {}
exec(compile(full_code, "<translate>", "exec"), ns) # noqa: S102
recreated = ns["model"]
assert isinstance(recreated, onnx.ModelProto)
assert len(recreated.graph.node) == len(
model.graph.node
), f"Expected {len(model.graph.node)} nodes, got {len(recreated.graph.node)}"
assert len(recreated.graph.initializer) == len(model.graph.initializer), (
f"Expected {len(model.graph.initializer)} initializers, "
f"got {len(recreated.graph.initializer)}"
)
print("\nRound-trip succeeded ✓")
Round-trip succeeded ✓
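Counts match, and the initializer values should too, since the "onnx" API serialises them as full-precision literals. A stricter check (a sketch):

# Compare initializer contents, not just how many there are.
for orig_init, new_init in zip(model.graph.initializer, recreated.graph.initializer):
    assert orig_init.name == new_init.name
    assert np.array_equal(onh.to_array(orig_init), onh.to_array(new_init)), orig_init.name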
Plot: code size by API#
The bar chart compares the number of characters produced by each API for the
same model. "onnx-short" is always ≤ "onnx" because it compresses
large initializers.
import matplotlib.pyplot as plt # noqa: E402
api_labels = ["onnx", "onnx-short", "light", "builder"]
code_sizes = [len(code_onnx), len(code_short), len(code_light), len(code_builder)]
fig, ax = plt.subplots(figsize=(7, 4))
bars = ax.bar(api_labels, code_sizes, color=["#4c72b0", "#dd8452", "#55a868", "#c44e52"])
ax.set_ylabel("Generated code size (characters)")
ax.set_title("ONNX translation: code size by API")
for bar, size in zip(bars, code_sizes):
ax.text(
bar.get_x() + bar.get_width() / 2,
bar.get_height() * 1.01,
str(size),
ha="center",
va="bottom",
fontsize=9,
)
plt.tight_layout()
plt.show()
[figure: bar chart "ONNX translation: code size by API"]
Total running time of the script: (0 minutes 0.156 seconds)