.xbuilder.reverse_graph_builder
- class experimental_experiment.xbuilder.reverse_graph_builder.CustomBuilderEmitter(make_model_function: str = 'make_my_model')
Custom onnx_array_api.translate_api.builder_emitter.BuilderEmitter.
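Its only parameter is the name of the top-level function emitted in the generated code (the default, 'make_my_model', is the name appearing in the example below). A minimal sketch, assuming nothing beyond the signature above:

from experimental_experiment.xbuilder.reverse_graph_builder import (
    CustomBuilderEmitter,
)

# Hypothetical: emit 'build_model' instead of the default
# 'make_my_model' as the entry point of the generated code.
emitter = CustomBuilderEmitter(make_model_function="build_model")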
- experimental_experiment.xbuilder.reverse_graph_builder.to_graph_builder_code(proto: ModelProto, function_name: str = 'build_model') → str
Produces code building a model with experimental_experiment.xbuilder.GraphBuilder.
- Parameters:
proto – model to convert into code
function_name – name of the generated function
- Returns:
str
Example (see also Playground for big optimization pattern):
<<<
import numpy as np
import onnx
import onnx.helper as oh
import onnx.numpy_helper as onh

from experimental_experiment.xbuilder.reverse_graph_builder import (
    to_graph_builder_code,
)

TFLOAT = onnx.TensorProto.FLOAT
TINT64 = onnx.TensorProto.INT64

model = oh.make_model(
    oh.make_graph(
        [
            oh.make_node(
                "ConstantOfShape",
                ["shape"],
                ["cst"],
                value=onh.from_array(np.array([0], dtype=np.float32)),
            ),
            oh.make_node(
                "ScatterND",
                ["cst", "indices", "updates"],
                ["Z"],
                reduction="add",
            ),
        ],
        "create_graph",
        [
            oh.make_tensor_value_info("shape", TINT64, [None]),
            oh.make_tensor_value_info("indices", TINT64, [None, None]),
            oh.make_tensor_value_info("updates", TFLOAT, [None, None, None]),
        ],
        [oh.make_tensor_value_info("Z", TFLOAT, [None, None, None])],
    ),
    opset_imports=[
        oh.make_opsetid("", 18),
    ],
    ir_version=9,
)

print(to_graph_builder_code(model))
>>>
import numpy as np
from onnx import TensorProto
from onnx.numpy_helper import from_array
from experimental_experiment.xbuilder import GraphBuilder, FunctionOptions


def create_graph(
    op: "GraphBuilder",
    shape: "INT64[]",
    indices: "INT64[, ]",
    updates: "FLOAT[, , ]",
):
    cst = op.ConstantOfShape(
        shape,
        value=from_array(np.array([0.0], dtype=np.float32), name='value'),
        outputs=['cst'],
    )
    Z = op.ScatterND(cst, indices, updates, reduction='add', outputs=['Z'])
    op.Identity(Z, outputs=["Z"])
    return Z


def make_my_model() -> "ModelProto":
    g = GraphBuilder({'': 18}, ir_version=9)
    g.make_tensor_input("shape", TensorProto.INT64, ('',))
    g.make_tensor_input("indices", TensorProto.INT64, ('', ''))
    g.make_tensor_input("updates", TensorProto.FLOAT, ('', '', ''))
    create_graph(g.op, "shape", "indices", "updates")
    g.make_tensor_output(
        "Z", TensorProto.FLOAT, ('', '', ''), is_dimension=False, indexed=False
    )
    model = g.to_onnx()
    return model


model = make_my_model()
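Since the function returns plain Python source, the result can be saved to a file or executed directly. The sketch below is an illustration, not part of the API; it relies only on the fact, visible above, that the generated script ends with model = make_my_model():

code = to_graph_builder_code(model)

# Save the generated code so it can be edited by hand...
with open("rebuilt_model.py", "w") as f:
    f.write(code)

# ...or execute it in place; the generated script ends with
# 'model = make_my_model()', so the rebuilt ModelProto is available
# in the namespace afterwards.
namespace = {}
exec(code, namespace)
rebuilt = namespace["model"]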
- experimental_experiment.xbuilder.reverse_graph_builder.to_graph_pattern_matching(proto: FunctionProto | GraphProto | ModelProto) → str
Produces code matching a pattern.
- Parameters:
proto – model to convert into code
- Returns:
str
Example (see also Playground for big optimization pattern):
<<<
import numpy as np
import onnx
import onnx.helper as oh
import onnx.numpy_helper as onh

from experimental_experiment.xbuilder.reverse_graph_builder import (
    to_graph_pattern_matching,
)

TFLOAT = onnx.TensorProto.FLOAT
TINT64 = onnx.TensorProto.INT64

model = oh.make_model(
    oh.make_graph(
        [
            oh.make_node(
                "ConstantOfShape",
                ["shape"],
                ["cst"],
                value=onh.from_array(np.array([0], dtype=np.float32)),
            ),
            oh.make_node(
                "ScatterND",
                ["cst", "indices", "updates"],
                ["Z"],
                reduction="add",
            ),
        ],
        "create_graph",
        [
            oh.make_tensor_value_info("shape", TINT64, [None]),
            oh.make_tensor_value_info("indices", TINT64, [None, None]),
            oh.make_tensor_value_info("updates", TFLOAT, [None, None, None]),
        ],
        [oh.make_tensor_value_info("Z", TFLOAT, [None, None, None])],
    ),
    opset_imports=[
        oh.make_opsetid("", 18),
    ],
    ir_version=9,
)

print(to_graph_pattern_matching(model))
>>>
node_1_ScatterND = node
if node_1_ScatterND.op_type != 'ScatterND' or node_1_ScatterND.domain != '':
    return self.none()
cst = node_1_ScatterND.input[0]
indices = node_1_ScatterND.input[1]
updates = node_1_ScatterND.input[2]

# updates has no predecessor.

# indices has no predecessor.

if g.is_used_more_than_once(cst):
    return self.none(node, inspect.currentframe().f_lineno)
node_0_ConstantOfShape = g.node_before(cst)
if node_0_ConstantOfShape is None or node_0_ConstantOfShape.op_type != 'ConstantOfShape' or node_0_ConstantOfShape.domain != '':
    return self.none(node, inspect.currentframe().f_lineno)
shape = node_0_ConstantOfShape.input[0]

# shape has no predecessor.

# list of nodes
nodes = [node_0_ConstantOfShape, node_1_ScatterND]
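The generated snippet is meant to be pasted into the match method of a pattern class. The scaffolding below is a sketch only, assuming the PatternOptimization and MatchResult classes from experimental_experiment.xoptim; the exact import path, the MatchResult signature, and the apply convention should be checked against the installed version:

import inspect

from experimental_experiment.xoptim import MatchResult, PatternOptimization


class ConstantOfShapeScatterNDPattern(PatternOptimization):
    "Hypothetical pattern detecting ConstantOfShape followed by ScatterND."

    def match(self, g, node, matched):
        # Body generated by to_graph_pattern_matching (see above),
        # starting from the last node of the pattern.
        node_1_ScatterND = node
        if node_1_ScatterND.op_type != 'ScatterND' or node_1_ScatterND.domain != '':
            return self.none()
        cst = node_1_ScatterND.input[0]
        if g.is_used_more_than_once(cst):
            return self.none(node, inspect.currentframe().f_lineno)
        node_0_ConstantOfShape = g.node_before(cst)
        if node_0_ConstantOfShape is None or node_0_ConstantOfShape.op_type != 'ConstantOfShape':
            return self.none(node, inspect.currentframe().f_lineno)
        nodes = [node_0_ConstantOfShape, node_1_ScatterND]
        return MatchResult(self, nodes, self.apply)

    def apply(self, g, *nodes):
        # The rewriting step is left out of this sketch.
        raise NotImplementedError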