def ort_optimized_model(
    onx: Union[str, ModelProto],
    level: str = "ORT_ENABLE_ALL",
    output: Optional[str] = None,
) -> Union[str, ModelProto]:
    """
    Returns the model onnxruntime optimizes before it computes the inference.

    :param onx: ModelProto or path to an ONNX file
    :param level: optimization level, `'ORT_ENABLE_BASIC'`,
        `'ORT_ENABLE_EXTENDED'`, `'ORT_ENABLE_ALL'`
    :param output: output file if the proposed cache is not wanted
    :return: optimized model
    """
    opt_level = getattr(GraphOptimizationLevel, level, None)
    if opt_level is None:
        raise ValueError(
            f"Unrecognized level {level!r} among {dir(GraphOptimizationLevel)}."
        )

    # The optimized graph is written either to the caller's file or to a
    # temporary cache file that is cleaned up before returning.
    cache = (
        output
        if output is not None
        else get_cache_file("ort_optimized_model.onnx", remove=True)
    )

    sess_options = SessionOptions()
    sess_options.graph_optimization_level = opt_level
    sess_options.optimized_model_filepath = str(cache)
    # Building the session runs the optimization pass and dumps the
    # optimized model to `optimized_model_filepath` as a side effect.
    InferenceSession(
        onx if isinstance(onx, str) else onx.SerializeToString(),
        sess_options,
        providers=["CPUExecutionProvider"],
    )

    if output is None and not cache.exists():
        raise RuntimeError(f"The optimized model {str(cache)!r} not found.")
    if output is not None:
        return output
    if isinstance(onx, str):
        # The input was a path, so the result is returned as a path too.
        return str(cache)
    # A ModelProto was given: load the optimized proto back and drop the cache.
    opt_onx = load(str(cache))
    cache.unlink()
    return opt_onx