import argparse

import torch
import torchvision
import onnx


def get_args_parser(add_help=True):
    parser = argparse.ArgumentParser(description="export model weights to ONNX", add_help=add_help)
    parser.add_argument("--model", default="resnet18", type=str, help="model name")
    parser.add_argument("--num_classes", default=10, type=int, help="number of classes")
    parser.add_argument("--input_size", default=224, type=int, help="input size")
    parser.add_argument("--weights", default=None, type=str, help="path of the checkpoint to load")
    parser.add_argument("--save_path", default=None, type=str, help="path to save the ONNX file")
    return parser


def export_onnx(args):
    # Build the model architecture without pretrained weights, then load the trained checkpoint.
    model = torchvision.models.get_model(args.model, weights=None, num_classes=args.num_classes)
    model.eval()

    checkpoint = torch.load(args.weights, map_location="cpu")
    model.load_state_dict(checkpoint["model"])

    # Dummy input fixes the channel count and spatial size; the batch dimension stays dynamic below.
    dummy_input = torch.randn(1, 3, args.input_size, args.input_size)

    with torch.no_grad():
        torch.onnx.export(
            model,
            dummy_input,
            args.save_path,
            export_params=True,
            opset_version=11,
            do_constant_folding=False,
            input_names=["input"],
            output_names=["output"],
            dynamic_axes={"input": {0: "batch_size"}, "output": {0: "batch_size"}},
        )
    print("Model successfully exported to ONNX format!")

    # Reload the exported file and verify that the graph is structurally valid.
    model_onnx = onnx.load(args.save_path)
    onnx.checker.check_model(model_onnx)


if __name__ == "__main__":
    args = get_args_parser().parse_args()
    export_onnx(args)
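
A quick way to exercise the script and sanity-check the exported graph is to run it from the command line and then feed a random input through onnxruntime. This is a minimal sketch, assuming the script is saved as export_onnx.py, the checkpoint and output paths are placeholders, and onnxruntime is installed:

# Example invocation (export_onnx.py, checkpoint.pth, and resnet18.onnx are hypothetical names):
#   python export_onnx.py --model resnet18 --num_classes 10 --weights checkpoint.pth --save_path resnet18.onnx

import numpy as np
import onnxruntime as ort

# Load the exported file on CPU; "resnet18.onnx" is the --save_path used above (assumed).
session = ort.InferenceSession("resnet18.onnx", providers=["CPUExecutionProvider"])

# The feed key "input" matches the input_names passed to torch.onnx.export in the script.
dummy = np.random.randn(1, 3, 224, 224).astype(np.float32)
outputs = session.run(None, {"input": dummy})
print(outputs[0].shape)  # expected (1, num_classes), e.g. (1, 10) with the default arguments

Because the batch dimension was declared dynamic via dynamic_axes, the same session also accepts inputs with a batch size other than 1.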