torchvizやtensorboard、netronによるモデルの可視化を行おうと思っています。
しかし、どのコマンドでも、以下のエラーが発生します。
以下は、モデルをONNX形式にエクスポートし、Netronで表示しようとした際のエラーです。
```python
input4 = torch.rand(10,1684,40)
input4 = input4.to(device)
input5 = torch.rand(10,)
input5 = input5.cpu()
#torch.set_default_tensor_type('torch.cuda.FloatTensor')
onnx_path = "netForwatch.onnx"
torch.onnx.export(model, (input4,input5), onnx_path)

netron.start(onnx_path)
```
```python
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
/tmp/ipykernel_787804/3084293803.py in <module>
      5 #torch.set_default_tensor_type('torch.cuda.FloatTensor')
      6 onnx_path = "netForwatch.onnx"
----> 7 torch.onnx.export(model, (input4,input5), onnx_path)
      8 
      9 netron.start(onnx_path)

~/.local/lib/python3.8/site-packages/torch/onnx/__init__.py in export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, opset_version, _retain_param_name, do_constant_folding, example_outputs, strip_doc_string, dynamic_axes, keep_initializers_as_inputs, custom_opsets, enable_onnx_checker, use_external_data_format)
    314 
    315     from torch.onnx import utils
--> 316     return utils.export(model, args, f, export_params, verbose, training,
    317                         input_names, output_names, operator_export_type, opset_version,
    318                         _retain_param_name, do_constant_folding, example_outputs,

~/.local/lib/python3.8/site-packages/torch/onnx/utils.py in export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, opset_version, _retain_param_name, do_constant_folding, example_outputs, strip_doc_string, dynamic_axes, keep_initializers_as_inputs, custom_opsets, enable_onnx_checker, use_external_data_format)
    105                 "Otherwise set to False because of size limits imposed by Protocol Buffers.")
    106 
--> 107     _export(model, args, f, export_params, verbose, training, input_names, output_names,
    108             operator_export_type=operator_export_type, opset_version=opset_version,
    109             do_constant_folding=do_constant_folding, example_outputs=example_outputs,

~/.local/lib/python3.8/site-packages/torch/onnx/utils.py in _export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, export_type, example_outputs, opset_version, do_constant_folding, dynamic_axes, keep_initializers_as_inputs, fixed_batch_size, custom_opsets, add_node_names, use_external_data_format, onnx_shape_inference)
    722 
    723         graph, params_dict, torch_out = \
--> 724             _model_to_graph(model, args, verbose, input_names,
    725                             output_names, operator_export_type,
    726                             example_outputs, val_do_constant_folding,

~/.local/lib/python3.8/site-packages/torch/onnx/utils.py in _model_to_graph(model, args, verbose, input_names, output_names, operator_export_type, example_outputs, do_constant_folding, _disable_torch_constant_prop, fixed_batch_size, training, dynamic_axes)
    491         args = (args, )
    492 
--> 493     graph, params, torch_out, module = _create_jit_graph(model, args)
    494 
    495     params_dict = _get_named_param_dict(graph, params)

~/.local/lib/python3.8/site-packages/torch/onnx/utils.py in _create_jit_graph(model, args)
    435         return graph, params, torch_out, None
    436     else:
--> 437         graph, torch_out = _trace_and_get_graph_from_model(model, args)
    438         state_dict = _unique_state_dict(model)
    439         params = list(state_dict.values())

~/.local/lib/python3.8/site-packages/torch/onnx/utils.py in _trace_and_get_graph_from_model(model, args)
    386 
    387     trace_graph, torch_out, inputs_states = \
--> 388         torch.jit._get_trace_graph(model, args, strict=False, _force_outplace=False, _return_inputs_states=True)
    389     warn_on_static_input_change(inputs_states)
    390 

~/.local/lib/python3.8/site-packages/torch/jit/_trace.py in _get_trace_graph(f, args, kwargs, strict, _force_outplace, return_inputs, _return_inputs_states)
   1164     if not isinstance(args, tuple):
   1165         args = (args,)
-> 1166     outs = ONNXTracedModule(f, strict, _force_outplace, return_inputs, _return_inputs_states)(*args, **kwargs)
   1167     return outs

~/.local/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1100         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1101                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102             return forward_call(*input, **kwargs)
   1103         # Do not call functions when jit is used
   1104         full_backward_hooks, non_full_backward_hooks = [], []

~/.local/lib/python3.8/site-packages/torch/jit/_trace.py in forward(self, *args)
    125             return tuple(out_vars)
    126 
--> 127         graph, out = torch._C._create_graph_by_tracing(
    128             wrapper,
    129             in_vars + module_state,

RuntimeError: 0INTERNAL ASSERT FAILED at "../torch/csrc/jit/ir/alias_analysis.cpp":584, please report a bug to PyTorch. We don't have an op for aten::fill_ but it isn't a special case. Argument types: Tensor, bool, 
```
残り二つのほうでも同じエラーが起きるのですが、エラーを見る限り引数のエラーではないと思いますがいかがでしょうか。
1/06の時点で、使用しているパッケージはすべて最新の状態になっています。
また、このコードはサーバー上で行っています。
バッドをするには、ログインかつ
こちらの条件を満たす必要があります。