@@ -203,6 +203,53 @@ def _remove_unused_initializers(nodes, initializers):
203
203
return adjusted_initializers
204
204
205
205
206
+ def _remove_unused_nodes (nodes , inputs , outputs ):
207
+ nodes_input_set = set ()
208
+ for n_ in nodes :
209
+ for input_name_ in n_ .input :
210
+ nodes_input_set .add (input_name_ )
211
+
212
+ input_dict = set ([in_ .name for in_ in inputs ])
213
+ output_dict = {}
214
+ for nd_ in nodes :
215
+ output_dict .update ({o_ : nd_ for o_ in nd_ .output })
216
+
217
+ nodes_to_keep = set ()
218
+ node_inputs = [output_dict [ts_ .name ] for ts_ in outputs ]
219
+ while node_inputs :
220
+ nd_ = node_inputs [0 ]
221
+ del node_inputs [0 ]
222
+ if id (nd_ ) in nodes_to_keep :
223
+ continue
224
+
225
+ nodes_to_keep .add (id (nd_ ))
226
+ for in_ in nd_ .input :
227
+ if in_ in output_dict :
228
+ node_inputs .append (output_dict [in_ ])
229
+ else :
230
+ assert in_ == '' or in_ in input_dict
231
+
232
+ return [nd_ for nd_ in nodes if id (nd_ ) in nodes_to_keep ]
233
+
234
+
235
def _build_extra_inputs(container):
    """Build ValueInfoProto entries for initializers that must also be graph inputs.

    When calling ModelComponentContainer's add_initializer(...), nothing is added
    into the input list. However, in ONNX, for target opset < 9, initializers
    should also be the model's (GraphProto) inputs. Thus, we create ValueInfoProto
    objects from the initializers (type: TensorProto) directly so the caller can
    append them to the model's input list.

    :param container: object exposing ``initializers`` (TensorProto list) and
        ``inputs`` (ValueInfoProto list).
    :return: list of ValueInfoProto objects, one per initializer whose name is
        not already present among the container's inputs.
    """
    # Sometimes (especially when creating optional input values such as RNN's
    # initial hidden state), an initializer is also one of the original model's
    # inputs, so it has already been added into the container's input list.
    # Collect the existing names once up front; the original rebuilt this list
    # inside the loop for every initializer, which is O(n*m).
    existing_input_names = {value_info.name for value_info in container.inputs}

    extra_inputs = []  # ValueInfoProto list of the initializers
    for tensor in container.initializers:
        if tensor.name in existing_input_names:
            # Skip this initializer to avoid a duplicated graph input.
            continue

        # Initializers are always tensors so we can just call make_tensor_value_info(...).
        value_info = helper.make_tensor_value_info(tensor.name, tensor.data_type, tensor.dims)
        extra_inputs.append(value_info)
    return extra_inputs
251
+
252
+
206
253
def convert_topology (topology , model_name , doc_string , target_opset , channel_first_inputs = None ):
207
254
"""
208
255
This function is used to convert our Topology object defined in _parser.py into a ONNX model (type: ModelProto).
@@ -271,28 +318,15 @@ def convert_topology(topology, model_name, doc_string, target_opset, channel_fir
271
318
raise RuntimeError ("Unexpected error on find the converter for op {}" .format (operator .type ))
272
319
cvt (scope , operator , container )
273
320
274
- # When calling ModelComponentContainer's add_initializer(...), nothing is added into the input list.
275
- # However, In ONNX, for target opset < 9, initializers should also be model's (GraphProto) inputs.
276
- # Thus, we create ValueInfoProto objects from initializers (type: TensorProto) directly and then add them into model's input list.
277
- extra_inputs = [] # ValueInfoProto list of the initializers
278
- for tensor in container .initializers :
279
- # Sometimes (especially when creating optional input values such as RNN's initial hidden state), an initializer
280
- # is also one of the original model's input, so it has been added into the container's input list. If this is
281
- # the case, we need to skip one iteration to avoid duplicated inputs.
282
- if tensor .name in [value_info .name for value_info in container .inputs ]:
283
- continue
284
-
285
- # Initializers are always tensors so we can just call make_tensor_value_info(...)
286
- value_info = helper .make_tensor_value_info (tensor .name , tensor .data_type , tensor .dims )
287
- extra_inputs .append (value_info )
288
-
289
321
# enable the ONNX optimizations
290
322
graph = None
291
- nodes = container .nodes
323
+ extra_inputs = _build_extra_inputs (container )
324
+ nodes = _remove_unused_nodes (container .nodes , container .inputs + extra_inputs , container .outputs )
325
+
292
326
if not topology .debug_mode :
293
327
try :
294
328
import onnxconverter_common
295
- origin_node_number = len (container . nodes )
329
+ origin_node_number = len (nodes )
296
330
if target_opset < 9 :
297
331
nodes = onnxconverter_common .optimizer .optimize_onnx (nodes , nchw_inputs = nchw_inputs ,
298
332
inputs = container .inputs + extra_inputs ,
@@ -307,7 +341,8 @@ def convert_topology(topology, model_name, doc_string, target_opset, channel_fir
307
341
model_name = model_name ,
308
342
target_opset = container .target_opset )
309
343
node_number = len (graph .node )
310
- k2o_logger ().info ("The node number after optimization: {} -> {}" .format (origin_node_number , node_number ))
344
+ k2o_logger ().info (
345
+ "The ONNX operator number change on the optimization: {} -> {}" .format (origin_node_number , node_number ))
311
346
except ImportError :
312
347
onnx_not_imported = 'onnxconverter_common is not imported,'
313
348
if nchw_inputs :
@@ -326,8 +361,8 @@ def convert_topology(topology, model_name, doc_string, target_opset, channel_fir
326
361
if graph is None :
327
362
# Create a graph from its main components
328
363
adjusted_initializers = _remove_unused_initializers (nodes , container .initializers )
329
- adjusted_extra_inputs = _remove_unused_initializers (nodes , extra_inputs )
330
364
if target_opset < 9 :
365
+ adjusted_extra_inputs = _remove_unused_initializers (nodes , extra_inputs )
331
366
graph = helper .make_graph (nodes , model_name , container .inputs + adjusted_extra_inputs ,
332
367
container .outputs , adjusted_initializers )
333
368
else :
0 commit comments