
Commit d2009dd: isolate code_gen in namespace change
Parent: ab45708

File tree
8 files changed: +403 -36 lines changed


example-models

hls4ml/backends/fpga/fpga_backend.py

Lines changed: 2 additions & 2 deletions
@@ -727,7 +727,7 @@ def generate_conv1d_line_buffer_fn(self, layer_idx, n_partitions, in_W, in_C, ke
 
         generated_code = (
             "template<class data_T, typename CONFIG_T>\n"
-            "class fill_buffer_{index} : public FillConv1DBuffer<data_T, CONFIG_T> {{\n"
+            "class fill_buffer_{index} : public nnet::FillConv1DBuffer<data_T, CONFIG_T> {{\n"
             " public:\n"
             " static void fill_buffer(\n"
             " data_T data[CONFIG_T::in_width * CONFIG_T::n_chan],\n"
@@ -857,7 +857,7 @@ def generate_conv2d_line_buffer_fn(
 
         generated_code = (
             "template<class data_T, typename CONFIG_T>\n"
-            "class fill_buffer_{index} : public FillConv2DBuffer<data_T, CONFIG_T> {{\n"
+            "class fill_buffer_{index} : public nnet::FillConv2DBuffer<data_T, CONFIG_T> {{\n"
             " public:\n"
             " static void fill_buffer(\n"
             " data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],\n"

hls4ml/backends/template.py

Lines changed: 1 addition & 0 deletions
@@ -62,6 +62,7 @@ def _default_config_params(self, layer):
         params = self._default_params(layer)
         params['iotype'] = layer.model.config.get_config_value('IOType')
         params['reuse'] = layer.get_attr('reuse_factor')
+        params['namespace'] = layer.model.config.get_writer_config().get('Namespace', 'nnet')
 
         return params
 
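
A hedged sketch of the lookup pattern added above: the writer config may or may not define 'Namespace', so the default template parameters fall back to 'nnet'. The value 'myproject_ns' is a made-up example.

    writer_config = {'Namespace': 'myproject_ns'}
    print(writer_config.get('Namespace', 'nnet'))  # -> myproject_ns
    print({}.get('Namespace', 'nnet'))             # -> nnet (default when the key is unset)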

hls4ml/backends/vivado/passes/convolution_templates.py

Lines changed: 27 additions & 19 deletions
@@ -23,7 +23,7 @@
     typedef {bias_t.name} bias_t;
     typedef {weight_t.name} weight_t;
     template<class data_T, class res_T, class CONFIG_T>
-    using kernel = nnet::{dense_function}<data_T, res_T, CONFIG_T>;
+    using kernel = {dense_function}<data_T, res_T, CONFIG_T>;
     template<class x_T, class y_T>
     using product = nnet::product::{product_type}<x_T, y_T>;
 }};\n"""
@@ -53,7 +53,7 @@
     static const unsigned n_partitions = {n_partitions};
     static const unsigned n_pixels = out_width / n_partitions;
     template<class data_T, class CONFIG_T>
-    using fill_buffer = nnet::{fill_fn}<data_T, CONFIG_T>;
+    using fill_buffer = {fill_fn}<data_T, CONFIG_T>;
     typedef {accum_t.name} accum_t;
     typedef {bias_t.name} bias_t;
     typedef {weight_t.name} weight_t;
@@ -89,9 +89,10 @@ def format(self, node):
             params['scale_index_type'] = 'scale_index_regular'
 
         if node.model.config.get_config_value('IOType') == 'io_parallel':
-            params['fill_fn'] = f'fill_buffer_{node.index}'
+            namespace = params['namespace']
+            params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}'
         else:
-            params['fill_fn'] = 'FillConv1DBuffer'
+            params['fill_fn'] = 'nnet::FillConv1DBuffer'
 
         conv_config = self.template.format(**params)
 
@@ -103,16 +104,18 @@ def format(self, node):
             node.get_input_variable().type.precision, node.get_weights('weight').type.precision
         )
 
+        namespace = params['namespace']
+
         if node.get_attr('strategy').lower() == 'latency':
-            mult_params['dense_function'] = 'DenseLatency'
+            mult_params['dense_function'] = 'nnet::DenseLatency'
         elif node.get_attr('strategy').lower() == 'resource':
             if int(mult_params['reuse_factor']) <= int(mult_params['n_in']):
-                mult_params['dense_function'] = 'DenseResource_rf_leq_nin'
+                mult_params['dense_function'] = 'nnet::DenseResource_rf_leq_nin'
             else:
-                mult_params['dense_function'] = 'DenseResource_rf_gt_nin_rem0'
+                mult_params['dense_function'] = 'nnet::DenseResource_rf_gt_nin_rem0'
             # The 3rd case is never used
         elif node.get_attr('strategy').lower() == 'resource_unrolled':
-            mult_params['dense_function'] = f'dense_resource_unrolled_{node.index}'
+            mult_params['dense_function'] = f'{namespace}::dense_resource_unrolled_{node.index}'
 
         mult_config = self.mult_template.format(**mult_params)
 
@@ -170,7 +173,7 @@ def __init__(self):
     static const unsigned n_partitions = {n_partitions};
     static const unsigned n_pixels = out_height * out_width / n_partitions;
     template<class data_T, class CONFIG_T>
-    using fill_buffer = nnet::{fill_fn}<data_T, CONFIG_T>;
+    using fill_buffer = {fill_fn}<data_T, CONFIG_T>;
     typedef {accum_t.name} accum_t;
     typedef {bias_t.name} bias_t;
    typedef {weight_t.name} weight_t;
@@ -214,9 +217,10 @@ def format(self, node):
             params['scale_index_width_type'] = 'scale_index_regular'
 
         if node.model.config.get_config_value('IOType') == 'io_parallel':
-            params['fill_fn'] = f'fill_buffer_{node.index}'
+            namespace = params['namespace']
+            params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}'
         else:
-            params['fill_fn'] = 'FillConv2DBuffer'
+            params['fill_fn'] = 'nnet::FillConv2DBuffer'
 
         conv_config = self.template.format(**params)
 
@@ -313,9 +317,10 @@ def format(self, node):
         params['weight_t'] = node.get_weights('depthwise').type
         params['bias_t'] = node.get_weights('zero_bias').type
         if node.model.config.get_config_value('IOType') == 'io_parallel':
-            params['fill_fn'] = f'fill_buffer_{node.index}_dw'
+            namespace = params['namespace']
+            params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}_dw'
         else:
-            params['fill_fn'] = 'FillConv1DBuffer'
+            params['fill_fn'] = 'nnet::FillConv1DBuffer'
 
         if node.get_attr('unscaled'):
             params['scale_index_type'] = 'scale_index_unscaled'
@@ -359,9 +364,10 @@ def format(self, node):
             params['min_width'] = params['in_width']
             params['instructions'] = '0'
         if node.model.config.get_config_value('IOType') == 'io_parallel':
-            params['fill_fn'] = f'fill_buffer_{node.index}_pw'
+            namespace = params['namespace']
+            params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}_pw'
         else:
-            params['fill_fn'] = 'FillConv1DBuffer'
+            params['fill_fn'] = 'nnet::FillConv1DBuffer'
 
         if node.get_attr('unscaled'):
             params['scale_index_type'] = 'scale_index_unscaled'
@@ -446,9 +452,10 @@ def format(self, node):
         params['index'] = str(node.index) + '_depthwise'
         params['weight_t'] = node.get_weights('depthwise').type
         if node.model.config.get_config_value('IOType') == 'io_parallel':
-            params['fill_fn'] = f'fill_buffer_{node.index}_dw'
+            namespace = params['namespace']
+            params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}_dw'
         else:
-            params['fill_fn'] = 'FillConv2DBuffer'
+            params['fill_fn'] = 'nnet::FillConv2DBuffer'
 
         if node.get_attr('unscaled_h'):
             params['scale_index_height_type'] = 'scale_index_unscaled'
@@ -500,9 +507,10 @@ def format(self, node):
             params['min_width'] = params['in_width']
             params['instructions'] = '0'
         if node.model.config.get_config_value('IOType') == 'io_parallel':
-            params['fill_fn'] = f'fill_buffer_{node.index}_pw'
+            namespace = params['namespace']
+            params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}_pw'
        else:
-            params['fill_fn'] = 'FillConv2DBuffer'
+            params['fill_fn'] = 'nnet::FillConv2DBuffer'
 
         if node.get_attr('unscaled_h'):
             params['scale_index_height_type'] = 'scale_index_unscaled'
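
A hedged sketch of the fill_fn selection repeated in the hunks above: io_parallel layers point at a per-layer generated class living in the writer namespace, while other IO types fall back to the library class in nnet (the Conv2D templates use FillConv2DBuffer instead). The namespace 'my_ns', the index, and the suffix are made up for illustration.

    def pick_fill_fn(iotype, index, namespace, suffix=''):
        # Per-layer generated buffer filler for io_parallel, library fallback otherwise.
        if iotype == 'io_parallel':
            return f'{namespace}::fill_buffer_{index}{suffix}'
        return 'nnet::FillConv1DBuffer'

    print(pick_fill_fn('io_parallel', 5, 'my_ns', '_dw'))  # -> my_ns::fill_buffer_5_dw
    print(pick_fill_fn('io_stream', 5, 'my_ns'))           # -> nnet::FillConv1DBuffer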

hls4ml/backends/vivado/passes/core_templates.py

Lines changed: 7 additions & 5 deletions
@@ -20,7 +20,7 @@
     typedef {weight_t.name} weight_t;
     typedef {index_t.name} index_t;
     template<class data_T, class res_T, class CONFIG_T>
-    using kernel = nnet::{dense_function}<data_T, res_T, CONFIG_T>;
+    using kernel = {dense_function}<data_T, res_T, CONFIG_T>;
     template<class x_T, class y_T>
     using product = nnet::product::{product_type}<x_T, y_T>;
 }};\n"""
@@ -43,16 +43,18 @@ def format(self, node):
             node.get_input_variable().type.precision, node.get_weights('weight').type.precision
         )
 
+        namespace = params['namespace']
+
         if node.get_attr('strategy').lower() == 'latency':
-            params['dense_function'] = 'DenseLatency'
+            params['dense_function'] = 'nnet::DenseLatency'
         elif node.get_attr('strategy').lower() == 'resource':
             if int(params['reuse_factor']) <= int(params['n_in']):
-                params['dense_function'] = 'DenseResource_rf_leq_nin'
+                params['dense_function'] = 'nnet::DenseResource_rf_leq_nin'
             else:
-                params['dense_function'] = 'DenseResource_rf_gt_nin_rem0'
+                params['dense_function'] = 'nnet::DenseResource_rf_gt_nin_rem0'
             # The 3rd case is never used
         elif node.get_attr('strategy').lower() == 'resource_unrolled':
-            params['dense_function'] = f'dense_resource_unrolled_{node.index}'
+            params['dense_function'] = f'{namespace}::dense_resource_unrolled_{node.index}'
 
         return self.template.format(**params)
 
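
A hedged sketch of the kernel selection above: the built-in dense kernels now carry an explicit nnet:: prefix (the config template no longer adds it), while the per-layer unrolled kernel produced by code_gen takes the configurable writer namespace. All inputs below are made up for illustration.

    def pick_dense_function(strategy, reuse_factor, n_in, index, namespace):
        # Mirrors the branching in the diff: library kernels stay in nnet,
        # generated kernels follow the writer namespace.
        strategy = strategy.lower()
        if strategy == 'latency':
            return 'nnet::DenseLatency'
        if strategy == 'resource':
            if reuse_factor <= n_in:
                return 'nnet::DenseResource_rf_leq_nin'
            return 'nnet::DenseResource_rf_gt_nin_rem0'
        if strategy == 'resource_unrolled':
            return f'{namespace}::dense_resource_unrolled_{index}'
        raise ValueError(f'unknown strategy: {strategy}')

    print(pick_dense_function('resource_unrolled', 4, 16, 2, 'my_ns'))
    # -> my_ns::dense_resource_unrolled_2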

hls4ml/backends/vivado/passes/recurrent_templates.py

Lines changed: 11 additions & 9 deletions
@@ -17,7 +17,7 @@
     typedef {bias_t.name} bias_t;
     typedef {weight_t.name} weight_t;
     template<class data_T, class res_T, class CONFIG_T>
-    using kernel = nnet::{dense_function}<data_T, res_T, CONFIG_T>;
+    using kernel = {dense_function}<data_T, res_T, CONFIG_T>;
     template<class x_T, class y_T>
     using product = nnet::product::{product_type}<x_T, y_T>;
 }};\n"""
@@ -141,16 +141,18 @@ def format(self, node):
         mult_params1['nzeros'] = node.get_weights('weight').nzeros
         mult_params1['nonzeros'] = node.get_weights('weight').nonzeros
 
+        namespace = params['namespace']
+
         if node.get_attr('strategy').lower() == 'latency':
-            mult_params1['dense_function'] = 'DenseLatency'
+            mult_params1['dense_function'] = 'nnet::DenseLatency'
         elif node.get_attr('strategy').lower() == 'resource':
             if int(mult_params1['reuse_factor']) <= int(mult_params1['n_in']):
-                mult_params1['dense_function'] = 'DenseResource_rf_leq_nin'
+                mult_params1['dense_function'] = 'nnet::DenseResource_rf_leq_nin'
             else:
-                mult_params1['dense_function'] = 'DenseResource_rf_gt_nin_rem0'
+                mult_params1['dense_function'] = 'nnet::DenseResource_rf_gt_nin_rem0'
             # The 3rd case is never used
         elif node.get_attr('strategy').lower() == 'resource_unrolled':
-            mult_params1['dense_function'] = f'dense_resource_unrolled_{node.index}_1'
+            mult_params1['dense_function'] = f'{namespace}::dense_resource_unrolled_{node.index}_1'
 
         if node.get_attr('return_sequences'):
             mult_params2['n_in'] = node.get_output_variable().shape[1]
@@ -167,15 +169,15 @@ def format(self, node):
         mult_params2['nonzeros'] = node.get_weights('recurrent_weight').nonzeros
 
         if node.get_attr('strategy').lower() == 'latency':
-            mult_params2['dense_function'] = 'DenseLatency'
+            mult_params2['dense_function'] = 'nnet::DenseLatency'
         elif node.get_attr('strategy').lower() == 'resource':
             if int(mult_params2['reuse_factor']) <= int(mult_params2['n_in']):
-                mult_params2['dense_function'] = 'DenseResource_rf_leq_nin'
+                mult_params2['dense_function'] = 'nnet::DenseResource_rf_leq_nin'
             else:
-                mult_params2['dense_function'] = 'DenseResource_rf_gt_nin_rem0'
+                mult_params2['dense_function'] = 'nnet::DenseResource_rf_gt_nin_rem0'
             # The 3rd case is never used
         elif node.get_attr('strategy').lower() == 'resource_unrolled':
-            mult_params2['dense_function'] = f'dense_resource_unrolled_{node.index}_2'
+            mult_params2['dense_function'] = f'{namespace}::dense_resource_unrolled_{node.index}_2'
 
         mult_config1 = self.mult1_template.format(**mult_params1)
         mult_config2 = self.mult2_template.format(**mult_params2)

hls4ml/writer/vivado_writer.py

Lines changed: 4 additions & 0 deletions
@@ -790,6 +790,7 @@ def write_generated_code(self, model):
         contents = f.readlines()
         f.close()
         f = open(path, 'w')
+        namespace = model.config.get_writer_config().get('Namespace', None)
 
         for line in contents:
             if '// hls4ml insert code' in line:
@@ -799,6 +800,9 @@ def write_generated_code(self, model):
                 newline += str(generated_code)
             else:
                 newline = line
+            if namespace is not None:
+                if 'namespace nnet' in newline:
+                    newline = newline.replace('namespace nnet', f'namespace {namespace}')
             f.write(newline)
         f.close()
 
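
An illustrative sketch of the rewrite added above, with the file I/O replaced by an in-memory list: when a namespace is configured, every 'namespace nnet' opening in the generated source is renamed to it, which is what isolates the code_gen output in its own namespace. The namespace 'my_ns' and the file contents below are made up.

    namespace = 'my_ns'
    contents = [
        '#include "nnet_code_gen.h"\n',
        'namespace nnet {\n',
        '// hls4ml insert code\n',
        '}\n',
    ]
    rewritten = []
    for line in contents:
        newline = line
        # Same substitution as in write_generated_code: rename the nnet namespace opening.
        if namespace is not None and 'namespace nnet' in newline:
            newline = newline.replace('namespace nnet', f'namespace {namespace}')
        rewritten.append(newline)
    print(''.join(rewritten))  # 'namespace nnet {' becomes 'namespace my_ns {'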
