Skip to content

Commit 47778be

Browse files
Committed by Egor
1 parent bf41fb4 · commit 47778be

File tree

14 files changed

+198
-182
lines changed

14 files changed

+198
-182
lines changed

src/plugins/intel_cpu/src/cpu_streams_calculation.cpp

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -362,14 +362,15 @@ std::vector<std::vector<int>> get_streams_info_table(
362362
n_streams = input_infer_requests > 0 ? std::min(n_streams, input_infer_requests) : n_streams;
363363
n_threads_per_stream = -1;
364364
} else {
365-
int model_threads;
366-
if (n_threads == 1) {
367-
model_threads = 1;
368-
} else if (model_prefer_threads > n_threads) {
369-
model_threads = n_threads / 2;
370-
} else {
371-
model_threads = model_prefer_threads;
372-
}
365+
int model_threads = [&]() {
366+
if (n_threads == 1) {
367+
return 1;
368+
}
369+
if (model_prefer_threads > n_threads) {
370+
return n_threads / 2;
371+
}
372+
return model_prefer_threads;
373+
}();
373374
n_streams = ((n_threads + model_threads - 1) / model_threads);
374375
if ((input_infer_requests > 0) && (n_streams > input_infer_requests)) {
375376
n_streams = input_infer_requests;

src/plugins/intel_cpu/src/emitters/plugin/x64/debug_capabilities.cpp

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -168,14 +168,15 @@ void RegPrinter::print_vmm(jit_generator& h, REG_T vmm, const char* name) {
168168
h.push(abi_param2);
169169
h.push(abi_param3);
170170
{
171-
int vlen;
172-
if (vmm.isZMM()) {
173-
vlen = 64;
174-
} else if (vmm.isYMM()) {
175-
vlen = 32;
176-
} else {
177-
vlen = 16;
178-
}
171+
int vlen = [&]() {
172+
if (vmm.isZMM()) {
173+
return 64;
174+
}
175+
if (vmm.isYMM()) {
176+
return 32;
177+
}
178+
return 16;
179+
}();
179180
h.sub(h.rsp, vlen);
180181
h.uni_vmovups(h.ptr[h.rsp], vmm);
181182

src/plugins/intel_cpu/src/nodes/bin_conv.cpp

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -838,14 +838,15 @@ struct jit_uni_bin_conv_kernel_f32 : public jit_uni_bin_conv_kernel, public jit_
838838

839839
int nbits = 8;
840840
const int inp_mult = div_up(jcp_.ic, nbits);
841-
int out_mult;
842-
if (jcp_.with_dw_conv) {
843-
out_mult = jcp_.oc_block;
844-
} else if (jcp_.with_binarization) {
845-
out_mult = div_up(jcp_.oc, nbits);
846-
} else {
847-
out_mult = jcp_.oc;
848-
}
841+
int out_mult = [&]() {
842+
if (jcp_.with_dw_conv) {
843+
return jcp_.oc_block;
844+
}
845+
if (jcp_.with_binarization) {
846+
return div_up(jcp_.oc, nbits);
847+
}
848+
return jcp_.oc;
849+
}();
849850

850851
int l_pad = jcp_.l_pad;
851852
int r_pad = nstl::max(0, (jcp_.ow - 1) * str_w + (kw - 1) * dilate_w - (iw + l_pad - 1));

src/plugins/intel_cpu/src/nodes/deconv.cpp

Lines changed: 18 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -384,14 +384,15 @@ bool Deconvolution::canBeExecutedInInt8() const {
384384
}
385385

386386
// not supported in oneDNN
387-
int channelBlock;
388-
if (impl::cpu::x64::mayiuse(impl::cpu::x64::avx512_core)) {
389-
channelBlock = 16;
390-
} else if (impl::cpu::x64::mayiuse(impl::cpu::x64::avx2)) {
391-
channelBlock = 8;
392-
} else {
393-
channelBlock = 4;
394-
}
387+
int channelBlock = [&]() {
388+
if (impl::cpu::x64::mayiuse(impl::cpu::x64::avx512_core)) {
389+
return 16;
390+
}
391+
if (impl::cpu::x64::mayiuse(impl::cpu::x64::avx2)) {
392+
return 8;
393+
}
394+
return 4;
395+
}();
395396
if (withGroups && !isDW && (IC % channelBlock != 0 || OC % channelBlock != 0)) {
396397
return false;
397398
}
@@ -654,14 +655,15 @@ void Deconvolution::getSupportedDescriptors() {
654655

655656
if (isInt8) {
656657
const auto& rank = getInputShapeAtPort(0).getRank();
657-
dnnl::memory::format_tag format;
658-
if (rank == 5) {
659-
format = dnnl::memory::format_tag::ndhwc;
660-
} else if (rank == 4) {
661-
format = dnnl::memory::format_tag::nhwc;
662-
} else {
663-
format = dnnl::memory::format_tag::nwc;
664-
}
658+
dnnl::memory::format_tag format = [&]() {
659+
if (rank == 5) {
660+
return dnnl::memory::format_tag::ndhwc;
661+
}
662+
if (rank == 4) {
663+
return dnnl::memory::format_tag::nhwc;
664+
}
665+
return dnnl::memory::format_tag::nwc;
666+
}();
665667
MemoryDescPtr in_candidate =
666668
std::make_shared<DnnlBlockedMemoryDesc>(getInputShapeAtPort(0), inputDataType, format);
667669
MemoryDescPtr out_candidate =

src/plugins/intel_cpu/src/nodes/executors/interpolate.cpp

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -221,14 +221,15 @@ void ov::intel_cpu::InterpolateExecutor::buildTblLinearOnnx(const VectorDims& sr
221221
// FrontTopLeft:0, FrontTopRight:1, FrontBottomLeft:2, FrontBottomRight:3,
222222
// EndTopLeft:4, EndTopRight:5, EndBottomLeft:6, EndBottomRight:7
223223
// weight: Left:0, ritht:1, top:2, bottom:3, front:4, end:5
224-
int eltInGrid;
225-
if (spatialDimSize > 2) {
226-
eltInGrid = MAX_INPUT_INTERPOLATE;
227-
} else if (spatialDimSize > 1) {
228-
eltInGrid = 4;
229-
} else {
230-
eltInGrid = 2;
231-
}
224+
int eltInGrid = [&]() -> int {
225+
if (spatialDimSize > 2) {
226+
return MAX_INPUT_INTERPOLATE;
227+
}
228+
if (spatialDimSize > 1) {
229+
return 4;
230+
}
231+
return 2;
232+
}();
232233
int idxType = 2;
233234
int scratchLen = rnd_up(eltInGrid * OW * OH * OD, 16);
234235
indexTable.resize(idxType * scratchLen);

src/plugins/intel_cpu/src/nodes/fake_quantize.cpp

Lines changed: 18 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -104,18 +104,15 @@ struct jit_uni_binarization_kernel : public jit_uni_quantize_kernel, public jit_
104104

105105
L(unrolled_loop_label);
106106
{
107-
int step;
108-
int ur_ch;
109-
if (isa == cpu::x64::sse41) {
110-
step = nbits / 2;
111-
ur_ch = nbits;
112-
} else if (isa == cpu::x64::avx2) {
113-
step = nbits;
114-
ur_ch = nbits / 2;
115-
} else {
116-
step = 2 * nbits;
117-
ur_ch = nbits / 4;
118-
}
107+
auto [step, ur_ch] = [&]() {
108+
if (isa == cpu::x64::sse41) {
109+
return std::make_pair(nbits / 2, nbits);
110+
}
111+
if (isa == cpu::x64::avx2) {
112+
return std::make_pair(nbits, nbits / 2);
113+
}
114+
return std::make_pair(2 * nbits, nbits / 4);
115+
}();
119116
const int unrolled_loop_step = ur_ch * step;
120117

121118
cmp(reg_work_amount, unrolled_loop_step);
@@ -152,18 +149,15 @@ struct jit_uni_binarization_kernel : public jit_uni_quantize_kernel, public jit_
152149

153150
L(main_loop_label);
154151
{
155-
int repeats;
156-
int step;
157-
if (isa == cpu::x64::sse41) {
158-
repeats = 2;
159-
step = nbits / 2;
160-
} else if (isa == cpu::x64::avx2) {
161-
repeats = 1;
162-
step = nbits;
163-
} else {
164-
repeats = 1;
165-
step = nbits * 2;
166-
}
152+
auto [repeats, step] = [&]() {
153+
if (isa == cpu::x64::sse41) {
154+
return std::make_pair(2, nbits / 2);
155+
}
156+
if (isa == cpu::x64::avx2) {
157+
return std::make_pair(1, nbits);
158+
}
159+
return std::make_pair(1, nbits * 2);
160+
}();
167161
const int main_loop_step = step * repeats;
168162

169163
cmp(reg_work_amount, main_loop_step);

src/plugins/intel_cpu/src/nodes/matrix_nms.cpp

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -293,12 +293,12 @@ void MatrixNms::prepareParams() {
293293
m_numClasses = scores_dims[1];
294294

295295
int64_t max_output_boxes_per_class = 0;
296-
size_t real_num_classes;
297-
if (m_backgroundClass == -1 || static_cast<size_t>(m_backgroundClass) >= m_numClasses) {
298-
real_num_classes = m_numClasses;
299-
} else {
300-
real_num_classes = m_numClasses - 1;
301-
}
296+
size_t real_num_classes = [&]() {
297+
if (m_backgroundClass == -1 || static_cast<size_t>(m_backgroundClass) >= m_numClasses) {
298+
return m_numClasses;
299+
}
300+
return m_numClasses - 1;
301+
}();
302302
if (m_nmsTopk >= 0) {
303303
max_output_boxes_per_class = std::min(m_numBoxes, static_cast<size_t>(m_nmsTopk));
304304
} else {

src/plugins/intel_cpu/src/nodes/multiclass_nms.cpp

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -220,12 +220,12 @@ void MultiClassNms::prepareParams() {
220220
m_numClasses = shared ? scores_dims[1] : scores_dims[0];
221221

222222
int max_output_boxes_per_class = 0;
223-
size_t real_num_classes;
224-
if (m_backgroundClass == -1 || static_cast<size_t>(m_backgroundClass) >= m_numClasses) {
225-
real_num_classes = m_numClasses;
226-
} else {
227-
real_num_classes = m_numClasses - 1;
228-
}
223+
size_t real_num_classes = [&]() {
224+
if (m_backgroundClass == -1 || static_cast<size_t>(m_backgroundClass) >= m_numClasses) {
225+
return m_numClasses;
226+
}
227+
return m_numClasses - 1;
228+
}();
229229
if (m_nmsTopK) {
230230
max_output_boxes_per_class = (m_nmsTopK == -1) ? m_numBoxes : std::min(m_nmsTopK, static_cast<int>(m_numBoxes));
231231
m_filtBoxes.resize(max_output_boxes_per_class * m_numBatches * m_numClasses);

src/plugins/intel_cpu/src/nodes/mvn.cpp

Lines changed: 18 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -798,14 +798,15 @@ struct jit_uni_mvn_mean_variance_kernel_f32 : public jit_uni_mvn_mean_variance_k
798798
}
799799

800800
void worker_tails(Xbyak::Reg64& reg_tail_num, const std::function<void(int)>& func) {
801-
int tile_start_idx;
802-
if (isa == cpu::x64::avx512_core) {
803-
tile_start_idx = 0;
804-
} else if (isa == cpu::x64::avx2) {
805-
tile_start_idx = 1;
806-
} else {
807-
tile_start_idx = 2;
808-
}
801+
int tile_start_idx = [&]() {
802+
if (isa == cpu::x64::avx512_core) {
803+
return 0;
804+
}
805+
if (isa == cpu::x64::avx2) {
806+
return 1;
807+
}
808+
return 2;
809+
}();
809810
Label tile_exit[kTileNum];
810811
for (int i = tile_start_idx; i < kTileNum; i++) {
811812
cmp(reg_tail_num, tile_size[i]);
@@ -1727,14 +1728,15 @@ struct jit_uni_mvn_kernel_f32 : public jit_uni_mvn_kernel, public jit_generator
17271728
}
17281729

17291730
void worker_mvn_tails(Xbyak::Reg64& reg_tail_num, const std::function<void(int)>& func) {
1730-
int tile_start_idx;
1731-
if (isa == cpu::x64::avx512_core) {
1732-
tile_start_idx = 0;
1733-
} else if (isa == cpu::x64::avx2) {
1734-
tile_start_idx = 1;
1735-
} else {
1736-
tile_start_idx = 2;
1737-
}
1731+
int tile_start_idx = [&]() {
1732+
if (isa == cpu::x64::avx512_core) {
1733+
return 0;
1734+
}
1735+
if (isa == cpu::x64::avx2) {
1736+
return 1;
1737+
}
1738+
return 2;
1739+
}();
17381740
Label tile_exit[kTileNum];
17391741
for (int i = tile_start_idx; i < kTileNum; i++) {
17401742
cmp(reg_tail_num, tile_size[i]);

src/plugins/intel_cpu/src/nodes/pad.cpp

Lines changed: 18 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -610,14 +610,15 @@ void Pad::PadExecutor::padEdge(const MemoryPtr& srcMemPtr, const MemoryPtr& dstM
610610
for (size_t iwork = start; iwork < end; ++iwork, dstIdx += params.lastDstDim) {
611611
size_t srcIdx = 0;
612612
for (size_t idx = 0; idx < params.nDimsForWork; ++idx) {
613-
size_t shift;
614-
if (indexes[idx] < params.attrs.padsBegin[idx]) {
615-
shift = 0;
616-
} else if (static_cast<size_t>(indexes[idx]) >= params.srcODims[idx]) {
617-
shift = params.srcDims[idx] - 1;
618-
} else {
619-
shift = indexes[idx] - params.attrs.padsBegin[idx];
620-
}
613+
size_t shift = [&]() {
614+
if (indexes[idx] < params.attrs.padsBegin[idx]) {
615+
return size_t(0);
616+
}
617+
if (static_cast<size_t>(indexes[idx]) >= params.srcODims[idx]) {
618+
return params.srcDims[idx] - 1;
619+
}
620+
return static_cast<size_t>(indexes[idx] - params.attrs.padsBegin[idx]);
621+
}();
621622
srcIdx += shift * params.srcStrides[idx];
622623
}
623624
srcIdx *= params.dataSize;
@@ -665,14 +666,15 @@ void Pad::PadExecutor::padReflectOrSymmetric(const MemoryPtr& srcMemPtr,
665666
for (size_t iwork = start; iwork < end; ++iwork, dstIdx += params.lastDstDim) {
666667
size_t srcIdx = 0;
667668
for (size_t i = 0; i < params.nDimsForWork; ++i) {
668-
size_t idx;
669-
if (indexes[i] < params.attrs.padsBegin[i]) {
670-
idx = params.attrs.padsBegin[i] - indexes[i] - shift;
671-
} else if (static_cast<size_t>(indexes[i]) >= params.srcODims[i]) {
672-
idx = params.srcDimsForReflectOrSymmetric[i] - indexes[i];
673-
} else {
674-
idx = indexes[i] - params.attrs.padsBegin[i];
675-
}
669+
size_t idx = [&]() -> size_t {
670+
if (indexes[i] < params.attrs.padsBegin[i]) {
671+
return params.attrs.padsBegin[i] - indexes[i] - shift;
672+
}
673+
if (static_cast<size_t>(indexes[i]) >= params.srcODims[i]) {
674+
return params.srcDimsForReflectOrSymmetric[i] - indexes[i];
675+
}
676+
return indexes[i] - params.attrs.padsBegin[i];
677+
}();
676678
srcIdx += idx * params.srcStrides[i];
677679
}
678680
srcIdx *= params.dataSize;

0 commit comments

Comments (0)