Skip to content

Commit 32839e7

Browse files
authored
[CPU] Apply 'readability-avoid-nested-conditional-operator' clang-tidy remarks (#30806)
### Details:
- Fix "readability-avoid-nested-conditional-operator" remarks reported by clang-tidy
- Enable "readability-avoid-nested-conditional-operator" clang-tidy checks on CI by default

### Tickets:
- N/A
1 parent 16ba56b commit 32839e7

32 files changed

+489
-195
lines changed

src/plugins/intel_cpu/src/.clang-tidy

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,6 @@
4747
# -misc-header-include-cycle,
4848
# -misc-non-private-member-variables-in-classes,
4949
# -misc-use-anonymous-namespace,
50-
# -readability-avoid-nested-conditional-operator
5150
# -readability-const-return-type
5251
# -readability-function-cognitive-complexity. Reasonable way to enforce splitting complex code into simple functions
5352
# -readability-isolate-declaration
@@ -108,7 +107,6 @@ Checks: >
108107
-misc-no-recursion,
109108
-misc-non-private-member-variables-in-classes,
110109
-misc-use-anonymous-namespace,
111-
-readability-avoid-nested-conditional-operator,
112110
-readability-const-return-type,
113111
-readability-function-cognitive-complexity,
114112
-readability-identifier-length,

src/plugins/intel_cpu/src/cpu_memory.cpp

Lines changed: 6 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -64,11 +64,9 @@ inline void setSubnormalsToZeroAndbf16Saturation(float* data, size_t size, bool
6464
if ((u32data[i] & (0xFF << 23)) == 0) {
6565
u32data[i] = 0;
6666
} else if (!std::isnan(floatdata[i]) && !std::isinf(floatdata[i])) {
67-
floatdata[i] = (floatdata[i] < static_cast<float>(std::numeric_limits<ov::bfloat16>::lowest()))
68-
? static_cast<float>(std::numeric_limits<ov::bfloat16>::lowest())
69-
: (floatdata[i] > static_cast<float>(std::numeric_limits<ov::bfloat16>::max()))
70-
? static_cast<float>(std::numeric_limits<ov::bfloat16>::max())
71-
: floatdata[i];
67+
floatdata[i] =
68+
std::min(std::max(floatdata[i], static_cast<float>(std::numeric_limits<ov::bfloat16>::lowest())),
69+
static_cast<float>(std::numeric_limits<ov::bfloat16>::max()));
7270
}
7371
}
7472
} else if (ftz) {
@@ -80,11 +78,9 @@ inline void setSubnormalsToZeroAndbf16Saturation(float* data, size_t size, bool
8078
} else if (bf16saturation) {
8179
for (size_t i = 0; i < size; ++i) {
8280
if (!std::isnan(floatdata[i]) && !std::isinf(floatdata[i])) {
83-
floatdata[i] = (floatdata[i] < static_cast<float>(std::numeric_limits<ov::bfloat16>::lowest()))
84-
? static_cast<float>(std::numeric_limits<ov::bfloat16>::lowest())
85-
: (floatdata[i] > static_cast<float>(std::numeric_limits<ov::bfloat16>::max()))
86-
? static_cast<float>(std::numeric_limits<ov::bfloat16>::max())
87-
: floatdata[i];
81+
floatdata[i] =
82+
std::min(std::max(floatdata[i], static_cast<float>(std::numeric_limits<ov::bfloat16>::lowest())),
83+
static_cast<float>(std::numeric_limits<ov::bfloat16>::max()));
8884
}
8985
}
9086
}

src/plugins/intel_cpu/src/cpu_streams_calculation.cpp

Lines changed: 19 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -362,9 +362,15 @@ std::vector<std::vector<int>> get_streams_info_table(
362362
n_streams = input_infer_requests > 0 ? std::min(n_streams, input_infer_requests) : n_streams;
363363
n_threads_per_stream = -1;
364364
} else {
365-
auto model_threads = n_threads == 1 ? 1
366-
: model_prefer_threads > n_threads ? n_threads / 2
367-
: model_prefer_threads;
365+
int model_threads = [&]() {
366+
if (n_threads == 1) {
367+
return 1;
368+
}
369+
if (model_prefer_threads > n_threads) {
370+
return n_threads / 2;
371+
}
372+
return model_prefer_threads;
373+
}();
368374
n_streams = ((n_threads + model_threads - 1) / model_threads);
369375
if ((input_infer_requests > 0) && (n_streams > input_infer_requests)) {
370376
n_streams = input_infer_requests;
@@ -688,12 +694,16 @@ int get_model_prefer_threads(const int num_streams,
688694
// By default the latency case uses (faster) Big cores only, depending on the compute ratio
689695
// But on MTL detected by ov::get_number_of_blocked_cores(), use Big and Little cores together in Big
690696
// cores only cases except LLM.
691-
model_prefer = proc_type_table[0][MAIN_CORE_PROC] > (proc_type_table[0][EFFICIENT_CORE_PROC] /
692-
(int8_intensive ? int8_threshold : fp32_threshold))
693-
? ((!llm_related && ov::get_number_of_blocked_cores())
694-
? proc_type_table[0][MAIN_CORE_PROC] + proc_type_table[0][EFFICIENT_CORE_PROC]
695-
: proc_type_table[0][MAIN_CORE_PROC])
696-
: proc_type_table[0][MAIN_CORE_PROC] + proc_type_table[0][EFFICIENT_CORE_PROC];
697+
bool use_all_cores =
698+
proc_type_table[0][MAIN_CORE_PROC] <=
699+
(proc_type_table[0][EFFICIENT_CORE_PROC] / (int8_intensive ? int8_threshold : fp32_threshold));
700+
bool use_big_and_little = !llm_related && (ov::get_number_of_blocked_cores() != 0);
701+
702+
if (use_all_cores || use_big_and_little) {
703+
model_prefer = proc_type_table[0][MAIN_CORE_PROC] + proc_type_table[0][EFFICIENT_CORE_PROC];
704+
} else {
705+
model_prefer = proc_type_table[0][MAIN_CORE_PROC];
706+
}
697707
#endif
698708
}
699709
} else { // throughput

src/plugins/intel_cpu/src/emitters/plugin/x64/debug_capabilities.cpp

Lines changed: 23 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -128,13 +128,23 @@ void RegPrinter::restore_reg(jit_generator& h) {
128128

129129
void RegPrinter::preamble(jit_generator& h) {
130130
save_reg(h);
131-
mayiuse(cpu_isa_t::avx512_core) ? save_vmm<Zmm>(h)
132-
: (mayiuse(cpu_isa_t::avx2) ? save_vmm<Ymm>(h) : save_vmm<Xmm>(h));
131+
if (mayiuse(cpu_isa_t::avx512_core)) {
132+
save_vmm<Zmm>(h);
133+
} else if (mayiuse(cpu_isa_t::avx2)) {
134+
save_vmm<Ymm>(h);
135+
} else {
136+
save_vmm<Xmm>(h);
137+
}
133138
}
134139

135140
void RegPrinter::postamble(jit_generator& h) {
136-
mayiuse(cpu_isa_t::avx512_core) ? restore_vmm<Zmm>(h)
137-
: (mayiuse(cpu_isa_t::avx2) ? restore_vmm<Ymm>(h) : restore_vmm<Xmm>(h));
141+
if (mayiuse(cpu_isa_t::avx512_core)) {
142+
restore_vmm<Zmm>(h);
143+
} else if (mayiuse(cpu_isa_t::avx2)) {
144+
restore_vmm<Ymm>(h);
145+
} else {
146+
restore_vmm<Xmm>(h);
147+
}
138148
restore_reg(h);
139149
}
140150

@@ -158,7 +168,15 @@ void RegPrinter::print_vmm(jit_generator& h, REG_T vmm, const char* name) {
158168
h.push(abi_param2);
159169
h.push(abi_param3);
160170
{
161-
const int vlen = vmm.isZMM() ? 64 : (vmm.isYMM() ? 32 : 16);
171+
int vlen = [&]() {
172+
if (vmm.isZMM()) {
173+
return 64;
174+
}
175+
if (vmm.isYMM()) {
176+
return 32;
177+
}
178+
return 16;
179+
}();
162180
h.sub(h.rsp, vlen);
163181
h.uni_vmovups(h.ptr[h.rsp], vmm);
164182

src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.cpp

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -32,9 +32,13 @@ size_t jit_emitter::get_max_vecs_count() const {
3232
}
3333

3434
size_t jit_emitter::get_vec_length() const {
35-
return one_of(host_isa_, cpu::x64::avx512_core, cpu::x64::avx512_core) ? 64
36-
: one_of(host_isa_, cpu::x64::avx2) ? 32
37-
: 16;
35+
if (host_isa_ == cpu::x64::avx512_core) {
36+
return 64;
37+
}
38+
if (host_isa_ == cpu::x64::avx2) {
39+
return 32;
40+
}
41+
return 16;
3842
}
3943

4044
void jit_emitter::push_vec(const Xbyak::Address& addr, size_t vec_idx) const {

src/plugins/intel_cpu/src/infer_request.cpp

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -544,9 +544,15 @@ void SyncInferRequest::init_tensor(const std::size_t& port_index, const ov::ISyn
544544

545545
// WA, due to the transformations and constant folding, shape inference of the resulting model may
546546
// have static shapes, while they are dynamic in the initial representation
547-
const auto& shape = graph_shape.isDynamic()
548-
? port_shape
549-
: (port_shape.is_dynamic() ? graph_shape.toPartialShape() : port_shape);
547+
const auto shape = [&]() -> ov::PartialShape {
548+
if (graph_shape.isDynamic()) {
549+
return port_shape;
550+
}
551+
if (port_shape.is_dynamic()) {
552+
return graph_shape.toPartialShape();
553+
}
554+
return port_shape;
555+
}();
550556

551557
const bool isDynamic = shape.is_dynamic();
552558
tensor = ov::ISyncInferRequest::get_tensor(port);

src/plugins/intel_cpu/src/nodes/bin_conv.cpp

Lines changed: 16 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -838,9 +838,15 @@ struct jit_uni_bin_conv_kernel_f32 : public jit_uni_bin_conv_kernel, public jit_
838838

839839
int nbits = 8;
840840
const int inp_mult = div_up(jcp_.ic, nbits);
841-
const int out_mult = jcp_.with_dw_conv ? jcp_.oc_block
842-
: jcp_.with_binarization ? div_up(jcp_.oc, nbits)
843-
: jcp_.oc;
841+
int out_mult = [&]() {
842+
if (jcp_.with_dw_conv) {
843+
return jcp_.oc_block;
844+
}
845+
if (jcp_.with_binarization) {
846+
return div_up(jcp_.oc, nbits);
847+
}
848+
return jcp_.oc;
849+
}();
844850

845851
int l_pad = jcp_.l_pad;
846852
int r_pad = nstl::max(0, (jcp_.ow - 1) * str_w + (kw - 1) * dilate_w - (iw + l_pad - 1));
@@ -1168,10 +1174,13 @@ void BinaryConvolution::createPrimitive() {
11681174
jcp.oc_block = simd_w;
11691175
jcp.nb_oc = div_up(jcp.oc, jcp.oc_block);
11701176

1171-
jcp.nb_oc_blocking = nstl::min(implType == impl_desc_type::jit_sse42 ? 2
1172-
: implType == impl_desc_type::jit_avx2 ? 4
1173-
: 6,
1174-
jcp.nb_oc);
1177+
if (implType == impl_desc_type::jit_sse42) {
1178+
jcp.nb_oc_blocking = nstl::min(2, jcp.nb_oc);
1179+
} else if (implType == impl_desc_type::jit_avx2) {
1180+
jcp.nb_oc_blocking = nstl::min(4, jcp.nb_oc);
1181+
} else {
1182+
jcp.nb_oc_blocking = nstl::min(6, jcp.nb_oc);
1183+
}
11751184

11761185
auto srcPrecision = getParentEdgeAt(0)->getMemory().getDesc().getPrecision();
11771186
auto dstPrecision = getChildEdgeAt(0)->getMemory().getDesc().getPrecision();

src/plugins/intel_cpu/src/nodes/conv.cpp

Lines changed: 14 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -233,11 +233,13 @@ Convolution::Convolution(const std::shared_ptr<ov::Node>& op, const GraphContext
233233
}
234234
m_attrs.paddingL = convolutionOp->get_pads_begin();
235235
m_attrs.paddingR = convolutionOp->get_pads_end();
236-
m_attrs.autoPadding =
237-
convolutionOp->get_auto_pad() == ov::op::PadType::SAME_UPPER
238-
? AutoPaddingType::SAME_UPPER
239-
: (convolutionOp->get_auto_pad() == ov::op::PadType::SAME_LOWER ? AutoPaddingType::SAME_LOWER
240-
: AutoPaddingType::None);
236+
if (convolutionOp->get_auto_pad() == ov::op::PadType::SAME_UPPER) {
237+
m_attrs.autoPadding = AutoPaddingType::SAME_UPPER;
238+
} else if (convolutionOp->get_auto_pad() == ov::op::PadType::SAME_LOWER) {
239+
m_attrs.autoPadding = AutoPaddingType::SAME_LOWER;
240+
} else {
241+
m_attrs.autoPadding = AutoPaddingType::None;
242+
}
241243
} else if (groupConvolutionOp) {
242244
algorithm = Algorithm::ConvolutionGrouped;
243245
m_attrs.isGrouped = true;
@@ -258,11 +260,13 @@ Convolution::Convolution(const std::shared_ptr<ov::Node>& op, const GraphContext
258260
}
259261
m_attrs.paddingL = groupConvolutionOp->get_pads_begin();
260262
m_attrs.paddingR = groupConvolutionOp->get_pads_end();
261-
m_attrs.autoPadding =
262-
groupConvolutionOp->get_auto_pad() == ov::op::PadType::SAME_UPPER
263-
? AutoPaddingType::SAME_UPPER
264-
: (groupConvolutionOp->get_auto_pad() == ov::op::PadType::SAME_LOWER ? AutoPaddingType::SAME_LOWER
265-
: AutoPaddingType::None);
263+
if (groupConvolutionOp->get_auto_pad() == ov::op::PadType::SAME_UPPER) {
264+
m_attrs.autoPadding = AutoPaddingType::SAME_UPPER;
265+
} else if (groupConvolutionOp->get_auto_pad() == ov::op::PadType::SAME_LOWER) {
266+
m_attrs.autoPadding = AutoPaddingType::SAME_LOWER;
267+
} else {
268+
m_attrs.autoPadding = AutoPaddingType::None;
269+
}
266270
}
267271
// Only apply this heuristic logic on FP32 IR. IC=1 ,OC=1 would disable brgconv on avx2.
268272
const bool isAvx2FP32 = !dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core) &&

src/plugins/intel_cpu/src/nodes/deconv.cpp

Lines changed: 18 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -384,9 +384,15 @@ bool Deconvolution::canBeExecutedInInt8() const {
384384
}
385385

386386
// not supported in oneDNN
387-
int channelBlock = impl::cpu::x64::mayiuse(impl::cpu::x64::avx512_core) ? 16
388-
: impl::cpu::x64::mayiuse(impl::cpu::x64::avx2) ? 8
389-
: 4;
387+
int channelBlock = [&]() {
388+
if (impl::cpu::x64::mayiuse(impl::cpu::x64::avx512_core)) {
389+
return 16;
390+
}
391+
if (impl::cpu::x64::mayiuse(impl::cpu::x64::avx2)) {
392+
return 8;
393+
}
394+
return 4;
395+
}();
390396
if (withGroups && !isDW && (IC % channelBlock != 0 || OC % channelBlock != 0)) {
391397
return false;
392398
}
@@ -649,9 +655,15 @@ void Deconvolution::getSupportedDescriptors() {
649655

650656
if (isInt8) {
651657
const auto& rank = getInputShapeAtPort(0).getRank();
652-
auto format = rank == 5 ? dnnl::memory::format_tag::ndhwc
653-
: rank == 4 ? dnnl::memory::format_tag::nhwc
654-
: dnnl::memory::format_tag::nwc;
658+
dnnl::memory::format_tag format = [&]() {
659+
if (rank == 5) {
660+
return dnnl::memory::format_tag::ndhwc;
661+
}
662+
if (rank == 4) {
663+
return dnnl::memory::format_tag::nhwc;
664+
}
665+
return dnnl::memory::format_tag::nwc;
666+
}();
655667
MemoryDescPtr in_candidate =
656668
std::make_shared<DnnlBlockedMemoryDesc>(getInputShapeAtPort(0), inputDataType, format);
657669
MemoryDescPtr out_candidate =

src/plugins/intel_cpu/src/nodes/depth_to_space.cpp

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -204,10 +204,15 @@ void DepthToSpace::createPrimitive() {
204204
const auto& memoryDesc = srcMemPtr->getDesc();
205205
attrs.dataSize = memoryDesc.getPrecision().size();
206206
attrs.nSpatialDims = memoryDesc.getShape().getRank() - 2;
207-
attrs.layoutType = memoryDesc.hasLayoutType(LayoutType::nCsp16c) ? LayoutType::nCsp16c
208-
: memoryDesc.hasLayoutType(LayoutType::nCsp8c) ? LayoutType::nCsp8c
209-
: memoryDesc.hasLayoutType(LayoutType::nspc) ? LayoutType::nspc
210-
: LayoutType::ncsp;
207+
if (memoryDesc.hasLayoutType(LayoutType::nCsp16c)) {
208+
attrs.layoutType = LayoutType::nCsp16c;
209+
} else if (memoryDesc.hasLayoutType(LayoutType::nCsp8c)) {
210+
attrs.layoutType = LayoutType::nCsp8c;
211+
} else if (memoryDesc.hasLayoutType(LayoutType::nspc)) {
212+
attrs.layoutType = LayoutType::nspc;
213+
} else {
214+
attrs.layoutType = LayoutType::ncsp;
215+
}
211216

212217
if (inputShapesDefined()) {
213218
if (needPrepareParams()) {

src/plugins/intel_cpu/src/nodes/eltwise.cpp

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1594,9 +1594,13 @@ void Eltwise::initSupportedPrimitiveDescriptors() {
15941594
outputPrecision = fusedWith[fusedWith.size() - 1]->getOriginalOutputPrecisionAtPort(0);
15951595
}
15961596

1597-
implType = canUseOptimizedShapeAgnosticImpl ? EltwiseImplType::optimizedShapeAgnostic
1598-
: canUseOptimizedImpl ? EltwiseImplType::optimized
1599-
: EltwiseImplType::reference;
1597+
if (canUseOptimizedShapeAgnosticImpl) {
1598+
implType = EltwiseImplType::optimizedShapeAgnostic;
1599+
} else if (canUseOptimizedImpl) {
1600+
implType = EltwiseImplType::optimized;
1601+
} else {
1602+
implType = EltwiseImplType::reference;
1603+
}
16001604

16011605
const auto useJitExecutor = one_of(implType, EltwiseImplType::optimizedShapeAgnostic, EltwiseImplType::optimized);
16021606

src/plugins/intel_cpu/src/nodes/executors/interpolate.cpp

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -221,7 +221,15 @@ void ov::intel_cpu::InterpolateExecutor::buildTblLinearOnnx(const VectorDims& sr
221221
// FrontTopLeft:0, FrontTopRight:1, FrontBottomLeft:2, FrontBottomRight:3,
222222
// EndTopLeft:4, EndTopRight:5, EndBottomLeft:6, EndBottomRight:7
223223
// weight: Left:0, ritht:1, top:2, bottom:3, front:4, end:5
224-
int eltInGrid = (spatialDimSize > 2) ? MAX_INPUT_INTERPOLATE : ((spatialDimSize > 1) ? 4 : 2);
224+
int eltInGrid = [&]() -> int {
225+
if (spatialDimSize > 2) {
226+
return MAX_INPUT_INTERPOLATE;
227+
}
228+
if (spatialDimSize > 1) {
229+
return 4;
230+
}
231+
return 2;
232+
}();
225233
int idxType = 2;
226234
int scratchLen = rnd_up(eltInGrid * OW * OH * OD, 16);
227235
indexTable.resize(idxType * scratchLen);

0 commit comments

Comments (0)