
Commit c421f6e: "Readability part 2"
1 parent ef9c8d9


53 files changed: +223 −209 lines

src/plugins/intel_cpu/src/.clang-tidy
Lines changed: 0 additions & 10 deletions

@@ -49,17 +49,12 @@
 # -misc-non-private-member-variables-in-classes,
 # -misc-use-anonymous-namespace,
 # -readability-avoid-nested-conditional-operator
-# -readability-avoid-return-with-void-value
 # -readability-const-return-type
-# -readability-convert-member-functions-to-static
 # -readability-function-cognitive-complexity. Reasonable way to enforce splitting complex code into simple functions
-# -readability-inconsistent-declaration-parameter-name
 # -readability-isolate-declaration
 # -readability-magic-numbers, cppcoreguidelines-avoid-magic-numbers
 # -readability-simplify-boolean-expr
-# -readability-static-accessed-through-instance
 # -readability-suspicious-call-argument
-# -readability-use-anyofallof
 # Remove warning disablement after CI pipeline migrates to C++17 from C++20 for:
 # -modernize-use-constraints,
 # -modernize-use-std-numbers
@@ -116,19 +111,14 @@ Checks: >
     -misc-non-private-member-variables-in-classes,
     -misc-use-anonymous-namespace,
     -readability-avoid-nested-conditional-operator,
-    -readability-avoid-return-with-void-value,
     -readability-const-return-type,
-    -readability-convert-member-functions-to-static,
     -readability-function-cognitive-complexity,
     -readability-identifier-length,
-    -readability-inconsistent-declaration-parameter-name,
     -readability-isolate-declaration,
     -readability-magic-numbers,
     -readability-simplify-boolean-expr,
-    -readability-static-accessed-through-instance,
     -readability-suspicious-call-argument,
     -readability-uppercase-literal-suffix,
-    -readability-use-anyofallof,
 # Treat warnings as errors
 WarningsAsErrors: '*'
 # Use clang-format for applied fixes
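
The five checks deleted from the disabled list above (readability-avoid-return-with-void-value, readability-convert-member-functions-to-static, readability-inconsistent-declaration-parameter-name, readability-static-accessed-through-instance, readability-use-anyofallof) are the ones this commit starts enforcing. A minimal sketch (hypothetical code, not from this commit) of what readability-avoid-return-with-void-value flags, the pattern most of the .cpp hunks below rewrite:

#include <iostream>

// Hypothetical helper, for illustration only.
void log_value(int value) {
    std::cout << value << '\n';
}

void process(int value) {
    if (value < 0) {
        // Before (flagged): return log_value(-value);
        log_value(-value);  // after the fix: call, then a plain return
        return;
    }
    log_value(value);
}

int main() {
    process(-3);  // prints 3
    return 0;
}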

src/plugins/intel_cpu/src/config.h
Lines changed: 1 addition & 1 deletion

@@ -120,7 +120,7 @@ struct Config {
     // is reserved.
     bool DAZOn = false;

-    void readProperties(const ov::AnyMap& config, const ModelType modelType = ModelType::Unknown);
+    void readProperties(const ov::AnyMap& prop, const ModelType modelType = ModelType::Unknown);

     void updateProperties();

src/plugins/intel_cpu/src/cpu_memory.h
Lines changed: 1 addition & 1 deletion

@@ -449,7 +449,7 @@ using MemoryPtr = std::shared_ptr<IMemory>;
 using MemoryCPtr = std::shared_ptr<const IMemory>;
 using StringMemoryPtr = std::shared_ptr<StringMemory>;

-bool mbind_move(void* data, size_t size, int numaNodeID);
+bool mbind_move(void* data, size_t size, int targetNode);
 bool mbind_move(const MemoryCPtr& mem, int numaNodeID);
 bool mbind_move(const dnnl::memory& mem, int numaNodeID);
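
The rename presumably aligns the declaration with the parameter name already used in the definition, which is what readability-inconsistent-declaration-parameter-name enforces. A minimal sketch with hypothetical names:

// In a header: the declaration uses one name...
bool move_buffer(void* data, int numaNodeID);

// ...while the definition uses another; clang-tidy flags the mismatch,
// and the fix is to use the same name in both places.
bool move_buffer(void* data, int targetNode) {
    return data != nullptr && targetNode >= 0;
}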

src/plugins/intel_cpu/src/cpu_streams_calculation.hpp
Lines changed: 1 addition & 1 deletion

@@ -50,7 +50,7 @@ std::vector<std::vector<int>> get_streams_info_table(
     const int input_infer_requests,
     const int model_prefer_threads,
     const std::string& input_perf_hint,
-    const std::set<ov::hint::ModelDistributionPolicy>& hint_llm_distribution_policy,
+    const std::set<ov::hint::ModelDistributionPolicy>& hint_model_distribution_policy,
     const std::vector<std::vector<int>>& proc_type_table);

 /**

src/plugins/intel_cpu/src/dnnl_extension_utils.h
Lines changed: 1 addition & 1 deletion

@@ -119,7 +119,7 @@ class DnnlExtensionUtils {
         return;
     }

-    static bool find_implementation(dnnl::primitive_desc& desc, impl_desc_type implType);
+    static bool find_implementation(dnnl::primitive_desc& desc, impl_desc_type impl_type);
     static dnnl_primitive_desc_t clone_primitive_desc(const_dnnl_primitive_desc_t cprim_desc);
     static dnnl_memory_desc_t clone_desc(const_dnnl_memory_desc_t cdesc);
     static const char* query_pd_info(const_dnnl_primitive_desc_t pd);

src/plugins/intel_cpu/src/dnnl_postops_composer.cpp
Lines changed: 4 additions & 3 deletions

@@ -952,9 +952,10 @@ void DnnlPostOpsComposer::appendAttrPostOpsLegacy(const FakeQuantizePostOp& post
         std::fill(binarizationThresholds.begin() + realAxisSize, binarizationThresholds.end(), 0.F);
     }

-    return ops.append_binarization(dnnl::algorithm::binarization_depthwise,
-                                   reinterpret_cast<const float*>(binarizationThresholds.data()),
-                                   reinterpret_cast<const float*>(binarizationOutputMask.data()));
+    ops.append_binarization(dnnl::algorithm::binarization_depthwise,
+                            reinterpret_cast<const float*>(binarizationThresholds.data()),
+                            reinterpret_cast<const float*>(binarizationOutputMask.data()));
+    return;
 }

 dnnl::algorithm alg = postOp.type() == FakeQuantizePostOp::Type::quantization_only

src/plugins/intel_cpu/src/emitters/plugin/x64/debug_capabilities.hpp
Lines changed: 1 addition & 1 deletion

@@ -77,7 +77,7 @@ class RegPrinter {
     template <typename PRC_T, size_t vlen>
     static void print_vmm_prc(const char* name, const char* ori_name, PRC_T* ptr);
     template <typename T>
-    static void print_reg_prc(const char* name, const char* ori_name, T* val);
+    static void print_reg_prc(const char* name, const char* ori_name, T* ptr);
     static void preamble(jit_generator& h);
     static void postamble(jit_generator& h);
     template <typename T>

src/plugins/intel_cpu/src/emitters/plugin/x64/jit_load_store_emitters.hpp
Lines changed: 2 additions & 2 deletions

@@ -165,13 +165,13 @@ class jit_store_emitter : public jit_emitter {
     void store_bytes(const Xbyak::Reg64& reg, int offset, int store_size) const;

     template <typename Vmm>
-    void store_dword_to_byte_extension(const Xbyak::Reg64& reg, int offset, bool is_signed, int store_size) const;
+    void store_dword_to_byte_extension(const Xbyak::Reg64& reg, int offset, bool is_signed, int store_num) const;

     template <typename Vmm>
     void store_dword_to_word_extension(const Xbyak::Reg64& reg,
                                        int offset,
                                        ov::element::Type precision,
-                                       int store_size) const;
+                                       int store_num) const;

     void register_table_entries() override;

src/plugins/intel_cpu/src/emitters/snippets/x64/jit_fill_emitter.hpp
Lines changed: 2 additions & 2 deletions

@@ -34,9 +34,9 @@ class jit_fill_emitter : public jit_emitter {
     template <dnnl::impl::cpu::x64::cpu_isa_t isa>
     void emit_isa(const std::vector<size_t>& in, const std::vector<size_t>& out) const;
     template <typename Vmm>
-    void fill_full(const Vmm& vmm_dst) const;
+    void fill_full(const Vmm& dst_vmm) const;
     template <typename Vmm>
-    void fill_tail(const Vmm& vmm_src, const Vmm& vmm_dst) const;
+    void fill_tail(const Vmm& src_vmm, const Vmm& dst_vmm) const;

     bool is_full_reg() const {
         return offset == 0;

src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm.cpp
Lines changed: 1 addition & 1 deletion

@@ -90,7 +90,7 @@ std::shared_ptr<BrgemmCompiledKernel> BrgemmKernelExecutor::compile_kernel(const
 void BrgemmKernelExecutor::update_config(const ov::snippets::lowered::ExpressionPtr& expr,
                                          const ov::snippets::lowered::LinearIRCPtr& linear_ir,
                                          BrgemmKernelConfig& config) const {
-    return BrgemmBaseKernelExecutor::update_config(expr, linear_ir, config);
+    BrgemmBaseKernelExecutor::update_config(expr, linear_ir, config);
 }

 void BrgemmKernelExecutor::execute(const BrgemmKernelExecutor* executor, call_args* args) {

src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_amx.cpp
Lines changed: 1 addition & 1 deletion

@@ -247,7 +247,7 @@ void BrgemmAMXKernelExecutor::create_brgemm_copy_a_kernel(
 void BrgemmAMXKernelExecutor::update_config(const ov::snippets::lowered::ExpressionPtr& expr,
                                             const ov::snippets::lowered::LinearIRCPtr& linear_ir,
                                             BrgemmAMXKernelConfig& config) const {
-    return BrgemmBaseKernelExecutor::update_config(expr, linear_ir, config);
+    BrgemmBaseKernelExecutor::update_config(expr, linear_ir, config);
 }

 void BrgemmAMXKernelExecutor::configure_tiles_if_needed(amx_tile_config_t* config,

src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.hpp
Lines changed: 2 additions & 2 deletions

@@ -112,8 +112,8 @@ struct BrgemmCopyBKernelConfig : public snippets::KernelExecutorBase::GenericCon

 private:
     struct StaticParams {
-        StaticParams(const element::Type& src_dt,
-                     const element::Type& wei_dt,
+        StaticParams(const element::Type& src_type,
+                     const element::Type& wei_type,
                      dnnl::impl::cpu::x64::cpu_isa_t isa,
                      bool is_with_comp,
                      bool is_transposed_B,

src/plugins/intel_cpu/src/graph.cpp
Lines changed: 2 additions & 4 deletions

@@ -405,12 +405,10 @@ void Graph::Activate() {
 void Graph::Configure([[maybe_unused]] bool optimize) {
     OPENVINO_ASSERT(status == Status::NotReady, "Invalid graph status");

-    GraphOptimizer optimizer;
-
     SortTopologically();
     InitNodes();

-    optimizer.ApplyCommonGraphOptimizations(*this);
+    ov::intel_cpu::GraphOptimizer::ApplyCommonGraphOptimizations(*this);

     SortTopologically();

@@ -429,7 +427,7 @@ void Graph::Configure([[maybe_unused]] bool optimize) {

     ResolveComplexInplaceConflicts();

-    optimizer.ApplyImplSpecificGraphOptimizations(*this);
+    ov::intel_cpu::GraphOptimizer::ApplyImplSpecificGraphOptimizations(*this);

     SortTopologically();
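
With the optimizer entry points now static (see graph_optimizer.h below), the throwaway GraphOptimizer instance is gone and the calls are class-qualified, which also satisfies readability-static-accessed-through-instance. A hedged sketch of the pattern, with hypothetical names:

struct Optimizer {
    static void apply() { /* ... */ }
};

void configure() {
    // Before: an instance existed only to call the member function.
    //     Optimizer optimizer;
    //     optimizer.apply();  // flagged: static member accessed through instance
    Optimizer::apply();  // after: qualified call, no instance needed
}

int main() {
    configure();
    return 0;
}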

src/plugins/intel_cpu/src/graph.h
Lines changed: 1 addition & 1 deletion

@@ -310,7 +310,7 @@ class Graph {

     bool graphHasDynamicInput = false;

-    void Replicate(const std::shared_ptr<const ov::Model>& subgraph,
+    void Replicate(const std::shared_ptr<const ov::Model>& model,
                    const std::vector<node::Input::InputConfig>& inputConfigs = {},
                    const std::vector<node::Input::OutputConfig>& outputConfigs = {});

src/plugins/intel_cpu/src/graph_optimizer.cpp
Lines changed: 12 additions & 14 deletions

@@ -1650,11 +1650,11 @@ void GraphOptimizer::FuseConvolutionSumAndConvolutionSumActivation(Graph& graph)
         }

         if (fuseCandidate->getAlgorithm() == Algorithm::EltwiseAdd) {
-            for (auto& fusedNode : binConv->fusedWith) {
-                const auto eltwise = std::dynamic_pointer_cast<Eltwise>(fusedNode);
-                if (eltwise && eltwise->isSpecialConvolutionAddFusing()) {
-                    return false;
-                }
+            if (!std::all_of(binConv->fusedWith.begin(), binConv->fusedWith.end(), [](const NodePtr& fusedNode) {
+                    const auto eltwise = std::dynamic_pointer_cast<Eltwise>(fusedNode);
+                    return !(eltwise && eltwise->isSpecialConvolutionAddFusing());
+                })) {
+                return false;
             }
             return true;
         }
@@ -1672,12 +1672,10 @@ void GraphOptimizer::FuseConvolutionSumAndConvolutionSumActivation(Graph& graph)
     }

     auto checkFusedWithSum = [](Convolution* conv) -> bool {
-        for (const auto& node : conv->getFusedWith()) {
+        return std::any_of(conv->getFusedWith().begin(), conv->getFusedWith().end(), [](const NodePtr& node) {
             const auto eltwise = std::dynamic_pointer_cast<Eltwise>(node);
-            if (eltwise && eltwise->isSpecialConvolutionAddFusing()) {
-                return true;
-            }
-        }
+            return eltwise && eltwise->isSpecialConvolutionAddFusing();
+        });
         return false;
     };

@@ -3186,10 +3184,10 @@ void GraphOptimizer::RemoveConvertMemoryOutput(Graph& graph) {
     }

     auto&& childEdges = node->getChildEdgesAtPort(0);
-    for (auto&& edge : childEdges) {
-        if (Type::MemoryOutput != edge->getChild()->getType()) {
-            return false;
-        }
+    if (!std::all_of(childEdges.begin(), childEdges.end(), [](const auto& edge) {
+            return Type::MemoryOutput == edge->getChild()->getType();
+        })) {
+        return false;
     }

     return true;
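
All three hunks are the readability-use-anyofallof rewrite: an early-return loop becomes std::all_of or std::any_of over a lambda predicate. The general shape, as a standalone sketch with hypothetical data:

#include <algorithm>
#include <vector>

bool anyNegative(const std::vector<int>& values) {
    // Loop form: iterate, return true on the first match, false otherwise.
    // Algorithm form: the predicate names the condition and the early
    // exit is implicit in std::any_of.
    return std::any_of(values.begin(), values.end(), [](int v) {
        return v < 0;
    });
}

int main() {
    return anyNegative({1, -2, 3}) ? 0 : 1;
}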

src/plugins/intel_cpu/src/graph_optimizer.h
Lines changed: 3 additions & 3 deletions

@@ -17,7 +17,7 @@ class GraphOptimizer {

 public:
     static void ApplyCommonGraphOptimizations(Graph& graph);
-    void ApplyImplSpecificGraphOptimizations(Graph& graph);
+    static void ApplyImplSpecificGraphOptimizations(Graph& graph);
     static void ShareReorders(Graph& graph);

 private:
@@ -49,8 +49,8 @@ class GraphOptimizer {
     static void FuseEltwiseAndSimple(Graph& graph);
     static void FusePerformedAsScaleShiftAndFakeQuantize(Graph& graph);
     static void FuseClampAndFakeQuantize(Graph& graph);
-    void MergeTransposeAndReorder(Graph& graph);
-    void MergeReorderAndTranspose(Graph& graph);
+    static void MergeTransposeAndReorder(Graph& graph);
+    static void MergeReorderAndTranspose(Graph& graph);
     static void reshapeRnnSeq(Graph& graph);
     static void RemoveSameConvert(Graph& graph);
     static void RemoveMemoryInputConvert(Graph& graph);

src/plugins/intel_cpu/src/node.cpp
Lines changed: 13 additions & 13 deletions

@@ -234,6 +234,7 @@ void Node::remove() {
 }

 bool Node::isEdgesEmpty(const std::vector<EdgeWeakPtr>& edges) {
+    // NOLINTNEXTLINE(readability-use-anyofallof)
     for (const auto& edge : edges) {
         if (edge.lock()) {
             return false;
@@ -371,7 +372,8 @@ void Node::selectPreferPrimitiveDescriptorWithShape(const std::vector<impl_desc_
                                                     bool ignoreConstInputs) {
     // Filter out dynamic shape.
     if (isDynamic) {
-        return selectPreferPrimitiveDescriptor(priority, ignoreConstInputs);
+        selectPreferPrimitiveDescriptor(priority, ignoreConstInputs);
+        return;
     }

     auto estimateReorderOverhead = [&](const ov::intel_cpu::NodeDesc& supportedPrimitiveDesc,
@@ -835,9 +837,10 @@ void Node::updateDynamicParams() {

 void Node::execute(const dnnl::stream& strm, int numaId) {
     if (isDynamicNode()) {
-        return executeDynamic(strm, numaId);
+        executeDynamic(strm, numaId);
+    } else {
+        executeStatic(strm, numaId);
     }
-    return executeStatic(strm, numaId);
 }

 void Node::executeStatic(const dnnl::stream& strm, int numaId) {
@@ -1218,7 +1221,7 @@ void Node::toNumaNode(int numaNodeID) {
         return;
     }

-    return toNumaNodeImpl(numaNodeID);
+    toNumaNodeImpl(numaNodeID);
 }

 void Node::toNumaNodeImpl(int numaNodeID) {
@@ -1521,13 +1524,9 @@ void Node::appendPostOpArgs([[maybe_unused]] const dnnl::primitive_attr& attr,
 }

 bool Node::isFusedWith(Type fusedNodeType) const {
-    for (const auto& fusedNode : fusedWith) {
-        if (fusedNode->type == fusedNodeType) {
-            return true;
-        }
-    }
-
-    return false;
+    return std::any_of(fusedWith.begin(), fusedWith.end(), [fusedNodeType](const NodePtr& fusedNode) {
+        return fusedNode->type == fusedNodeType;
+    });
 }

 dnnl::memory::format_tag Node::getWeightsFormatTagByDims(const VectorDims& dims) {
@@ -2155,6 +2154,7 @@ void Node::resolveInPlaceDirection() {
         } else if (parentInPlaceDirection == InplaceDirectionType::DOWN) {
             // search if siblings already have downstream direction
             auto downstreamPeers = [&] {
+                // NOLINTNEXTLINE(readability-use-anyofallof)
                 for (auto& peerEdge : pParent->getChildEdgesAtPort(pEdge->getInputNum())) {
                     auto* peerNode = peerEdge->getChild().get();
                     if (peerNode == this) {
@@ -2234,8 +2234,8 @@ void Node::resolveInPlaceDirection() {
             } else {
                 auto result = inPlaceDirection(pChild, PortType::INPUT, edge->getOutputNum());
                 if (InplaceDirectionType::CYCLIC == result) {
-                    return searchReferencingOutput(pChild,
-                                                   pChild->inPlaceInputPort(edge->getOutputNum()));
+                    searchReferencingOutput(pChild, pChild->inPlaceInputPort(edge->getOutputNum()));
+                    return;
                 }
             }
         }
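
Not every loop is converted: where the algorithm form was evidently judged less readable (isEdgesEmpty, which locks weak pointers, and the peer search in resolveInPlaceDirection), the check is suppressed locally instead. A minimal sketch of that suppression, mirroring the shape of isEdgesEmpty:

#include <memory>
#include <vector>

using EdgeWeakPtr = std::weak_ptr<int>;  // stand-in for the plugin's edge type

bool isEdgesEmpty(const std::vector<EdgeWeakPtr>& edges) {
    // NOLINTNEXTLINE(readability-use-anyofallof)
    for (const auto& edge : edges) {
        if (edge.lock()) {  // any live edge means the list is not empty
            return false;
        }
    }
    return true;
}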

src/plugins/intel_cpu/src/node.h
Lines changed: 7 additions & 7 deletions

@@ -527,7 +527,7 @@ class Node {
     void updateShapes();
     void updateDynamicParams();
     void executeDynamic(const dnnl::stream& strm, int numaId = -1);
-    virtual void redefineOutputMemory(const std::vector<VectorDims>& newShapes);
+    virtual void redefineOutputMemory(const std::vector<VectorDims>& newOutputShapes);
     void redefineOutputMemory(const size_t port, const VectorDims& new_output_shape) const;
     bool outputShapeDataDependency() const;

@@ -766,8 +766,8 @@ class Node {

     int curNumaNode = -1;

-    void toNumaNode(int numaID);
-    virtual void toNumaNodeImpl(int numaID);
+    void toNumaNode(int numaNodeID);
+    virtual void toNumaNodeImpl(int numaNodeID);

     std::string primitivesPriority;
     std::vector<impl_desc_type> customImplPriorities;
@@ -781,8 +781,8 @@ class Node {
     Node(const std::shared_ptr<ov::Node>& op, GraphContext::CPtr ctx, const ShapeInferFactory& shapeInferFactory);

     Node(const std::string& type,
-         std::vector<Shape> inputShapes,
-         std::vector<Shape> outputShapes,
+         std::vector<Shape> inShapes,
+         std::vector<Shape> outShapes,
          std::vector<ov::element::Type> originalInputPrecisions,
          std::vector<ov::element::Type> originalOutputPrecisions,
          const std::string& name,
@@ -864,10 +864,10 @@ class Node {

     bool inputShapesModified() const;
     virtual bool needShapeInfer() const;
-    std::vector<VectorDims> shapeInferGeneric(const std::vector<Shape>& inputDims) const;
+    std::vector<VectorDims> shapeInferGeneric(const std::vector<Shape>& shapes) const;
     virtual IShapeInfer::Result shapeInfer() const;

-    void execute(const dnnl::stream& stream, int numaId);
+    void execute(const dnnl::stream& strm, int numaId);
     virtual void execute(const dnnl::stream& strm) = 0;
     // TODO [DS] : make pure after all nodes support dynamic shapes
     virtual void executeDynamicImpl(const dnnl::stream& strm) {

src/plugins/intel_cpu/src/nodes/common/tile_broadcast_utils.h
Lines changed: 2 additions & 2 deletions

@@ -35,8 +35,8 @@ class TileBroadcastCommon {
                                     VectorDims& optimizedSrcStrides);
     static void broadcastScalar(const char* srcData, char* dstData, size_t elt_cnt, size_t data_size);

-    static bool canBeExecutedInBlockedLayout(VectorDims srcDims, VectorDims repeats, const size_t elemsInBlock);
-    static bool canBeExecutedInNSPCLayout(VectorDims srcDims, VectorDims repeats);
+    static bool canBeExecutedInBlockedLayout(VectorDims srcBlockedDims, VectorDims repeats, const size_t elemsInBlock);
+    static bool canBeExecutedInNSPCLayout(VectorDims srcBlockedDims, VectorDims repeats);

     struct {
         VectorDims dims;

src/plugins/intel_cpu/src/nodes/conv.cpp
Lines changed: 2 additions & 1 deletion

@@ -698,7 +698,8 @@ void Convolution::prepareParams() {

 void Convolution::redefineOutputMemory(const std::vector<VectorDims>& newOutputShapes) {
     if (!withSum) {  // fast path
-        return Node::redefineOutputMemory(newOutputShapes);
+        Node::redefineOutputMemory(newOutputShapes);
+        return;
     }

     const size_t sumPortNum = getParentEdges().size() - 1;
