[CPU] Apply part of manual 'readability-*' clang-tidy remarks #30807

Merged
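This pull request removes five manual 'readability-*' checks from the disabled list in the CPU plugin's .clang-tidy configuration and applies the corresponding fixes across the plugin sources. In the per-file diffs below, lines the PR removes are prefixed with '-', lines it adds with '+', and unprefixed lines are unchanged context.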
10 changes: 0 additions & 10 deletions src/plugins/intel_cpu/src/.clang-tidy
@@ -49,17 +49,12 @@
# -misc-non-private-member-variables-in-classes,
# -misc-use-anonymous-namespace,
# -readability-avoid-nested-conditional-operator
- # -readability-avoid-return-with-void-value
# -readability-const-return-type
- # -readability-convert-member-functions-to-static
# -readability-function-cognitive-complexity. Reasonable way to enforce splitting complex code into simple functions
- # -readability-inconsistent-declaration-parameter-name
# -readability-isolate-declaration
# -readability-magic-numbers, cppcoreguidelines-avoid-magic-numbers
# -readability-simplify-boolean-expr
- # -readability-static-accessed-through-instance
# -readability-suspicious-call-argument
- # -readability-use-anyofallof
# Remove warning disablement after CI pipeline migrates to C++17 from C++20 for:
# -modernize-use-constraints,
# -modernize-use-std-numbers
@@ -116,19 +111,14 @@ Checks: >
-misc-non-private-member-variables-in-classes,
-misc-use-anonymous-namespace,
-readability-avoid-nested-conditional-operator,
- -readability-avoid-return-with-void-value,
-readability-const-return-type,
- -readability-convert-member-functions-to-static,
-readability-function-cognitive-complexity,
-readability-identifier-length,
- -readability-inconsistent-declaration-parameter-name,
-readability-isolate-declaration,
-readability-magic-numbers,
-readability-simplify-boolean-expr,
- -readability-static-accessed-through-instance,
-readability-suspicious-call-argument,
-readability-uppercase-literal-suffix,
- -readability-use-anyofallof,
# Treat warnings as errors
WarningsAsErrors: '*'
# Use clang-format for applied fixes
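The five entries deleted above, from both the comment block and the Checks list, are readability-avoid-return-with-void-value, readability-convert-member-functions-to-static, readability-inconsistent-declaration-parameter-name, readability-static-accessed-through-instance, and readability-use-anyofallof. With WarningsAsErrors set to '*', violations of these checks now fail the build. A minimal, self-contained sketch of what each check flags and the accepted form; the names below are illustrative, not taken from the plugin:

```cpp
#include <algorithm>
#include <vector>

struct Widget {
    // readability-convert-member-functions-to-static: a method that touches
    // no instance state should be declared static.
    static int scale(int x) { return 2 * x; }

    // readability-inconsistent-declaration-parameter-name: the declaration
    // must use the same parameter name as the out-of-line definition.
    void resize(int newSize);
};

void Widget::resize(int newSize) { (void)newSize; }

// readability-avoid-return-with-void-value: "return w.resize(3);" would be
// flagged because resize() returns void; call it, then fall off the end.
void notify(Widget& w) {
    w.resize(3);
}

// readability-use-anyofallof: a hand-written early-exit loop becomes
// std::any_of (or std::all_of over the negated predicate).
bool anyNegative(const std::vector<int>& v) {
    return std::any_of(v.begin(), v.end(), [](int x) { return x < 0; });
}

int main() {
    Widget w;
    notify(w);
    // readability-static-accessed-through-instance: prefer Widget::scale(2)
    // over w.scale(2); the instance adds nothing to the call.
    return (Widget::scale(2) == 4 && anyNegative({-1})) ? 0 : 1;
}
```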
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/config.h
@@ -120,7 +120,7 @@ struct Config {
// is reserved.
bool DAZOn = false;

- void readProperties(const ov::AnyMap& config, const ModelType modelType = ModelType::Unknown);
+ void readProperties(const ov::AnyMap& prop, const ModelType modelType = ModelType::Unknown);

void updateProperties();

2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/cpu_memory.h
@@ -449,7 +449,7 @@ using MemoryPtr = std::shared_ptr<IMemory>;
using MemoryCPtr = std::shared_ptr<const IMemory>;
using StringMemoryPtr = std::shared_ptr<StringMemory>;

- bool mbind_move(void* data, size_t size, int numaNodeID);
+ bool mbind_move(void* data, size_t size, int targetNode);
bool mbind_move(const MemoryCPtr& mem, int numaNodeID);
bool mbind_move(const dnnl::memory& mem, int numaNodeID);

2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/cpu_streams_calculation.hpp
@@ -50,7 +50,7 @@ std::vector<std::vector<int>> get_streams_info_table(
const int input_infer_requests,
const int model_prefer_threads,
const std::string& input_perf_hint,
- const std::set<ov::hint::ModelDistributionPolicy>& hint_llm_distribution_policy,
+ const std::set<ov::hint::ModelDistributionPolicy>& hint_model_distribution_policy,
const std::vector<std::vector<int>>& proc_type_table);

/**
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/dnnl_extension_utils.h
@@ -119,7 +119,7 @@ class DnnlExtensionUtils {
return;
}

- static bool find_implementation(dnnl::primitive_desc& desc, impl_desc_type implType);
+ static bool find_implementation(dnnl::primitive_desc& desc, impl_desc_type impl_type);
static dnnl_primitive_desc_t clone_primitive_desc(const_dnnl_primitive_desc_t cprim_desc);
static dnnl_memory_desc_t clone_desc(const_dnnl_memory_desc_t cdesc);
static const char* query_pd_info(const_dnnl_primitive_desc_t pd);
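The four header tweaks above (config.h, cpu_memory.h, cpu_streams_calculation.hpp, dnnl_extension_utils.h) all look like readability-inconsistent-declaration-parameter-name fixes: the parameter name in the declaration is realigned with the one used in the definition. A reduced sketch of the mismatch the check reports; the function and names here are hypothetical, not the plugin's API:

```cpp
#include <cstddef>

// Before the fix, the header would say "numaNodeID" while the .cpp said
// "targetNode". Names need not match for the program to link, but the check
// treats the definition as the source of truth and asks the declaration to
// follow it, so both now read "targetNode".
bool mbind_like(void* data, std::size_t size, int targetNode);

bool mbind_like(void* data, std::size_t size, int targetNode) {
    // Placeholder body; the real function would migrate pages to the node.
    return data != nullptr && size != 0 && targetNode >= 0;
}

int main() {
    int block = 0;
    return mbind_like(&block, sizeof(block), 0) ? 0 : 1;
}
```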
7 changes: 4 additions & 3 deletions src/plugins/intel_cpu/src/dnnl_postops_composer.cpp
@@ -952,9 +952,10 @@ void DnnlPostOpsComposer::appendAttrPostOpsLegacy(const FakeQuantizePostOp& post
std::fill(binarizationThresholds.begin() + realAxisSize, binarizationThresholds.end(), 0.F);
}

- return ops.append_binarization(dnnl::algorithm::binarization_depthwise,
- reinterpret_cast<const float*>(binarizationThresholds.data()),
- reinterpret_cast<const float*>(binarizationOutputMask.data()));
+ ops.append_binarization(dnnl::algorithm::binarization_depthwise,
+ reinterpret_cast<const float*>(binarizationThresholds.data()),
+ reinterpret_cast<const float*>(binarizationOutputMask.data()));
+ return;
}

dnnl::algorithm alg = postOp.type() == FakeQuantizePostOp::Type::quantization_only
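The hunk above is a readability-avoid-return-with-void-value fix: append_binarization() returns void, so "return ops.append_binarization(...);" inside a void function is flagged, and the call and the return are split into separate statements. A reduced sketch of the shape of the fix, with a stand-in type rather than the dnnl API:

```cpp
#include <vector>

struct PostOps {
    std::vector<int> entries;
    void append_binarization(int algorithm) { entries.push_back(algorithm); }
};

// Before (flagged):
//     void build(PostOps& ops) { return ops.append_binarization(7); }
// After: the call and the (now optional) return are separate statements.
void build(PostOps& ops) {
    ops.append_binarization(7);
    return;
}

int main() {
    PostOps ops;
    build(ops);
    return ops.entries.size() == 1 ? 0 : 1;
}
```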
(file path not captured in this view; the following hunk touches RegPrinter)
@@ -77,7 +77,7 @@ class RegPrinter {
template <typename PRC_T, size_t vlen>
static void print_vmm_prc(const char* name, const char* ori_name, PRC_T* ptr);
template <typename T>
- static void print_reg_prc(const char* name, const char* ori_name, T* val);
+ static void print_reg_prc(const char* name, const char* ori_name, T* ptr);
static void preamble(jit_generator& h);
static void postamble(jit_generator& h);
template <typename T>
(file path not captured in this view; the following hunk touches jit_store_emitter)
@@ -165,13 +165,13 @@ class jit_store_emitter : public jit_emitter {
void store_bytes(const Xbyak::Reg64& reg, int offset, int store_size) const;

template <typename Vmm>
- void store_dword_to_byte_extension(const Xbyak::Reg64& reg, int offset, bool is_signed, int store_size) const;
+ void store_dword_to_byte_extension(const Xbyak::Reg64& reg, int offset, bool is_signed, int store_num) const;

template <typename Vmm>
void store_dword_to_word_extension(const Xbyak::Reg64& reg,
int offset,
ov::element::Type precision,
- int store_size) const;
+ int store_num) const;

void register_table_entries() override;

(file path not captured in this view; the following hunk touches jit_fill_emitter)
@@ -34,9 +34,9 @@ class jit_fill_emitter : public jit_emitter {
template <dnnl::impl::cpu::x64::cpu_isa_t isa>
void emit_isa(const std::vector<size_t>& in, const std::vector<size_t>& out) const;
template <typename Vmm>
- void fill_full(const Vmm& vmm_dst) const;
+ void fill_full(const Vmm& dst_vmm) const;
template <typename Vmm>
- void fill_tail(const Vmm& vmm_src, const Vmm& vmm_dst) const;
+ void fill_tail(const Vmm& src_vmm, const Vmm& dst_vmm) const;

bool is_full_reg() const {
return offset == 0;
(file path not captured in this view; the following hunk touches BrgemmKernelExecutor)
@@ -90,7 +90,7 @@ std::shared_ptr<BrgemmCompiledKernel> BrgemmKernelExecutor::compile_kernel(const
void BrgemmKernelExecutor::update_config(const ov::snippets::lowered::ExpressionPtr& expr,
const ov::snippets::lowered::LinearIRCPtr& linear_ir,
BrgemmKernelConfig& config) const {
- return BrgemmBaseKernelExecutor::update_config(expr, linear_ir, config);
+ BrgemmBaseKernelExecutor::update_config(expr, linear_ir, config);
}

void BrgemmKernelExecutor::execute(const BrgemmKernelExecutor* executor, call_args* args) {
(file path not captured in this view; the following hunk touches BrgemmAMXKernelExecutor)
@@ -247,7 +247,7 @@ void BrgemmAMXKernelExecutor::create_brgemm_copy_a_kernel(
void BrgemmAMXKernelExecutor::update_config(const ov::snippets::lowered::ExpressionPtr& expr,
const ov::snippets::lowered::LinearIRCPtr& linear_ir,
BrgemmAMXKernelConfig& config) const {
- return BrgemmBaseKernelExecutor::update_config(expr, linear_ir, config);
+ BrgemmBaseKernelExecutor::update_config(expr, linear_ir, config);
}

void BrgemmAMXKernelExecutor::configure_tiles_if_needed(amx_tile_config_t* config,
(file path not captured in this view; the following hunk touches BrgemmCopyBKernelConfig)
@@ -112,8 +112,8 @@ struct BrgemmCopyBKernelConfig : public snippets::KernelExecutorBase::GenericCon

private:
struct StaticParams {
- StaticParams(const element::Type& src_dt,
- const element::Type& wei_dt,
+ StaticParams(const element::Type& src_type,
+ const element::Type& wei_type,
dnnl::impl::cpu::x64::cpu_isa_t isa,
bool is_with_comp,
bool is_transposed_B,
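The six unnamed-file hunks above repeat the two patterns already illustrated: parameter-name realignment between declaration and definition (print_reg_prc, store_dword_to_byte_extension, store_dword_to_word_extension, fill_full, fill_tail, and the StaticParams constructor) and dropping "return" in front of void calls (both update_config overrides now invoke BrgemmBaseKernelExecutor::update_config as a plain statement).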
6 changes: 2 additions & 4 deletions src/plugins/intel_cpu/src/graph.cpp
@@ -405,12 +405,10 @@ void Graph::Activate() {
void Graph::Configure([[maybe_unused]] bool optimize) {
OPENVINO_ASSERT(status == Status::NotReady, "Invalid graph status");

- GraphOptimizer optimizer;
-
SortTopologically();
InitNodes();

- optimizer.ApplyCommonGraphOptimizations(*this);
+ ov::intel_cpu::GraphOptimizer::ApplyCommonGraphOptimizations(*this);

SortTopologically();

@@ -429,7 +427,7 @@

ResolveComplexInplaceConflicts();

- optimizer.ApplyImplSpecificGraphOptimizations(*this);
+ ov::intel_cpu::GraphOptimizer::ApplyImplSpecificGraphOptimizations(*this);

SortTopologically();

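Both graph.cpp hunks are readability-static-accessed-through-instance fixes: the optimization passes are static (see graph_optimizer.h below), so the local GraphOptimizer instance existed only as something to call through and is deleted outright. A minimal sketch of the pattern:

```cpp
struct Optimizer {
    static void run(int& graph) { graph += 1; }
};

int main() {
    int graph = 0;

    // Before (flagged): a static member reached through an instance.
    //     Optimizer optimizer;
    //     optimizer.run(graph);

    // After: access through the type; no instance is needed at all.
    Optimizer::run(graph);

    return graph == 1 ? 0 : 1;
}
```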
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/graph.h
@@ -310,7 +310,7 @@ class Graph {

bool graphHasDynamicInput = false;

- void Replicate(const std::shared_ptr<const ov::Model>& subgraph,
+ void Replicate(const std::shared_ptr<const ov::Model>& model,
const std::vector<node::Input::InputConfig>& inputConfigs = {},
const std::vector<node::Input::OutputConfig>& outputConfigs = {});

36 changes: 21 additions & 15 deletions src/plugins/intel_cpu/src/graph_optimizer.cpp
@@ -1650,11 +1650,17 @@ void GraphOptimizer::FuseConvolutionSumAndConvolutionSumActivation(Graph& graph)
}

if (fuseCandidate->getAlgorithm() == Algorithm::EltwiseAdd) {
- for (auto& fusedNode : binConv->fusedWith) {
+ auto isNotSpecialConvolutionAddFusing = [](const NodePtr& fusedNode) {
const auto eltwise = std::dynamic_pointer_cast<Eltwise>(fusedNode);
- if (eltwise && eltwise->isSpecialConvolutionAddFusing()) {
- return false;
- }
- }
+ return !(eltwise && eltwise->isSpecialConvolutionAddFusing());
+ };
+ auto allFusedNodesNotSpecial = [&]() {
+ return std::all_of(binConv->fusedWith.begin(),
+ binConv->fusedWith.end(),
+ isNotSpecialConvolutionAddFusing);
+ };
+ if (!allFusedNodesNotSpecial()) {
+ return false;
+ }
return true;
}
@@ -1672,13 +1678,10 @@
}

auto checkFusedWithSum = [](Convolution* conv) -> bool {
- for (const auto& node : conv->getFusedWith()) {
+ return std::any_of(conv->getFusedWith().begin(), conv->getFusedWith().end(), [](const NodePtr& node) {
const auto eltwise = std::dynamic_pointer_cast<Eltwise>(node);
- if (eltwise && eltwise->isSpecialConvolutionAddFusing()) {
- return true;
- }
- }
- return false;
+ return eltwise && eltwise->isSpecialConvolutionAddFusing();
+ });
};

auto* convNode1 = dynamic_cast<Convolution*>(parent1.get());
@@ -3185,11 +3188,14 @@ void GraphOptimizer::RemoveConvertMemoryOutput(Graph& graph) {
return false;
}

- auto&& childEdges = node->getChildEdgesAtPort(0);
- for (auto&& edge : childEdges) {
- if (Type::MemoryOutput != edge->getChild()->getType()) {
- return false;
- }
- }
+ auto allChildrenAreMemoryOutput = [&]() {
+ auto&& childEdges = node->getChildEdgesAtPort(0);
+ return std::all_of(childEdges.begin(), childEdges.end(), [](const auto& edge) {
+ return Type::MemoryOutput == edge->getChild()->getType();
+ });
+ };
+ if (!allChildrenAreMemoryOutput()) {
+ return false;
+ }

return true;
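All three graph_optimizer.cpp hunks are readability-use-anyofallof rewrites, and they show the two directions of the transformation: a loop that returns false as soon as a predicate holds becomes std::all_of over the negated predicate (first and third hunks), while a loop that returns true becomes std::any_of over the predicate itself (second hunk). A condensed sketch of both, with a placeholder predicate:

```cpp
#include <algorithm>
#include <vector>

bool isSpecial(int node) { return node < 0; }

// Loop form: "if (isSpecial(node)) return false; ... return true;"
// Algorithm form: every element satisfies the negated predicate.
bool noneSpecial(const std::vector<int>& nodes) {
    return std::all_of(nodes.begin(), nodes.end(), [](int node) { return !isSpecial(node); });
}

// Loop form: "if (isSpecial(node)) return true; ... return false;"
// Algorithm form: some element satisfies the predicate.
bool someSpecial(const std::vector<int>& nodes) {
    return std::any_of(nodes.begin(), nodes.end(), isSpecial);
}

int main() {
    const std::vector<int> nodes{1, 2, -3};
    // Both algorithms short-circuit exactly like the original early returns.
    return (!noneSpecial(nodes) && someSpecial(nodes)) ? 0 : 1;
}
```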
6 changes: 3 additions & 3 deletions src/plugins/intel_cpu/src/graph_optimizer.h
@@ -17,7 +17,7 @@ class GraphOptimizer {

public:
static void ApplyCommonGraphOptimizations(Graph& graph);
- void ApplyImplSpecificGraphOptimizations(Graph& graph);
+ static void ApplyImplSpecificGraphOptimizations(Graph& graph);
static void ShareReorders(Graph& graph);

private:
@@ -49,8 +49,8 @@ class GraphOptimizer {
static void FuseEltwiseAndSimple(Graph& graph);
static void FusePerformedAsScaleShiftAndFakeQuantize(Graph& graph);
static void FuseClampAndFakeQuantize(Graph& graph);
- void MergeTransposeAndReorder(Graph& graph);
- void MergeReorderAndTranspose(Graph& graph);
+ static void MergeTransposeAndReorder(Graph& graph);
+ static void MergeReorderAndTranspose(Graph& graph);
static void reshapeRnnSeq(Graph& graph);
static void RemoveSameConvert(Graph& graph);
static void RemoveMemoryInputConvert(Graph& graph);
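This is the header half of the readability-convert-member-functions-to-static change: each pass operates only on its Graph& argument, so marking it static documents that GraphOptimizer carries no state, and it is what permits the instance-free call sites in graph.cpp above. A minimal sketch of why the check fires, with a hypothetical class:

```cpp
class Passes {
public:
    // The body reads no data members and calls no non-static members, so
    // clang-tidy proposes static. Before:
    //     void merge(int& graph) { graph *= 2; }
    // After:
    static void merge(int& graph) { graph *= 2; }
};

int main() {
    int graph = 1;
    Passes::merge(graph);  // callable without ever constructing a Passes
    return graph == 2 ? 0 : 1;
}
```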
55 changes: 25 additions & 30 deletions src/plugins/intel_cpu/src/node.cpp
@@ -234,11 +234,9 @@ void Node::remove() {
}

bool Node::isEdgesEmpty(const std::vector<EdgeWeakPtr>& edges) {
- for (const auto& edge : edges) {
- if (edge.lock()) {
- return false;
- }
- }
- return true;
+ return std::all_of(edges.begin(), edges.end(), [](const EdgeWeakPtr& edge) {
+ return !edge.lock();
+ });
}

@@ -371,7 +369,7 @@ void Node::selectPreferPrimitiveDescriptorWithShape(const std::vector<impl_desc_
bool ignoreConstInputs) {
// Filter out dynamic shape.
if (isDynamic) {
- return selectPreferPrimitiveDescriptor(priority, ignoreConstInputs);
+ selectPreferPrimitiveDescriptor(priority, ignoreConstInputs);
+ return;
}

auto estimateReorderOverhead = [&](const ov::intel_cpu::NodeDesc& supportedPrimitiveDesc,
@@ -835,9 +834,10 @@ void Node::updateDynamicParams() {

void Node::execute(const dnnl::stream& strm, int numaId) {
if (isDynamicNode()) {
- return executeDynamic(strm, numaId);
+ executeDynamic(strm, numaId);
+ } else {
+ executeStatic(strm, numaId);
}
- return executeStatic(strm, numaId);
}

void Node::executeStatic(const dnnl::stream& strm, int numaId) {
@@ -1218,7 +1218,7 @@ void Node::toNumaNode(int numaNodeID) {
return;
}

- return toNumaNodeImpl(numaNodeID);
+ toNumaNodeImpl(numaNodeID);
}

void Node::toNumaNodeImpl(int numaNodeID) {
@@ -1521,13 +1521,9 @@ void Node::appendPostOpArgs([[maybe_unused]] const dnnl::primitive_attr& attr,
}

bool Node::isFusedWith(Type fusedNodeType) const {
- for (const auto& fusedNode : fusedWith) {
- if (fusedNode->type == fusedNodeType) {
- return true;
- }
- }
-
- return false;
+ return std::any_of(fusedWith.begin(), fusedWith.end(), [fusedNodeType](const NodePtr& fusedNode) {
+ return fusedNode->type == fusedNodeType;
+ });
}

dnnl::memory::format_tag Node::getWeightsFormatTagByDims(const VectorDims& dims) {
@@ -2153,19 +2149,18 @@ void Node::resolveInPlaceDirection() {
config.inConfs[inpPort].inPlace(-1);
initDescriptor(config);
} else if (parentInPlaceDirection == InplaceDirectionType::DOWN) {
- // search if siblings already have downstream direction
auto downstreamPeers = [&] {
- for (auto& peerEdge : pParent->getChildEdgesAtPort(pEdge->getInputNum())) {
- auto* peerNode = peerEdge->getChild().get();
- if (peerNode == this) {
- continue;
- }
- if (inPlaceDirection(peerNode, PortType::INPUT, peerEdge->getOutputNum()) ==
- InplaceDirectionType::DOWN) {
- return true;
- }
- }
- return false;
+ const auto& childEdges = pParent->getChildEdgesAtPort(pEdge->getInputNum());
+ return std::any_of(childEdges.begin(),
+ childEdges.end(),
+ [this, &inPlaceDirection](const EdgePtr& edge) {
+ auto* peerNode = edge->getChild().get();
+ if (peerNode == this) {
+ return false;
+ }
+ return inPlaceDirection(peerNode, PortType::INPUT, edge->getOutputNum()) ==
+ InplaceDirectionType::DOWN;
+ });
}();
if (downstreamPeers) {
// when there is an downstream peer we have to resolve upstream inplace for the node
@@ -2234,8 +2229,8 @@
} else {
auto result = inPlaceDirection(pChild, PortType::INPUT, edge->getOutputNum());
if (InplaceDirectionType::CYCLIC == result) {
- return searchReferencingOutput(pChild,
- pChild->inPlaceInputPort(edge->getOutputNum()));
+ searchReferencingOutput(pChild, pChild->inPlaceInputPort(edge->getOutputNum()));
+ return;
}
}
}
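One subtlety in the resolveInPlaceDirection hunk: moving the sibling search into std::any_of requires capturing this explicitly so the lambda can skip the current node, and the loop's continue becomes a return false for that element. Both forms visit edges in order and stop at the first downstream peer, so behavior is unchanged. A reduced sketch of the capture pattern with simplified types:

```cpp
#include <algorithm>
#include <vector>

struct Node {
    std::vector<Node*> peers;

    bool hasDownstreamPeer() const {
        // [this] lets the lambda skip the current node, mirroring the
        // `continue` of the loop form; std::any_of still short-circuits on
        // the first qualifying peer, just like the early `return true`.
        return std::any_of(peers.begin(), peers.end(), [this](const Node* peer) {
            if (peer == this) {
                return false;  // was `continue;` in the loop
            }
            return peer != nullptr;  // stand-in for the real direction check
        });
    }
};

int main() {
    Node a;
    Node b;
    a.peers = {&a, &b};
    return a.hasDownstreamPeer() ? 0 : 1;
}
```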