Commit bd594f1

Revert "ggml : remove ggml_task_type and GGML_PERF (ggml-org#8017)"
This reverts commit 95f57bb.
1 parent 1d78061 · commit bd594f1
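For reference, this revert reinstates the ggml_task_type enum and the ggml_compute_params struct that ggml-org#8017 had removed. Below is a sketch of the restored definitions as they stood in ggml.h before the removal; the type and ith fields and the GGML_TASK_TYPE_* values are confirmed by the diff below, while nth, wsize, and wdata are reconstructed from memory and may differ slightly from the verbatim header:

    // Restored by this revert (approximate reconstruction of pre-#8017 ggml.h)
    enum ggml_task_type {
        GGML_TASK_TYPE_INIT = 0,
        GGML_TASK_TYPE_COMPUTE,
        GGML_TASK_TYPE_FINALIZE,
    };

    struct ggml_compute_params {
        enum ggml_task_type type; // phase of the op being executed
        int ith;                  // thread index (0 is the "main" thread)
        int nth;                  // number of threads
        size_t wsize;             // size of the shared work buffer
        void * wdata;             // shared work buffer
    };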

File tree

8 files changed (+1078, -398 lines)

CMakeLists.txt

Lines changed: 7 additions & 0 deletions

@@ -144,6 +144,9 @@ option(LLAMA_BUILD_SERVER "llama: build server example"
 option(LLAMA_LASX "llama: enable lasx" ON)
 option(LLAMA_LSX  "llama: enable lsx"  ON)

+# add perf arguments
+option(LLAMA_PERF "llama: enable perf" OFF)
+
 # Required for relocatable CMake package
 include(${CMAKE_CURRENT_SOURCE_DIR}/scripts/build-info.cmake)

@@ -867,6 +870,10 @@ if (LLAMA_CPU_HBM)
     target_link_libraries(ggml PUBLIC memkind)
 endif()

+if (LLAMA_PERF)
+    add_compile_definitions(GGML_PERF)
+endif()
+
 function(get_flags CCID CCVER)
     set(C_FLAGS "")
     set(CXX_FLAGS "")
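With LLAMA_PERF restored, the GGML_PERF instrumentation (per-op run counters and timings recorded on each tensor, as in pre-#8017 ggml) can be compiled in from CMake. A typical configure/build invocation, using only the option added above:

    cmake -B build -DLLAMA_PERF=ON
    cmake --build build --config Release

The option defaults to OFF, so ordinary builds are unaffected.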

Makefile

Lines changed: 3 additions & 0 deletions

@@ -344,6 +344,9 @@ ifdef LLAMA_GPROF
 	MK_CFLAGS   += -pg
 	MK_CXXFLAGS += -pg
 endif
+ifdef LLAMA_PERF
+	MK_CPPFLAGS += -DGGML_PERF
+endif

 # Architecture specific
 # TODO: probably these flags need to be tweaked on some architectures
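The Makefile gains the equivalent switch, so the same define can be enabled without CMake:

    make clean
    make LLAMA_PERF=1

As with the CMake option, GGML_PERF stays off unless explicitly requested.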

ggml-vulkan.cpp

Lines changed: 26 additions & 10 deletions

@@ -513,8 +513,8 @@ static size_t vk_skip_checks;
 static size_t vk_output_tensor;

 static void ggml_vk_print_tensor(ggml_backend * ctx, const ggml_tensor * tensor, const char * name);
-static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_tensor * tensor);
-static void ggml_vk_check_results_1(ggml_backend_vk_context * ctx, ggml_tensor * tensor);
+static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_compute_params * params, ggml_tensor * tensor);
+static void ggml_vk_check_results_1(ggml_backend_vk_context * ctx, ggml_compute_params * params, ggml_tensor * tensor);
 #endif

 typedef void (*ggml_vk_func_t)(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst);

@@ -5644,7 +5644,7 @@ static void ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * node
     }
 }

-static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_tensor * tensor){
+static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_compute_params * params, ggml_tensor * tensor){
     ggml_tensor_extra_gpu * extra = nullptr;

     switch (tensor->op) {

@@ -5697,10 +5697,17 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_tensor * tensor)
         return false;
     }

+    if (params->ith != 0) {
+        return true;
+    }
+    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
+        return true;
+    }
+
     VK_LOG_DEBUG("ggml_vk_compute_forward(" << tensor << ", name=" << tensor->name << ", op=" << ggml_op_name(tensor->op) << ", type=" << tensor->type << ", ne0=" << tensor->ne[0] << ", ne1=" << tensor->ne[1] << ", ne2=" << tensor->ne[2] << ", ne3=" << tensor->ne[3] << ", nb0=" << tensor->nb[0] << ", nb1=" << tensor->nb[1] << ", nb2=" << tensor->nb[2] << ", nb3=" << tensor->nb[3] << ", view_src=" << tensor->view_src << ", view_offs=" << tensor->view_offs << ")");

 #ifdef GGML_VULKAN_CHECK_RESULTS
-    ggml_vk_check_results_0(ctx, tensor);
+    ggml_vk_check_results_0(ctx, params, tensor);
 #endif

     vk_context& subctx = ctx->gc.contexts[extra->ctx_idx];

@@ -6207,20 +6214,23 @@ GGML_CALL static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph)
         ggml_vk_build_graph(ctx,cgraph->nodes[i], i == last_node);
     }

+    ggml_compute_params params = {};
+    params.type = GGML_TASK_TYPE_COMPUTE;
+    params.ith = 0;
     for (int i = 0; i < cgraph->n_nodes; i++) {
         ggml_tensor * node = cgraph->nodes[i];

         if (ggml_vk_is_empty(node)) {
             continue;
         }

-        bool ok = ggml_vk_compute_forward(ctx, node);
+        bool ok = ggml_vk_compute_forward(ctx, &params, node);
         if (!ok) {
             fprintf(stderr, "%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op));
         }
 #ifdef GGML_VULKAN_CHECK_RESULTS
         else {
-            ggml_vk_check_results_1(ctx, node);
+            ggml_vk_check_results_1(ctx, &params, node);
         }
 #endif
         GGML_ASSERT(ok);

@@ -6590,8 +6600,11 @@ void * comp_result;
 size_t comp_size;
 size_t comp_nb[GGML_MAX_DIMS];
 size_t check_counter = 0;
-static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_tensor * tensor) {
-    if (tensor->op == GGML_OP_TRANSPOSE) {
+static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_compute_params * params, ggml_tensor * tensor) {
+    if (params->ith != 0) {
+        return;
+    }
+    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE || tensor->op == GGML_OP_TRANSPOSE) {
         return;
     }

@@ -6895,8 +6908,11 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_tensor * tensor)
     ggml_free(ggml_ctx);
 }

-static void ggml_vk_check_results_1(ggml_backend_vk_context * ctx, ggml_tensor * tensor) {
-    if (tensor->op == GGML_OP_TRANSPOSE) {
+static void ggml_vk_check_results_1(ggml_backend_vk_context * ctx, ggml_compute_params * params, ggml_tensor * tensor) {
+    if (params->ith != 0) {
+        return;
+    }
+    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE || tensor->op == GGML_OP_TRANSPOSE) {
         return;
     }
     if (!(vk_output_tensor > 0 && vk_output_tensor == check_counter) && check_counter <= vk_skip_checks) {
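A note on the guards added above: the restored ggml_compute_params interface allows a per-op function to be invoked once per thread (ith) and once per task phase (INIT/COMPUTE/FINALIZE). The Vulkan backend does all of its work exactly once, on thread 0 during the COMPUTE phase, so the early returns turn every other combination into a no-op. A minimal sketch of that calling pattern (drive_node is a hypothetical helper for illustration, not part of this commit; ggml_vk_compute_forward is the real entry point patched above):

    // Hypothetical driver showing the three-phase scheme; only the
    // COMPUTE call on thread 0 does any work in the Vulkan backend.
    static void drive_node(ggml_backend_vk_context * ctx, ggml_tensor * node) {
        ggml_compute_params params = {};
        params.ith = 0; // single thread, as in ggml_backend_vk_graph_compute
        params.nth = 1;

        params.type = GGML_TASK_TYPE_INIT;
        ggml_vk_compute_forward(ctx, &params, node); // returns early
        params.type = GGML_TASK_TYPE_COMPUTE;
        ggml_vk_compute_forward(ctx, &params, node); // submits the op
        params.type = GGML_TASK_TYPE_FINALIZE;
        ggml_vk_compute_forward(ctx, &params, node); // returns early
    }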
