
Commit eb3f48b

ggerganov authored and iThalay committed
sync : ggml (new ops, new backend, etc) (ggml-org#1602)
* sync : ggml (new ops, new backend, etc)
* whisper : remove obsolete broadcasting code
* ggml : remove backend self-registers + fix ggml_concat + n_task logic
* metal : fix assert
* metal : print resource path
* whisper : fix bug if metal init fails
1 parent 89f205b · commit eb3f48b

16 files changed: +3989 −1264 lines

ggml-alloc.c

Lines changed: 43 additions & 8 deletions
```diff
@@ -137,7 +137,7 @@ void ggml_tallocr_alloc(ggml_tallocr_t alloc, struct ggml_tensor * tensor) {
 
 #ifdef GGML_ALLOCATOR_DEBUG
     add_allocated_tensor(alloc, tensor);
-    size_t cur_max = (char*)addr - (char*)alloc->data + size;
+    size_t cur_max = (char*)addr - (char*)alloc->base + size;
     if (cur_max > alloc->max_size) {
         printf("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
         for (int i = 0; i < 1024; i++) {
@@ -168,10 +168,6 @@ static void ggml_tallocr_free_tensor(ggml_tallocr_t alloc, struct ggml_tensor *
     size = aligned_offset(NULL, size, alloc->alignment);
     AT_PRINTF("%s: freeing %s at %p (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, ptr, size, alloc->n_free_blocks);
 
-    if (!alloc->measure) {
-        ggml_backend_buffer_free_tensor(alloc->buffer, tensor);
-    }
-
 #ifdef GGML_ALLOCATOR_DEBUG
     remove_allocated_tensor(alloc, tensor);
 #endif
@@ -237,7 +233,7 @@ void ggml_tallocr_reset(ggml_tallocr_t alloc) {
 }
 
 ggml_tallocr_t ggml_tallocr_new(void * data, size_t size, size_t alignment) {
-    struct ggml_backend_buffer * buffer = ggml_backend_cpu_buffer_from_ptr(NULL, data, size);
+    struct ggml_backend_buffer * buffer = ggml_backend_cpu_buffer_from_ptr(data, size);
 
     ggml_tallocr_t alloc = (ggml_tallocr_t)malloc(sizeof(struct ggml_tallocr));
 
@@ -449,7 +445,6 @@ static ggml_tallocr_t node_tallocr(ggml_gallocr_t galloc, struct ggml_tensor * n
 static void init_view(ggml_gallocr_t galloc, struct ggml_tensor * view, bool update_backend) {
     ggml_tallocr_t alloc = node_tallocr(galloc, view);
 
-    //printf("init_view: %s from src %s\n", view->name, view->view_src->name);
     GGML_ASSERT(view->view_src != NULL && view->view_src->data != NULL);
     if (update_backend) {
         view->backend = view->view_src->backend;
@@ -459,7 +454,7 @@ static void init_view(ggml_gallocr_t galloc, struct ggml_tensor * view, bool upd
 
     // FIXME: the view should be initialized by the owning buffer, but currently this breaks the CUDA backend
     // due to the ggml_tensor_extra_gpu ring buffer overwriting the KV cache extras
-    assert(ggml_tallocr_is_measure(alloc) || !view->buffer || view->buffer->backend == alloc->buffer->backend);
+    assert(ggml_tallocr_is_measure(alloc) || !view->buffer || view->buffer->buft == alloc->buffer->buft);
 
     if (!alloc->measure) {
         ggml_backend_buffer_init_tensor(alloc->buffer, view);
@@ -765,3 +760,43 @@ size_t ggml_allocr_max_size(ggml_allocr_t alloc) {
 size_t ggml_allocr_alloc_graph(ggml_allocr_t alloc, struct ggml_cgraph * graph) {
     return ggml_gallocr_alloc_graph(alloc->galloc, alloc->talloc, graph);
 }
+
+// utils
+ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft) {
+    GGML_ASSERT(ggml_get_no_alloc(ctx) == true);
+
+    size_t alignment = ggml_backend_buft_get_alignment(buft);
+
+    size_t nbytes = 0;
+    for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
+        if (t->data == NULL && t->view_src == NULL) {
+            nbytes += GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), alignment);
+        }
+    }
+
+    if (nbytes == 0) {
+        fprintf(stderr, "%s: no tensors to allocate\n", __func__);
+        return NULL;
+    }
+
+    ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, nbytes);
+    ggml_tallocr_t tallocr = ggml_tallocr_new_from_buffer(buffer);
+
+    for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
+        if (t->data == NULL) {
+            if (t->view_src == NULL) {
+                ggml_tallocr_alloc(tallocr, t);
+            } else {
+                ggml_backend_view_init(buffer, t);
+            }
+        }
+    }
+
+    ggml_tallocr_free(tallocr);
+
+    return buffer;
+}
+
+ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, ggml_backend_t backend) {
+    return ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_get_default_buffer_type(backend));
+}
```
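
The new helper walks the context twice: once to size a single buffer (padding each unallocated, non-view tensor to the buffer type's alignment), and once to place tensors in it, initializing views via ggml_backend_view_init. A minimal sketch of how a caller might use it (not part of this commit; assumes the CPU backend and the headers shown above):

```c
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"

int main(void) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ ggml_tensor_overhead() * 16, // metadata only
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true, // required by ggml_backend_alloc_ctx_tensors
    };
    struct ggml_context * ctx = ggml_init(params);

    // define tensors; no data is allocated yet because no_alloc is set
    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 64);
    struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 64);

    ggml_backend_t backend = ggml_backend_cpu_init();

    // one buffer sized for every unallocated, non-view tensor in ctx
    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);

    // ... set data and use a and b ...
    (void) a; (void) b;

    ggml_backend_buffer_free(buf);
    ggml_backend_free(backend);
    ggml_free(ctx);
    return 0;
}
```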

ggml-alloc.h

Lines changed: 7 additions & 0 deletions
```diff
@@ -8,6 +8,7 @@ extern "C" {
 
 struct ggml_backend;
 struct ggml_backend_buffer;
+struct ggml_backend_buffer_type;
 
 //
 // Legacy API
@@ -80,6 +81,12 @@ GGML_API void ggml_gallocr_alloc_graph_n(
         struct ggml_hash_set hash_set,
         ggml_tallocr_t * hash_node_talloc);
 
+
+// Utils
+// Create a buffer and allocate all the tensors in a ggml_context
+GGML_API struct ggml_backend_buffer * ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, struct ggml_backend_buffer_type * buft);
+GGML_API struct ggml_backend_buffer * ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, struct ggml_backend * backend);
+
 #ifdef __cplusplus
 }
 #endif
```
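
Of the two prototypes, the _from_buft variant lets the caller pick an explicit buffer type instead of the backend's default. A short sketch (assuming ggml_backend_cpu_buffer_type() from the ggml-backend.h in this sync) of pinning a context's tensors in host memory:

```c
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"

// Allocate every tensor in ctx in host (CPU) memory, regardless of which
// backend will eventually run the graph.
static ggml_backend_buffer_t alloc_tensors_on_host(struct ggml_context * ctx) {
    return ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_cpu_buffer_type());
}
```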

ggml-backend-impl.h

Lines changed: 46 additions & 21 deletions
```diff
@@ -12,31 +12,50 @@ extern "C" {
     // Backend buffer
     //
 
+    // buffer type
+    typedef void * ggml_backend_buffer_type_context_t;
+
+    struct ggml_backend_buffer_type_i {
+        ggml_backend_buffer_t (*alloc_buffer)    (ggml_backend_buffer_type_t buft, size_t size);
+        size_t                (*get_alignment)   (ggml_backend_buffer_type_t buft); // tensor alignment
+        size_t                (*get_alloc_size)  (ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
+        bool                  (*supports_backend)(ggml_backend_buffer_type_t buft, ggml_backend_t backend); // check if the buffer type is usable by the backend
+    };
+
+    struct ggml_backend_buffer_type {
+        struct ggml_backend_buffer_type_i  iface;
+        ggml_backend_buffer_type_context_t context;
+    };
+
+    // buffer
     typedef void * ggml_backend_buffer_context_t;
 
     struct ggml_backend_buffer_i {
-        void   (*free_buffer)   (ggml_backend_buffer_t buffer);
-        void * (*get_base)      (ggml_backend_buffer_t buffer); // get base pointer
-        size_t (*get_alloc_size)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-allocation callback
-        void   (*init_tensor)   (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // post-allocation callback
-        void   (*free_tensor)   (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-free callback
+        void   (*free_buffer)(ggml_backend_buffer_t buffer);
+        //void (*reset)      (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
+        void * (*get_base)   (ggml_backend_buffer_t buffer);
+        void   (*init_tensor)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+        void   (*set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+        void   (*get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+        // (optional) copy tensor between different buffer types, allow for single-copy transfers
+        void (*cpy_tensor_from)(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst);
+        void (*cpy_tensor_to)  (ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst);
     };
 
     struct ggml_backend_buffer {
-        struct ggml_backend_buffer_i iface;
-
-        ggml_backend_t backend;
+        struct ggml_backend_buffer_i  iface;
+        ggml_backend_buffer_type_t    buft;
         ggml_backend_buffer_context_t context;
-
         size_t size;
     };
 
-    GGML_API ggml_backend_buffer_t ggml_backend_buffer_init(
-        struct ggml_backend * backend,
+    ggml_backend_buffer_t ggml_backend_buffer_init(
+               ggml_backend_buffer_type_t    buft,
        struct ggml_backend_buffer_i          iface,
               ggml_backend_buffer_context_t  context,
               size_t                         size);
 
+
     //
     // Backend
     //
@@ -49,20 +68,17 @@ extern "C" {
         void (*free)(ggml_backend_t backend);
 
         // buffer allocation
-        ggml_backend_buffer_t (*alloc_buffer)(ggml_backend_t backend, size_t size);
+        ggml_backend_buffer_type_t (*get_default_buffer_type)(ggml_backend_t backend);
 
-        // get buffer alignment
-        size_t (*get_alignment)(ggml_backend_t backend);
-
-        // tensor data access
-        // these functions can be asynchronous, helper functions are provided for synchronous access that automatically call synchronize
+        // (optional) asynchronous tensor data access
         void (*set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
         void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
-        void (*synchronize)     (ggml_backend_t backend);
 
-        // (optional) copy tensor between different backends, allow for single-copy transfers
-        void (*cpy_tensor_from)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
-        void (*cpy_tensor_to)  (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
+        // (optional) asynchronous tensor copy
+        void (*cpy_tensor_from_async)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
+        void (*cpy_tensor_to_async)  (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
+
+        void (*synchronize)(ggml_backend_t backend);
 
         // compute graph with a plan
         ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, struct ggml_cgraph * cgraph);
@@ -82,6 +98,15 @@ extern "C" {
         ggml_backend_context_t context;
     };
 
+
+    //
+    // Backend registry
+    //
+
+    typedef ggml_backend_t (*ggml_backend_init_fn)(const char * params, void * user_data);
+
+    void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml_backend_buffer_type_t default_buffer_type, void * user_data);
+
 #ifdef __cplusplus
 }
 #endif
```
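
Per the commit message, backends no longer self-register; registration goes through the new registry so ggml can enumerate backends by name. A sketch of how a backend could hook in; everything named "mybackend" is hypothetical, and only ggml_backend_register and ggml_backend_init_fn come from this header:

```c
#include "ggml-backend-impl.h"

// hypothetical constructors the backend would provide elsewhere
extern ggml_backend_t             mybackend_init(void);
extern ggml_backend_buffer_type_t mybackend_buffer_type(void);

// adapter matching the ggml_backend_init_fn signature
static ggml_backend_t mybackend_reg_init(const char * params, void * user_data) {
    (void) params;    // this backend takes no init-time parameters
    (void) user_data; // no extra state threaded through the registry
    return mybackend_init();
}

void mybackend_register(void) {
    // name, init function, default buffer type, user data
    ggml_backend_register("MyBackend", mybackend_reg_init, mybackend_buffer_type(), NULL);
}
```

The default buffer type passed at registration is what ggml_backend_get_default_buffer_type reports, which is also what ggml_backend_alloc_ctx_tensors in ggml-alloc.c falls back to.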
