From c2f5be7c11f51e155419d270b861ce765b247dd0 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 8 Nov 2022 20:29:17 +0200 Subject: [PATCH 1/4] diarization : some unsuccessful experiments with audio embd clustering --- examples/main/main.cpp | 2 + whisper.cpp | 168 ++++++++++++++++++++++++++++++++++++++--- whisper.h | 4 + 3 files changed, 162 insertions(+), 12 deletions(-) diff --git a/examples/main/main.cpp b/examples/main/main.cpp index b8366b79f45..105082cbafb 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -618,6 +618,8 @@ int main(int argc, char ** argv) { fprintf(stderr, "%s: failed to process audio\n", argv[0]); return 10; } + + whisper_full_cluster_segments(ctx); } // output stuff diff --git a/whisper.cpp b/whisper.cpp index 04cbc36b2ca..4c208b9b497 100644 --- a/whisper.cpp +++ b/whisper.cpp @@ -603,6 +603,8 @@ struct whisper_context { // [EXPERIMENTAL] speed-up techniques int32_t exp_n_audio_ctx; // 0 - use default + std::vector audio_embd; + void use_buf(struct ggml_context * ctx, int i) { #if defined(WHISPER_USE_SCRATCH) size_t last_size = 0; @@ -1707,18 +1709,34 @@ static bool whisper_encode( } // cur - //{ - // printf("ne0 = %d\n", cur->ne[0]); - // printf("ne1 = %d\n", cur->ne[1]); - // for (int i = 0; i < 10; ++i) { - // printf("%8.4f ", ((float *)(cur->data))[i]); - // } - // printf("... "); - // for (int i = cur->ne[0] - 10; i < cur->ne[0]; ++i) { - // printf("%8.4f ", ((float *)(cur->data))[i]); - // } - // printf("\n"); - //} + { + //printf("ne0 = %d\n", cur->ne[0]); + //printf("ne1 = %d\n", cur->ne[1]); + //for (int i = 0; i < 10; ++i) { + // printf("%8.4f ", ((float *)(cur->data))[i]); + //} + //printf("... "); + //for (int i = cur->ne[0] - 10; i < cur->ne[0]; ++i) { + // printf("%8.4f ", ((float *)(cur->data))[i]); + //} + //printf("\n"); + } + + { + const int i0 = std::min(mel_offset, mel_inp.n_len); + const int i1 = std::min(mel_offset + 2*n_ctx, mel_inp.n_len); + + printf("i0 = %d, i1 = %d, (i1 - i0) = %d, embd size = %d\n", i0, i1, i1 - i0, cur->ne[0]); + + wctx.audio_embd.clear(); + wctx.audio_embd.resize(cur->ne[0], 0.0f); + for (int j = 0; j < cur->ne[0]; ++j) { + for (int i = i0; i < i1; ++i) { + wctx.audio_embd[j] += ((float *)(cur->data))[(i - i0)*cur->ne[0] + j]; + } + wctx.audio_embd[j] /= (i1 - i0); + } + } // pre-compute cross-attention memory { @@ -4806,3 +4824,129 @@ static void whisper_exp_compute_token_level_timestamps( // } //} } + +// +// diarization stuff +// + +void whisper_full_cluster_segments(struct whisper_context * ctx) { + const int n_segments = ctx->result_all.size(); + printf("%s: clustering %d segments\n", __func__, n_segments); + + const auto mel_len_save = ctx->mel.n_len; + printf("%s: mel_len_save = %d\n", __func__, mel_len_save); + + std::vector> features(n_segments); + + for (int i = 0; i < n_segments; ++i) { + const auto & segment_i = ctx->result_all[i]; + printf("%s: segment %d: t0 = %d, t1 = %d, text = %s\n", __func__, i, (int) segment_i.t0, (int) segment_i.t1, segment_i.text.c_str()); + + ctx->mel.n_len = segment_i.t1; + whisper_encode(ctx, segment_i.t0, 4); + + features[i] = ctx->audio_embd; + } + + const int n_features = features[0].size(); + + // fuzzy c-means clustering + const int n_clusters = 4; + + std::vector> centroids(n_clusters, std::vector(n_features, 0.0)); + std::vector> membership(n_segments, std::vector(n_clusters, 0.0)); + + // initialize the centroids + for (int i = 0; i < n_clusters; ++i) { + for (int j = 0; j < n_features; ++j) { + centroids[i][j] = features[i][j]; + } + } + + // 
initialize the membership + for (int i = 0; i < n_segments; ++i) { + membership[i][i % n_clusters] = 1.0; + } + + // iterate + for (int i = 0; i < 100; ++i) { + // update the centroids + for (int j = 0; j < n_clusters; ++j) { + for (int k = 0; k < n_features; ++k) { + centroids[j][k] = 0.0; + } + } + + for (int j = 0; j < n_segments; ++j) { + for (int k = 0; k < n_clusters; ++k) { + for (int l = 0; l < n_features; ++l) { + centroids[k][l] += membership[j][k]*features[j][l]; + } + } + } + + for (int j = 0; j < n_clusters; ++j) { + float sum = 0.0; + for (int k = 0; k < n_segments; ++k) { + sum += membership[k][j]; + } + + for (int k = 0; k < n_features; ++k) { + centroids[j][k] /= sum; + } + } + + // update the membership + for (int j = 0; j < n_segments; ++j) { + for (int k = 0; k < n_clusters; ++k) { + float sum = 0.0; + for (int l = 0; l < n_clusters; ++l) { + //sum += std::pow(whisper_distance(features[j], centroids[k])/whisper_distance(features[j], centroids[l]), 2.0/(2.0 - 1.0)); + + // use the euclidean distance + double d0 = 0.0; + for (int m = 0; m < n_features; ++m) { + d0 += std::pow(features[j][m] - centroids[k][m], 2.0); + } + d0 = std::sqrt(d0); + + double d1 = 0.0; + for (int m = 0; m < n_features; ++m) { + d1 += std::pow(features[j][m] - centroids[l][m], 2.0); + } + d1 = std::sqrt(d1); + if (d1 == 0.0) { + sum += 1.0; + } else { + sum += std::pow(d0/d1, 2.0/(2.0 - 1.0)); + } + } + + membership[j][k] = 1.0/sum; + } + } + + // print the membership + for (int i = 0; i < n_segments; ++i) { + printf("%s: membership %d: ", __func__, i); + for (int j = 0; j < n_clusters; ++j) { + printf("%f ", membership[i][j]); + } + printf(" '%s'\n", ctx->result_all[i].text.c_str()); + } + printf("----------------\n"); + } + + // print the centroids + //for (int i = 0; i < n_clusters; ++i) { + // printf("%s: centroid %d: ", __func__, i); + // for (int j = 0; j < n_features; ++j) { + // printf("%f ", centroids[i][j]); + // } + // printf("\n"); + //} + + // restore the mel length + ctx->mel.n_len = mel_len_save; +} + diff --git a/whisper.h b/whisper.h index 7eece797c16..9e40e702c9f 100644 --- a/whisper.h +++ b/whisper.h @@ -372,6 +372,10 @@ extern "C" { WHISPER_API int whisper_bench_memcpy(int n_threads); WHISPER_API int whisper_bench_ggml_mul_mat(int n_threads); + // Temporary experimental API + + WHISPER_API void whisper_full_cluster_segments(struct whisper_context * ctx); + #ifdef __cplusplus } #endif From d5d7769fa70f8641e88d1475c790def95c9c28ea Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 18 Feb 2023 18:36:03 +0200 Subject: [PATCH 2/4] diarization : more unsuccessful clustering experiments --- ggml.c | 187 ++++++++++++++++++++++++++++++++++++++++++ ggml.h | 10 +++ whisper.cpp | 232 ++++++++++++++++++++++++++++++---------------------- 3 files changed, 329 insertions(+), 100 deletions(-) diff --git a/ggml.c b/ggml.c index d67612c36a3..54094f04f0b 100644 --- a/ggml.c +++ b/ggml.c @@ -8517,6 +8517,193 @@ enum ggml_opt_result ggml_opt( //////////////////////////////////////////////////////////////////////////////// +void ggml_svd_reduce_dims( + int ne0, + int ne1, + float * a, + int nd) { + int n = ne1; + int m = ne0; + + float * A = a; + float * A0 = (float *) malloc(n * m * sizeof(float)); + + // average vector + float * M = (float *) malloc(m * sizeof(float)); + + { + for (int j = 0; j < m; ++j) { + M[j] = 0.0f; + } + for (int i = 0; i < n; ++i) { + for (int j = 0; j < m; ++j) { + M[j] += A[i * m + j]; + } + } + for (int j = 0; j < m; ++j) { + M[j] /= (float) n; + } + } + + // 
subtract average vector + for (int i = 0; i < n; ++i) { + for (int j = 0; j < m; ++j) { + A[i * m + j] -= M[j]; + } + } + + memcpy(A0, A, n * m * sizeof(float)); + + // print A + //printf("A:\n"); + //for (int i = 0; i < n; ++i) { + // printf("col %d : ", i); + // for (int j = 0; j < m; ++j) { + // printf("%9.5f ", A[i * m + j]); + // } + // printf("\n"); + //} + //printf("\n"); + + // SVD + // A = U * S * V^T + + float * U = (float *) malloc(n * m * sizeof(float)); + float * S = (float *) malloc(n * sizeof(float)); + float * V = (float *) malloc(n * n * sizeof(float)); + + int lda = m; + int ldu = m; + int ldvt = n; + + float work_size; + int lwork = -1; + int info = 0; + + sgesvd_("S", "S", &m, &n, A, &lda, S, U, &ldu, V, &ldvt, &work_size, &lwork, &info); + + lwork = (int) work_size; + + //printf("work_size = %f, info = %d, lwork = %d\n", work_size, info, lwork); + + float * work = (float *) malloc(lwork * sizeof(float)); + + sgesvd_("S", "S", &m, &n, A, &lda, S, U, &ldu, V, &ldvt, work, &lwork, &info); + + free(work); + + // print U + //printf("U:\n"); + //for (int i = 0; i < n; ++i) { + // printf("col %d : ", i); + // for (int j = 0; j < m; ++j) { + // printf("%9.5f ", U[i * m + j]); + // } + // printf("\n"); + //} + //printf("\n"); + + // normalize S + { + double sum = 0.0; + for (int i = 0; i < n; ++i) { + sum += S[i]; + } + sum *= sqrt((double) m); + for (int i = 0; i < n; ++i) { + S[i] /= sum; + } + } + + // print S + //printf("S:\n"); + //for (int i = 0; i < n; ++i) { + // printf("- %d = %9.5f\n", i, S[i]); + //} + //printf("\n"); + + // print V + //printf("V:\n"); + //for (int i = 0; i < n; ++i) { + // printf("col %d : ", i); + // for (int j = 0; j < n; ++j) { + // printf("%9.5f ", V[i * n + j]); + // } + // printf("\n"); + //} + //printf("\n"); + + // print A + //printf("A:\n"); + //for (int i = 0; i < n; ++i) { + // printf("col %d : ", i); + // for (int j = 0; j < m; ++j) { + // printf("%9.5f ", A[i * m + j]); + // } + // printf("\n"); + //} + //printf("\n"); + + // compute singular vectors in U + for (int i = 0; i < n; ++i) { + for (int j = 0; j < m; ++j) { + U[i * m + j] *= S[i]; + } + } + + // normalize U + for (int i = 0; i < n; ++i) { + double sum = 0.0; + for (int j = 0; j < m; ++j) { + sum += U[i * m + j] * U[i * m + j]; + } + sum = sqrt(sum); + for (int j = 0; j < m; ++j) { + U[i * m + j] /= sum*sqrt((double) m); + } + } + + // print U + //printf("U:\n"); + //for (int i = 0; i < n; ++i) { + // printf("col %d : ", i); + // for (int j = 0; j < m; ++j) { + // printf("%9.5f ", U[i * m + j]); + // } + // printf("\n"); + //} + //printf("\n"); + + + // project A0 onto U + for (int i = 0; i < n; ++i) { + for (int j = 0; j < n; ++j) { + A[i * nd + j] = 0.0f; + for (int k = 0; k < m; ++k) { + A[i * nd + j] += A0[i * m + k] * U[j * m + k]; + } + } + } + + // print A + //printf("A:\n"); + //for (int i = 0; i < n; ++i) { + // printf("col %d : ", i); + // for (int j = 0; j < n; ++j) { + // printf("%9.5f ", A[i * n + j]); + // } + // printf("\n"); + //} + //printf("\n"); + + free(U); + free(S); + free(V); + free(A0); +} + +//////////////////////////////////////////////////////////////////////////////// + int ggml_cpu_has_avx(void) { #if defined(__AVX__) return 1; diff --git a/ggml.h b/ggml.h index 18f317bec04..e63b286125d 100644 --- a/ggml.h +++ b/ggml.h @@ -726,6 +726,16 @@ enum ggml_opt_result ggml_opt( struct ggml_opt_params params, struct ggml_tensor * f); +// +// Temp stuff +// + +void ggml_svd_reduce_dims( + int ne0, + int ne1, + float * a, + int nd); + // // system info // 
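The ggml_svd_reduce_dims() helper declared above is the dimensionality-reduction step for the clustering experiments: it treats the buffer as ne1 row vectors of length ne0, runs LAPACK's sgesvd_ on them, and overwrites the buffer in place with each row projected onto the leading nd singular vectors. A minimal usage sketch, mirroring how the later patches in this series call it — the variable names and example sizes below are illustrative, not part of the whisper.cpp API:

// illustrative usage of ggml_svd_reduce_dims(); names and sizes are placeholders
const int n_segments = 16;                       // number of embedding vectors (example value)
const int n_state    = 384;                      // embedding dimension (example value)

std::vector<float> embd(n_segments*n_state);     // one n_state-dim embedding per segment, row-major
// ... fill embd[i*n_state + j] from the encoder output for segment i ...

const int n_features = std::min(4, n_segments);  // target dimensionality

// in place: the first n_segments*n_features floats of embd now hold the reduced vectors
ggml_svd_reduce_dims(n_state, n_segments, embd.data(), n_features);
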
diff --git a/whisper.cpp b/whisper.cpp index 4c208b9b497..0b91a151c8f 100644 --- a/whisper.cpp +++ b/whisper.cpp @@ -603,8 +603,6 @@ struct whisper_context { // [EXPERIMENTAL] speed-up techniques int32_t exp_n_audio_ctx; // 0 - use default - std::vector audio_embd; - void use_buf(struct ggml_context * ctx, int i) { #if defined(WHISPER_USE_SCRATCH) size_t last_size = 0; @@ -1360,7 +1358,8 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con static bool whisper_encode( whisper_context & wctx, const int mel_offset, - const int n_threads) { + const int n_threads, + bool repeat = false) { const int64_t t_start_us = ggml_time_us(); const auto & model = wctx.model; @@ -1392,9 +1391,24 @@ static bool whisper_encode( const int i0 = std::min(mel_offset, mel_inp.n_len); const int i1 = std::min(mel_offset + 2*n_ctx, mel_inp.n_len); - for (int j = 0; j < mel_inp.n_mel; ++j) { - for (int i = i0; i < i1; ++i) { - dst[j*2*n_ctx + (i - i0)] = mel_inp.data[j*mel_inp.n_len + i]; + if (repeat == false) { + for (int j = 0; j < mel_inp.n_mel; ++j) { + for (int i = i0; i < i1; ++i) { + dst[j*2*n_ctx + (i - i0)] = mel_inp.data[j*mel_inp.n_len + i]; + } + } + } else { + for (int j = 0; j < mel_inp.n_mel; ++j) { + int k = 0; + while (k < 2*n_ctx) { + for (int i = i0; i < i1; ++i) { + dst[j*2*n_ctx + k] = mel_inp.data[j*mel_inp.n_len + i]; + k++; + if (k >= 2*n_ctx) { + break; + } + } + } } } } @@ -1722,22 +1736,6 @@ static bool whisper_encode( //printf("\n"); } - { - const int i0 = std::min(mel_offset, mel_inp.n_len); - const int i1 = std::min(mel_offset + 2*n_ctx, mel_inp.n_len); - - printf("i0 = %d, i1 = %d, (i1 - i0) = %d, embd size = %d\n", i0, i1, i1 - i0, cur->ne[0]); - - wctx.audio_embd.clear(); - wctx.audio_embd.resize(cur->ne[0], 0.0f); - for (int j = 0; j < cur->ne[0]; ++j) { - for (int i = i0; i < i1; ++i) { - wctx.audio_embd[j] += ((float *)(cur->data))[(i - i0)*cur->ne[0] + j]; - } - wctx.audio_embd[j] /= (i1 - i0); - } - } - // pre-compute cross-attention memory { struct ggml_cgraph gf = {}; @@ -4836,117 +4834,151 @@ void whisper_full_cluster_segments(struct whisper_context * ctx) { const auto mel_len_save = ctx->mel.n_len; printf("%s: mel_len_save = %d\n", __func__, mel_len_save); - std::vector> features(n_segments); + const int n_ctx = ctx->model.hparams.n_audio_ctx; + const int n_state = ctx->model.hparams.n_audio_state; + const int n_layer = ctx->model.hparams.n_audio_layer; + + for (int il = 0; il < n_layer; ++il) { + std::vector embd(n_segments*n_ctx*n_state); + + for (int i = 0; i < n_segments; ++i) { + const auto & segment_i = ctx->result_all[i]; + printf("%s: layer %2d, segment %3d: t0 = %7d, t1 = %7d, text = %s\n", __func__, il, i, (int) segment_i.t0, (int) segment_i.t1, segment_i.text.c_str()); - for (int i = 0; i < n_segments; ++i) { - const auto & segment_i = ctx->result_all[i]; - printf("%s: segment %d: t0 = %d, t1 = %d, text = %s\n", __func__, i, (int) segment_i.t0, (int) segment_i.t1, segment_i.text.c_str()); + ctx->mel.n_len = segment_i.t1; + whisper_encode(*ctx, segment_i.t0, 7, true); - ctx->mel.n_len = segment_i.t1; - whisper_encode(ctx, segment_i.t0, 4); + const size_t offs = ggml_element_size(ctx->kv_cross.k)*(il*n_ctx*n_state); + const ggml_fp16_t * f = (const ggml_fp16_t * )((const char *) ctx->kv_cross.k->data + offs); - features[i] = ctx->audio_embd; - } + for (int j = 0; j < n_ctx*n_state; ++j) { + embd[i*n_ctx*n_state + j] = ggml_fp16_to_fp32(f[j]); + } + } - const int n_features = features[0].size(); + const int n_features = 64; - // fuzzy 
c-means clustering - const int n_clusters = 4; + ggml_svd_reduce_dims(n_ctx*n_state, n_segments, embd.data(), n_features); - std::vector> centroids(n_clusters, std::vector(n_features, 0.0)); - std::vector> membership(n_segments, std::vector(n_clusters, 0.0)); + std::vector> features(n_segments); - // initialize the centroids - for (int i = 0; i < n_clusters; ++i) { - for (int j = 0; j < n_features; ++j) { - centroids[i][j] = features[i][j]; + for (int i = 0; i < n_segments; ++i) { + features[i].resize(n_features); + for (int j = 0; j < n_features; ++j) { + features[i][j] = embd[i*n_features + j]; + } } - } - // initialize the membership - for (int i = 0; i < n_segments; ++i) { - membership[i][i % n_clusters] = 1.0; - } + // fuzzy c-means clustering + const int n_clusters = 2; - // iterate - for (int i = 0; i < 100; ++i) { - // update the centroids - for (int j = 0; j < n_clusters; ++j) { - for (int k = 0; k < n_features; ++k) { - centroids[j][k] = 0.0; + std::vector> centroids(n_clusters, std::vector(n_features, 0.0)); + std::vector> membership(n_segments, std::vector(n_clusters, 0.0)); + + // initialize the centroids + for (int i = 0; i < n_clusters; ++i) { + for (int j = 0; j < n_features; ++j) { + centroids[i][j] = features[i][j]; } } - for (int j = 0; j < n_segments; ++j) { - for (int k = 0; k < n_clusters; ++k) { - for (int l = 0; l < n_features; ++l) { - centroids[k][l] += membership[j][k]*features[j][l]; - } + // initialize the membership + for (int i = 0; i < n_segments; ++i) { + //membership[i][i % n_clusters] = 1.0; + for (int j = 0; j < n_clusters; ++j) { + membership[i][j] = rand() / (float) RAND_MAX; } } - for (int j = 0; j < n_clusters; ++j) { - float sum = 0.0; - for (int k = 0; k < n_segments; ++k) { - sum += membership[k][j]; + const int niter = 10000; + + // iterate + for (int i = 0; i < niter; ++i) { + // update the centroids + for (int j = 0; j < n_clusters; ++j) { + for (int k = 0; k < n_features; ++k) { + centroids[j][k] = 0.0; + } } - for (int k = 0; k < n_features; ++k) { - centroids[j][k] /= sum; + for (int j = 0; j < n_segments; ++j) { + for (int k = 0; k < n_clusters; ++k) { + for (int l = 0; l < n_features; ++l) { + centroids[k][l] += membership[j][k]*features[j][l]; + } + } } - } - // update the membership - for (int j = 0; j < n_segments; ++j) { - for (int k = 0; k < n_clusters; ++k) { + for (int j = 0; j < n_clusters; ++j) { float sum = 0.0; - for (int l = 0; l < n_clusters; ++l) { - //sum += std::pow(whisper_distance(features[j], centroids[k])/whisper_distance(features[j], centroids[l]), 2.0/(2.0 - 1.0)); + for (int k = 0; k < n_segments; ++k) { + sum += membership[k][j]; + } - // use the euclidean distance - double d0 = 0.0; - for (int m = 0; m < n_features; ++m) { - d0 += std::pow(features[j][m] - centroids[k][m], 2.0); - } - d0 = std::sqrt(d0); + for (int k = 0; k < n_features; ++k) { + centroids[j][k] /= sum; + } + } - double d1 = 0.0; - for (int m = 0; m < n_features; ++m) { - d1 += std::pow(features[j][m] - centroids[l][m], 2.0); - } - d1 = std::sqrt(d1); - if (d1 == 0.0) { - sum += 1.0; - } else { - sum += std::pow(d0/d1, 2.0/(2.0 - 1.0)); + // update the membership + for (int j = 0; j < n_segments; ++j) { + for (int k = 0; k < n_clusters; ++k) { + float sum = 0.0; + for (int l = 0; l < n_clusters; ++l) { + //sum += std::pow(whisper_distance(features[j], centroids[k])/whisper_distance(features[j], centroids[l]), 2.0/(2.0 - 1.0)); + + // use the euclidean distance + double d0 = 0.0; + for (int m = 0; m < n_features; ++m) { + d0 += 
std::pow(features[j][m] - centroids[k][m], 2.0); + } + d0 = std::sqrt(d0); + + double d1 = 0.0; + for (int m = 0; m < n_features; ++m) { + d1 += std::pow(features[j][m] - centroids[l][m], 2.0); + } + d1 = std::sqrt(d1); + + if (d1 == 0.0) { + sum += 1.0; + } else { + sum += std::pow(d0/d1, 2.0/(1.10 - 1.0)); + } } + + membership[j][k] = 1.0/sum; } + } - membership[j][k] = 1.0/sum; + // print the membership + if (i == niter - 1) { + for (int i = 0; i < n_segments; ++i) { + printf("%s: membership %3d: ", __func__, i); + for (int j = 0; j < n_clusters; ++j) { + printf("%f ", membership[i][j]); + } + printf(" '%s'\n", ctx->result_all[i].text.c_str()); + //printf("%s: features : ", __func__); + //for (int j = 0; j < n_features; ++j) { + // printf("%8.3f ", features[i][j]); + //} + //printf(" '%s'\n", ctx->result_all[i].text.c_str()); + } + printf("----------------\n"); } } - // print the membership - for (int i = 0; i < n_segments; ++i) { - printf("%s: membership %d: ", __func__, i); - for (int j = 0; j < n_clusters; ++j) { - printf("%f ", membership[i][j]); + // print the centroids + for (int i = 0; i < n_clusters; ++i) { + printf("%s: centroid %d: ", __func__, i); + for (int j = 0; j < n_features; ++j) { + printf("%f ", centroids[i][j]); } - printf(" '%s'\n", ctx->result_all[i].text.c_str()); + printf("\n"); } - printf("----------------\n"); } - // print the centroids - //for (int i = 0; i < n_clusters; ++i) { - // printf("%s: centroid %d: ", __func__, i); - // for (int j = 0; j < n_features; ++j) { - // printf("%f ", centroids[i][j]); - // } - // printf("\n"); - //} - // restore the mel length ctx->mel.n_len = mel_len_save; } - From d11f35920ebd33a9dc007e010880e62f7e72a885 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 19 Feb 2023 10:33:03 +0200 Subject: [PATCH 3/4] diarization : try to cluster embedings from last encoder layer --- ggml.c | 23 +++++----- whisper.cpp | 122 +++++++++++++++++++++++++++++++++++++++++----------- 2 files changed, 108 insertions(+), 37 deletions(-) diff --git a/ggml.c b/ggml.c index 54094f04f0b..a797fc21de9 100644 --- a/ggml.c +++ b/ggml.c @@ -8652,16 +8652,16 @@ void ggml_svd_reduce_dims( } // normalize U - for (int i = 0; i < n; ++i) { - double sum = 0.0; - for (int j = 0; j < m; ++j) { - sum += U[i * m + j] * U[i * m + j]; - } - sum = sqrt(sum); - for (int j = 0; j < m; ++j) { - U[i * m + j] /= sum*sqrt((double) m); - } - } + //for (int i = 0; i < n; ++i) { + // double sum = 0.0; + // for (int j = 0; j < m; ++j) { + // sum += U[i * m + j] * U[i * m + j]; + // } + // sum = sqrt(sum); + // for (int j = 0; j < m; ++j) { + // U[i * m + j] /= sum*sqrt((double) m); + // } + //} // print U //printf("U:\n"); @@ -8675,9 +8675,10 @@ void ggml_svd_reduce_dims( //printf("\n"); + printf("n = %d, m = %d, nd = %d\n", n, m, nd); // project A0 onto U for (int i = 0; i < n; ++i) { - for (int j = 0; j < n; ++j) { + for (int j = 0; j < nd; ++j) { A[i * nd + j] = 0.0f; for (int k = 0; k < m; ++k) { A[i * nd + j] += A0[i * m + k] * U[j * m + k]; diff --git a/whisper.cpp b/whisper.cpp index 0b91a151c8f..679b383d107 100644 --- a/whisper.cpp +++ b/whisper.cpp @@ -603,6 +603,8 @@ struct whisper_context { // [EXPERIMENTAL] speed-up techniques int32_t exp_n_audio_ctx; // 0 - use default + std::vector audio_embd; + void use_buf(struct ggml_context * ctx, int i) { #if defined(WHISPER_USE_SCRATCH) size_t last_size = 0; @@ -1723,17 +1725,35 @@ static bool whisper_encode( } // cur + //{ + // printf("ne0 = %d\n", cur->ne[0]); + // printf("ne1 = %d\n", cur->ne[1]); + // for (int i = 0; 
i < 10; ++i) { + // printf("%8.4f ", ((float *)(cur->data))[i]); + // } + // printf("... "); + // for (int i = cur->ne[0] - 10; i < cur->ne[0]; ++i) { + // printf("%8.4f ", ((float *)(cur->data))[i]); + // } + // printf("\n"); + //} + { - //printf("ne0 = %d\n", cur->ne[0]); - //printf("ne1 = %d\n", cur->ne[1]); - //for (int i = 0; i < 10; ++i) { - // printf("%8.4f ", ((float *)(cur->data))[i]); - //} - //printf("... "); - //for (int i = cur->ne[0] - 10; i < cur->ne[0]; ++i) { - // printf("%8.4f ", ((float *)(cur->data))[i]); - //} - //printf("\n"); + //const int i0 = std::min(mel_offset, mel_inp.n_len); + //const int i1 = std::min(mel_offset + 2*n_ctx, mel_inp.n_len); + const int i0 = 0; + const int i1 = cur->ne[1]; + + //printf("i0 = %d, i1 = %d, (i1 - i0) = %d, embd size = %d\n", i0, i1, i1 - i0, cur->ne[0]); + + wctx.audio_embd.clear(); + wctx.audio_embd.resize(cur->ne[0], 0.0f); + for (int j = 0; j < cur->ne[0]; ++j) { + for (int i = i0; i < i1; ++i) { + wctx.audio_embd[j] += ((float *)(cur->data))[(i - i0)*cur->ne[0] + j]; + } + wctx.audio_embd[j] /= (i1 - i0); + } } // pre-compute cross-attention memory @@ -4838,6 +4858,28 @@ void whisper_full_cluster_segments(struct whisper_context * ctx) { const int n_state = ctx->model.hparams.n_audio_state; const int n_layer = ctx->model.hparams.n_audio_layer; +#if 1 + // use the last layer of the encoder + { + std::vector embd(n_segments*n_state); + + for (int i = 0; i < n_segments; ++i) { + const auto & segment_i = ctx->result_all[i]; + printf("%s: segment %3d: t0 = %7d, t1 = %7d, text = %s\n", __func__, i, (int) segment_i.t0, (int) segment_i.t1, segment_i.text.c_str()); + + ctx->mel.n_len = segment_i.t1; + whisper_encode(*ctx, segment_i.t0, 7, true); + + for (int j = 0; j < n_state; ++j) { + embd[i*n_state + j] = ctx->audio_embd[j]; + } + } + + const int n_features = std::min(4, n_segments); + + ggml_svd_reduce_dims(n_state, n_segments, embd.data(), n_features); +#else + // use cross kv cache of various layers for (int il = 0; il < n_layer; ++il) { std::vector embd(n_segments*n_ctx*n_state); @@ -4856,9 +4898,10 @@ void whisper_full_cluster_segments(struct whisper_context * ctx) { } } - const int n_features = 64; + const int n_features = std::min(4, n_segments); ggml_svd_reduce_dims(n_ctx*n_state, n_segments, embd.data(), n_features); +#endif std::vector> features(n_segments); @@ -4927,32 +4970,59 @@ void whisper_full_cluster_segments(struct whisper_context * ctx) { for (int l = 0; l < n_clusters; ++l) { //sum += std::pow(whisper_distance(features[j], centroids[k])/whisper_distance(features[j], centroids[l]), 2.0/(2.0 - 1.0)); - // use the euclidean distance double d0 = 0.0; - for (int m = 0; m < n_features; ++m) { - d0 += std::pow(features[j][m] - centroids[k][m], 2.0); - } - d0 = std::sqrt(d0); - double d1 = 0.0; - for (int m = 0; m < n_features; ++m) { - d1 += std::pow(features[j][m] - centroids[l][m], 2.0); - } - d1 = std::sqrt(d1); - if (d1 == 0.0) { - sum += 1.0; - } else { - sum += std::pow(d0/d1, 2.0/(1.10 - 1.0)); + // use the euclidean distance + { + for (int m = 0; m < n_features; ++m) { + d0 += std::pow(features[j][m] - centroids[k][m], 2.0); + } + d0 = std::sqrt(d0); + + for (int m = 0; m < n_features; ++m) { + d1 += std::pow(features[j][m] - centroids[l][m], 2.0); + } + d1 = std::sqrt(d1); } + + // use the cosine distance + //{ + // double dot = 0.0; + // double norm0 = 0.0; + // double norm1 = 0.0; + + // for (int m = 0; m < n_features; ++m) { + // dot += features[j][m]*centroids[k][m]; + // norm0 += std::pow(features[j][m], 
2.0); + // norm1 += std::pow(centroids[k][m], 2.0); + // } + + // d0 = 1.0 - dot/(std::sqrt(norm0)*std::sqrt(norm1)); + + // dot = 0.0; + // norm0 = 0.0; + // norm1 = 0.0; + + // for (int m = 0; m < n_features; ++m) { + // dot += features[j][m]*centroids[l][m]; + // norm0 += std::pow(features[j][m], 2.0); + // norm1 += std::pow(centroids[l][m], 2.0); + // } + + // d1 = 1.0 - dot/(std::sqrt(norm0)*std::sqrt(norm1)); + //} + + sum += std::pow(d0/d1, 2.0/(1.15 - 1.0)); } - membership[j][k] = 1.0/sum; + membership[j][k] = sum == 0.0 ? 0.0 : 1.0/sum; } } // print the membership if (i == niter - 1) { + //{ for (int i = 0; i < n_segments; ++i) { printf("%s: membership %3d: ", __func__, i); for (int j = 0; j < n_clusters; ++j) { From ec44ad0a75118ab6708b5d21d381643868fa8234 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 19 Feb 2023 12:19:52 +0200 Subject: [PATCH 4/4] diarization : try conv and self-attention embeddings --- ggml.c | 75 ++++++++-------- whisper.cpp | 248 ++++++++++++++++++++++++++++++++++++---------------- 2 files changed, 210 insertions(+), 113 deletions(-) diff --git a/ggml.c b/ggml.c index a797fc21de9..7719a25e5e1 100644 --- a/ggml.c +++ b/ggml.c @@ -8529,28 +8529,30 @@ void ggml_svd_reduce_dims( float * A0 = (float *) malloc(n * m * sizeof(float)); // average vector - float * M = (float *) malloc(m * sizeof(float)); + //float * M = (float *) malloc(m * sizeof(float)); - { - for (int j = 0; j < m; ++j) { - M[j] = 0.0f; - } - for (int i = 0; i < n; ++i) { - for (int j = 0; j < m; ++j) { - M[j] += A[i * m + j]; - } - } - for (int j = 0; j < m; ++j) { - M[j] /= (float) n; - } - } + //{ + // for (int j = 0; j < m; ++j) { + // M[j] = 0.0f; + // } + // for (int i = 0; i < n; ++i) { + // for (int j = 0; j < m; ++j) { + // M[j] += A[i * m + j]; + // } + // } + // for (int j = 0; j < m; ++j) { + // M[j] /= (float) n; + // } + //} - // subtract average vector - for (int i = 0; i < n; ++i) { - for (int j = 0; j < m; ++j) { - A[i * m + j] -= M[j]; - } - } + //// subtract average vector + //for (int i = 0; i < n; ++i) { + // for (int j = 0; j < m; ++j) { + // A[i * m + j] -= M[j]; + // } + //} + + //free(M); memcpy(A0, A, n * m * sizeof(float)); @@ -8616,11 +8618,11 @@ void ggml_svd_reduce_dims( } // print S - //printf("S:\n"); - //for (int i = 0; i < n; ++i) { - // printf("- %d = %9.5f\n", i, S[i]); - //} - //printf("\n"); + printf("S:\n"); + for (int i = 0; i < n; ++i) { + printf("- %d = %9.5f\n", i, S[i]); + } + printf("\n"); // print V //printf("V:\n"); @@ -8652,16 +8654,16 @@ void ggml_svd_reduce_dims( } // normalize U - //for (int i = 0; i < n; ++i) { - // double sum = 0.0; - // for (int j = 0; j < m; ++j) { - // sum += U[i * m + j] * U[i * m + j]; - // } - // sum = sqrt(sum); - // for (int j = 0; j < m; ++j) { - // U[i * m + j] /= sum*sqrt((double) m); - // } - //} + for (int i = 0; i < n; ++i) { + double sum = 0.0; + for (int j = 0; j < m; ++j) { + sum += U[i * m + j] * U[i * m + j]; + } + sum = sqrt(sum); + for (int j = 0; j < m; ++j) { + U[i * m + j] /= sum*sqrt((double) m); + } + } // print U //printf("U:\n"); @@ -8674,12 +8676,11 @@ void ggml_svd_reduce_dims( //} //printf("\n"); - - printf("n = %d, m = %d, nd = %d\n", n, m, nd); // project A0 onto U for (int i = 0; i < n; ++i) { for (int j = 0; j < nd; ++j) { A[i * nd + j] = 0.0f; + //if (j == 0) continue; for (int k = 0; k < m; ++k) { A[i * nd + j] += A0[i * m + k] * U[j * m + k]; } diff --git a/whisper.cpp b/whisper.cpp index 679b383d107..04a0f25fd90 100644 --- a/whisper.cpp +++ b/whisper.cpp @@ -268,6 +268,14 @@ 
static const std::map MEM_REQ_KV_SELF = { { MODEL_LARGE, 71ull*MB }, }; +static const std::map MEM_REQ_KV_ENC_SELF = { + { MODEL_TINY, 23ull*MB }, + { MODEL_BASE, 26ull*MB }, + { MODEL_SMALL, 216ull*MB }, + { MODEL_MEDIUM, 243ull*MB }, + { MODEL_LARGE, 271ull*MB }, +}; + static const std::map MEM_REQ_KV_CROSS = { { MODEL_TINY, 9ull*MB }, { MODEL_BASE, 18ull*MB }, @@ -571,6 +579,7 @@ struct whisper_context { // cross-attention KV cache for the decoders // shared between all decoders whisper_kv_cache kv_cross; + whisper_kv_cache kv_enc_self; whisper_decoder decoders[WHISPER_MAX_DECODERS] = {}; @@ -807,7 +816,7 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con MEM_REQ_SCRATCH3.at (model.type) + scale*MEM_REQ_MODEL.at (model.type) + scale*MEM_REQ_KV_CROSS.at(model.type) + - scale*std::max(MEM_REQ_ENCODE.at(model.type), MEM_REQ_DECODE.at(model.type)); + scale*std::max(MEM_REQ_ENCODE.at(model.type), MEM_REQ_DECODE.at(model.type)); // this is the memory required by one decoder const size_t mem_required_decoder = @@ -838,6 +847,11 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con return false; } + if (!kv_cache_init(model.hparams, scale*MEM_REQ_KV_ENC_SELF.at(model.type), wctx.kv_enc_self, wctx.wtype, model.hparams.n_audio_ctx)) { + fprintf(stderr, "%s: kv_cache_init() failed for cross-attention cache\n", __func__); + return false; + } + { const size_t memory_size = ggml_nbytes(wctx.kv_cross.k) + ggml_nbytes(wctx.kv_cross.v); fprintf(stderr, "%s: kv cross size = %7.2f MB\n", __func__, memory_size/1024.0/1024.0); @@ -1415,6 +1429,9 @@ static bool whisper_encode( } } + struct ggml_cgraph gf = {}; + gf.n_threads = n_threads; + struct ggml_tensor * cur; // convolution + gelu @@ -1442,6 +1459,18 @@ static bool whisper_encode( cur = ggml_gelu(ctx0, cur); } + //{ + // //printf("cur: %d %d %d %d, size element = %d\n", cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_element_size(cur)); + + // wctx.use_buf(ctx0, -1); + + // struct ggml_tensor * k = ggml_view_1d(ctx0, wctx.kv_enc_self.k, n_state*n_ctx, (ggml_element_size(wctx.kv_enc_self.k)*n_state)*(0*n_ctx)); + // //struct ggml_tensor * v = ggml_view_1d(ctx0, wctx.kv_enc_self.v, n_state*n_ctx, (ggml_element_size(wctx.kv_enc_self.v)*n_state)*(il*n_ctx)); + + // ggml_build_forward_expand(&gf, ggml_cpy(ctx0, cur, k)); + // //ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v)); + //} + wctx.use_buf(ctx0, 3); // =================================================================== @@ -1522,6 +1551,18 @@ static bool whisper_encode( Vcur), Vcur); + //{ + // //printf("Kcur: %d %d %d %d, size element = %d\n", Kcur->ne[0], Kcur->ne[1], Kcur->ne[2], Kcur->ne[3], ggml_element_size(Kcur)); + + // wctx.use_buf(ctx0, -1); + + // struct ggml_tensor * k = ggml_view_1d(ctx0, wctx.kv_enc_self.k, n_state*n_ctx, (ggml_element_size(wctx.kv_enc_self.k)*n_state)*(il*n_ctx)); + // struct ggml_tensor * v = ggml_view_1d(ctx0, wctx.kv_enc_self.v, n_state*n_ctx, (ggml_element_size(wctx.kv_enc_self.v)*n_state)*(il*n_ctx)); + + // ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k)); + // ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v)); + //} + // ------ wctx.use_buf(ctx0, 0); @@ -1606,6 +1647,18 @@ static bool whisper_encode( cur = ggml_cpy(ctx0, KQV_merged, ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_state, n_ctx)); + + { + //printf("cur: %d %d %d %d, size element = %d\n", cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_element_size(cur)); + + wctx.use_buf(ctx0, -1); + + struct ggml_tensor * k = 
ggml_view_1d(ctx0, wctx.kv_enc_self.k, n_state*n_ctx, (ggml_element_size(wctx.kv_enc_self.k)*n_state)*(il*n_ctx)); + //struct ggml_tensor * v = ggml_view_1d(ctx0, wctx.kv_enc_self.v, n_state*n_ctx, (ggml_element_size(wctx.kv_enc_self.v)*n_state)*(il*n_ctx)); + + ggml_build_forward_expand(&gf, ggml_cpy(ctx0, cur, k)); + //ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v)); + } } // projection @@ -1715,8 +1768,6 @@ static bool whisper_encode( // run the computation { - struct ggml_cgraph gf = {}; - gf.n_threads = n_threads; ggml_build_forward_expand(&gf, cur); ggml_graph_compute (ctx0, &gf); @@ -4858,7 +4909,7 @@ void whisper_full_cluster_segments(struct whisper_context * ctx) { const int n_state = ctx->model.hparams.n_audio_state; const int n_layer = ctx->model.hparams.n_audio_layer; -#if 1 +#if 0 // use the last layer of the encoder { std::vector embd(n_segments*n_state); @@ -4878,7 +4929,7 @@ void whisper_full_cluster_segments(struct whisper_context * ctx) { const int n_features = std::min(4, n_segments); ggml_svd_reduce_dims(n_state, n_segments, embd.data(), n_features); -#else +#elif 0 // use cross kv cache of various layers for (int il = 0; il < n_layer; ++il) { std::vector embd(n_segments*n_ctx*n_state); @@ -4900,10 +4951,56 @@ void whisper_full_cluster_segments(struct whisper_context * ctx) { const int n_features = std::min(4, n_segments); + ggml_svd_reduce_dims(n_ctx*n_state, n_segments, embd.data(), n_features); +#elif 0 + // use conv embedding + for (int il = 0; il < 1; ++il) { + std::vector embd(n_segments*n_ctx*n_state); + + for (int i = 0; i < n_segments; ++i) { + const auto & segment_i = ctx->result_all[i]; + printf("%s: layer %2d, segment %3d: t0 = %7d, t1 = %7d, text = %s\n", __func__, il, i, (int) segment_i.t0, (int) segment_i.t1, segment_i.text.c_str()); + + ctx->mel.n_len = segment_i.t1; + whisper_encode(*ctx, segment_i.t0, 7, true); + + const size_t offs = ggml_element_size(ctx->kv_enc_self.k)*(il*n_ctx*n_state); + const ggml_fp16_t * f = (const ggml_fp16_t * )((const char *) ctx->kv_enc_self.k->data + offs); + + for (int j = 0; j < n_ctx*n_state; ++j) { + embd[i*n_ctx*n_state + j] = ggml_fp16_to_fp32(f[j]); + } + } + + const int n_features = std::min(3, n_segments); + + ggml_svd_reduce_dims(n_ctx*n_state, n_segments, embd.data(), n_features); +#else + // use enc self kv cache of various layers + for (int il = 0; il < n_layer; ++il) { + std::vector embd(n_segments*n_ctx*n_state); + + for (int i = 0; i < n_segments; ++i) { + const auto & segment_i = ctx->result_all[i]; + printf("%s: layer %2d, segment %3d: t0 = %7d, t1 = %7d, text = %s\n", __func__, il, i, (int) segment_i.t0, (int) segment_i.t1, segment_i.text.c_str()); + + ctx->mel.n_len = segment_i.t1; + whisper_encode(*ctx, segment_i.t0, 7, true); + + const size_t offs = ggml_element_size(ctx->kv_enc_self.k)*(il*n_ctx*n_state); + const ggml_fp16_t * f = (const ggml_fp16_t * )((const char *) ctx->kv_enc_self.k->data + offs); + + for (int j = 0; j < n_ctx*n_state; ++j) { + embd[i*n_ctx*n_state + j] = ggml_fp16_to_fp32(f[j]); + } + } + + const int n_features = std::min(4, n_segments); + ggml_svd_reduce_dims(n_ctx*n_state, n_segments, embd.data(), n_features); #endif - std::vector> features(n_segments); + std::vector> features(n_segments); for (int i = 0; i < n_segments; ++i) { features[i].resize(n_features); @@ -4915,8 +5012,8 @@ void whisper_full_cluster_segments(struct whisper_context * ctx) { // fuzzy c-means clustering const int n_clusters = 2; - std::vector> centroids(n_clusters, std::vector(n_features, 0.0)); - 
std::vector> membership(n_segments, std::vector(n_clusters, 0.0)); + std::vector> centroids(n_clusters, std::vector(n_features, 0.0)); + std::vector> membership(n_segments, std::vector(n_clusters, 0.0)); // initialize the centroids for (int i = 0; i < n_clusters; ++i) { @@ -4928,8 +5025,11 @@ void whisper_full_cluster_segments(struct whisper_context * ctx) { // initialize the membership for (int i = 0; i < n_segments; ++i) { //membership[i][i % n_clusters] = 1.0; + //for (int j = 0; j < n_clusters; ++j) { + // membership[i][j] = rand() / (float) RAND_MAX; + //} for (int j = 0; j < n_clusters; ++j) { - membership[i][j] = rand() / (float) RAND_MAX; + membership[i][j] = 1.0 / n_clusters; } } @@ -4937,42 +5037,47 @@ void whisper_full_cluster_segments(struct whisper_context * ctx) { // iterate for (int i = 0; i < niter; ++i) { - // update the centroids - for (int j = 0; j < n_clusters; ++j) { - for (int k = 0; k < n_features; ++k) { - centroids[j][k] = 0.0; - } - } - - for (int j = 0; j < n_segments; ++j) { - for (int k = 0; k < n_clusters; ++k) { - for (int l = 0; l < n_features; ++l) { - centroids[k][l] += membership[j][k]*features[j][l]; + // print the membership + if (i == niter - 1) { + //{ + for (int i = 0; i < n_segments; ++i) { +#if 1 + printf("%s: membership %3d: ", __func__, i); + for (int j = 0; j < n_clusters; ++j) { + printf("%.1f ", membership[i][j]); } + printf(" '%s'\n", ctx->result_all[i].text.c_str()); +#else + printf("%s: features : ", __func__); + for (int j = 0; j < n_features; ++j) { + printf("%8.3f ", features[i][j]); + } + printf(" '%s'\n", ctx->result_all[i].text.c_str()); +#endif } - } - - for (int j = 0; j < n_clusters; ++j) { - float sum = 0.0; - for (int k = 0; k < n_segments; ++k) { - sum += membership[k][j]; - } + printf("----------------\n"); - for (int k = 0; k < n_features; ++k) { - centroids[j][k] /= sum; + // print the centroids + for (int i = 0; i < n_clusters; ++i) { + printf("%s: centroid %d: ", __func__, i); + for (int j = 0; j < n_features; ++j) { + printf("%f ", centroids[i][j]); + } + printf("\n"); } } // update the membership for (int j = 0; j < n_segments; ++j) { for (int k = 0; k < n_clusters; ++k) { - float sum = 0.0; + double sum = 0.0; for (int l = 0; l < n_clusters; ++l) { //sum += std::pow(whisper_distance(features[j], centroids[k])/whisper_distance(features[j], centroids[l]), 2.0/(2.0 - 1.0)); double d0 = 0.0; double d1 = 0.0; +#if 1 // use the euclidean distance { for (int m = 0; m < n_features; ++m) { @@ -4985,67 +5090,58 @@ void whisper_full_cluster_segments(struct whisper_context * ctx) { } d1 = std::sqrt(d1); } - +#else // use the cosine distance - //{ - // double dot = 0.0; - // double norm0 = 0.0; - // double norm1 = 0.0; + { + double dot = 0.0; + double norm0 = 0.0; + double norm1 = 0.0; - // for (int m = 0; m < n_features; ++m) { - // dot += features[j][m]*centroids[k][m]; - // norm0 += std::pow(features[j][m], 2.0); - // norm1 += std::pow(centroids[k][m], 2.0); - // } + for (int m = 0; m < n_features; ++m) { + dot += features[j][m]*centroids[k][m]; + norm0 += std::pow(features[j][m], 2.0); + norm1 += std::pow(centroids[k][m], 2.0); + } - // d0 = 1.0 - dot/(std::sqrt(norm0)*std::sqrt(norm1)); + d0 = 1.0 - dot/(std::sqrt(norm0)*std::sqrt(norm1)); - // dot = 0.0; - // norm0 = 0.0; - // norm1 = 0.0; + dot = 0.0; + norm0 = 0.0; + norm1 = 0.0; - // for (int m = 0; m < n_features; ++m) { - // dot += features[j][m]*centroids[l][m]; - // norm0 += std::pow(features[j][m], 2.0); - // norm1 += std::pow(centroids[l][m], 2.0); - // } + for (int m 
= 0; m < n_features; ++m) { + dot += features[j][m]*centroids[l][m]; + norm0 += std::pow(features[j][m], 2.0); + norm1 += std::pow(centroids[l][m], 2.0); + } - // d1 = 1.0 - dot/(std::sqrt(norm0)*std::sqrt(norm1)); - //} + d1 = 1.0 - dot/(std::sqrt(norm0)*std::sqrt(norm1)); + } +#endif - sum += std::pow(d0/d1, 2.0/(1.15 - 1.0)); + if (d1 > 0.0) { + sum += std::pow(d0/d1, 2.0/(1.20 - 1.0)); + } else { + sum += 1.0; + } } - membership[j][k] = sum == 0.0 ? 0.0 : 1.0/sum; + membership[j][k] = sum == 0.0 ? 1.0 : 1.0/sum; } } - // print the membership - if (i == niter - 1) { - //{ - for (int i = 0; i < n_segments; ++i) { - printf("%s: membership %3d: ", __func__, i); - for (int j = 0; j < n_clusters; ++j) { - printf("%f ", membership[i][j]); + // update the centroids + for (int j = 0; j < n_clusters; ++j) { + for (int k = 0; k < n_features; ++k) { + double sum = 0.0; + double sum2 = 0.0; + for (int l = 0; l < n_segments; ++l) { + sum += membership[l][j]*features[l][k]; + sum2 += membership[l][j]; } - printf(" '%s'\n", ctx->result_all[i].text.c_str()); - //printf("%s: features : ", __func__); - //for (int j = 0; j < n_features; ++j) { - // printf("%8.3f ", features[i][j]); - //} - //printf(" '%s'\n", ctx->result_all[i].text.c_str()); + centroids[j][k] = sum2 == 0.0 ? 0.0 : sum/sum2; } - printf("----------------\n"); - } - } - - // print the centroids - for (int i = 0; i < n_clusters; ++i) { - printf("%s: centroid %d: ", __func__, i); - for (int j = 0; j < n_features; ++j) { - printf("%f ", centroids[i][j]); } - printf("\n"); } }
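
Across all four patches the clustering itself is the same fuzzy c-means loop; only the embedding source (averaged encoder output, cross-attention KV cache, conv output, encoder self-attention KV cache) and the fuzzifier change. For reference, a compact, self-contained sketch of the textbook update equations, assuming Euclidean distances. The names (fcm_iterate, X, C, U) are illustrative and not part of the whisper.cpp API; note that the patches above use the raw memberships rather than u^m in the centroid step, try fuzzifier values between 1.1 and 2.0, and fall back to adding 1.0 when a point coincides with a centroid, which the sketch mirrors.

#include <cmath>
#include <vector>

// one round of standard fuzzy c-means updates, repeated niter times
// X : n_points x n_features data, C : n_clusters x n_features centroids (in/out),
// U : n_points x n_clusters memberships (in/out), m : fuzzifier > 1
static void fcm_iterate(
        const std::vector<std::vector<float>> & X,
        std::vector<std::vector<float>>       & C,
        std::vector<std::vector<float>>       & U,
        double m,
        int niter) {
    const int n_points   = (int) X.size();
    const int n_clusters = (int) C.size();
    const int n_features = (int) X[0].size();

    for (int it = 0; it < niter; ++it) {
        // membership update: u_jk = 1 / sum_l (d_jk / d_jl)^(2/(m-1))
        for (int j = 0; j < n_points; ++j) {
            for (int k = 0; k < n_clusters; ++k) {
                double sum = 0.0;
                for (int l = 0; l < n_clusters; ++l) {
                    double dk = 0.0, dl = 0.0;
                    for (int f = 0; f < n_features; ++f) {
                        dk += std::pow(X[j][f] - C[k][f], 2.0);
                        dl += std::pow(X[j][f] - C[l][f], 2.0);
                    }
                    dk = std::sqrt(dk);
                    dl = std::sqrt(dl);
                    sum += dl > 0.0 ? std::pow(dk/dl, 2.0/(m - 1.0)) : 1.0;
                }
                U[j][k] = sum > 0.0 ? 1.0/sum : 1.0;
            }
        }

        // centroid update: c_k = sum_j u_jk^m x_j / sum_j u_jk^m
        for (int k = 0; k < n_clusters; ++k) {
            for (int f = 0; f < n_features; ++f) {
                double num = 0.0, den = 0.0;
                for (int j = 0; j < n_points; ++j) {
                    const double w = std::pow(U[j][k], m);
                    num += w*X[j][f];
                    den += w;
                }
                C[k][f] = den > 0.0 ? num/den : 0.0;
            }
        }
    }
}

As the fuzzifier approaches 1 the memberships approach hard 0/1 assignments (k-means-like behavior), which is consistent with the near-binary membership values printed by the later patches.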