@@ -67,11 +67,17 @@ int main(int argc, char ** argv) {
 
     fflush(stderr);
 
+    const int n_input = inp.size();
+
+    const auto t_enc_start = ggml_time_us();
+
     // eval the prompt with both models
     llama_eval(ctx_tgt, inp.data(), int(inp.size() - 1), 0, params.n_threads);
     llama_eval(ctx_tgt, &inp.back(), 1, inp.size() - 1, params.n_threads);
     llama_eval(ctx_dft, inp.data(), int(inp.size()), 0, params.n_threads);
 
+    const auto t_enc_end = ggml_time_us();
+
     // the 2 models should have the same vocab
     const int n_ctx   = llama_n_ctx(ctx_tgt);
     const int n_vocab = llama_n_vocab(ctx_tgt);
@@ -103,7 +109,7 @@ int main(int argc, char ** argv) {
     // used to determine end of generation
     bool has_eos = false;
 
-    const auto t_gen_start = ggml_time_us();
+    const auto t_dec_start = ggml_time_us();
 
     while (true) {
         LOG("drafted: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_dft, drafted));
@@ -193,11 +199,12 @@ int main(int argc, char ** argv) {
         drafted.erase(drafted.begin());
     }
 
-    auto t_gen_end = ggml_time_us();
+    auto t_dec_end = ggml_time_us();
 
     LOG_TEE("\n\n");
 
-    LOG_TEE("generated %d tokens in %.3f seconds, speed: %.3f t/s\n", n_predict, (t_gen_end - t_gen_start) / 1e6f, n_predict / ((t_gen_end - t_gen_start) / 1e6f));
+    LOG_TEE("encoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_input, (t_enc_end - t_enc_start) / 1e6f, inp.size() / ((t_enc_end - t_enc_start) / 1e6f));
+    LOG_TEE("decoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_predict, (t_dec_end - t_dec_start) / 1e6f, n_predict / ((t_dec_end - t_dec_start) / 1e6f));
 
     // TODO: make sure these numbers are computed correctly
     LOG_TEE("\n");
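
The change above splits the single "generated" timer into two wall-clock intervals: one around the prompt evaluation (the encode phase) and one around the generation loop (the decode phase), each reported separately as tokens per second. A minimal standalone sketch of the same bookkeeping, with std::chrono standing in for ggml_time_us() and a hypothetical fake_eval() standing in for the llama_eval() calls so it compiles without llama.cpp:

#include <chrono>
#include <cstdint>
#include <cstdio>
#include <thread>

// hypothetical stand-in for llama_eval(); sleeps ~1 ms per token
static void fake_eval(int n_tokens) {
    std::this_thread::sleep_for(std::chrono::milliseconds(n_tokens));
}

// microsecond wall clock, analogous to ggml_time_us()
static int64_t time_us() {
    return std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now().time_since_epoch()).count();
}

int main() {
    const int n_input   = 32; // prompt tokens (encode phase) - made-up count
    const int n_predict = 64; // generated tokens (decode phase) - made-up count

    // encode: the whole prompt is evaluated in one batch
    const auto t_enc_start = time_us();
    fake_eval(n_input);
    const auto t_enc_end = time_us();

    // decode: tokens are produced one step at a time
    const auto t_dec_start = time_us();
    for (int i = 0; i < n_predict; ++i) {
        fake_eval(1);
    }
    const auto t_dec_end = time_us();

    printf("encoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n",
           n_input, (t_enc_end - t_enc_start) / 1e6f,
           n_input / ((t_enc_end - t_enc_start) / 1e6f));
    printf("decoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n",
           n_predict, (t_dec_end - t_dec_start) / 1e6f,
           n_predict / ((t_dec_end - t_dec_start) / 1e6f));
    return 0;
}

Reporting the two phases separately matters for speculative decoding, since prompt evaluation is batched and typically much faster per token than the sequential decode loop, so a single combined t/s number would blur the two.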