From 969be5d42b68e7eb94d590f88068ebfce1b864fd Mon Sep 17 00:00:00 2001
From: Francis Couture-Harpin
Date: Tue, 27 Feb 2024 14:24:45 -0500
Subject: [PATCH] llama : fix non-quantization of expert gating tensors

This reverts a single line from #5475

---
 llama.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 464e1b89b2827..a6920a5cb4d8f 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -11213,7 +11213,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         quantize &= !params->only_copy;
 
         // do not quantize expert gating tensors
-        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_FFN_GATE_INP, "weight");
+        // NOTE: can't use LLM_TN here because the layer number is not known
+        quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
 
         // do not quantize positional embeddings and token types (BERT)
         quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD, "weight");
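
Why a substring check: per the NOTE in the diff, the layer number is not known at
this point, so a single exact-name comparison cannot match an expert gating tensor
in every layer, while searching for the "ffn_gate_inp.weight" suffix can. Below is
a minimal standalone sketch of that idea; the should_quantize() helper is
hypothetical and the per-layer tensor names of the form "blk.<N>.ffn_gate_inp.weight"
are an assumption for illustration, not llama.cpp's actual quantization code:

    #include <cassert>
    #include <string>

    // Hypothetical helper: skip expert gating tensors regardless of which
    // layer they belong to. A substring search covers every "blk.<N>." prefix,
    // which a comparison against one fixed name cannot.
    static bool should_quantize(const std::string & name) {
        return name.find("ffn_gate_inp.weight") == std::string::npos;
    }

    int main() {
        assert(!should_quantize("blk.0.ffn_gate_inp.weight"));  // gating tensor: skipped
        assert(!should_quantize("blk.31.ffn_gate_inp.weight")); // any layer index: skipped
        assert( should_quantize("blk.0.ffn_down.weight"));      // ordinary tensor: quantized
        return 0;
    }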