Compare commits


2 Commits

Author SHA1 Message Date
Georgi Gerganov
7a221b672e llama : use F32 precision in Qwen2 attention and no FA (#8412) 2024-07-11 10:21:30 +03:00
Clint Herron
278d0e1846 Initialize default slot sampling parameters from the global context. (#8418) 2024-07-10 20:08:17 -04:00
2 changed files with 3 additions and 1 deletion

View File

@@ -737,6 +737,8 @@ struct server_context {
         slot.ga_n = ga_n;
         slot.ga_w = ga_w;
 
+        slot.sparams = params.sparams;
+
         slot.reset();
 
         slots.push_back(slot);

View File

@@ -8134,7 +8134,7 @@ static struct ggml_tensor * llm_build_kqv(
         struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
         cb(kq, "kq", il);
 
-        if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX) {
+        if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX || model.arch == LLM_ARCH_QWEN2) {
             // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
             // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
             ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
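
For context, the second change relies on ggml's per-op precision override: ggml_mul_mat_set_prec() marks a matmul node so that it is accumulated in F32 rather than the backend's default (possibly F16) precision, which is what prevents the NaNs mentioned in the comment. The standalone sketch below shows that call in isolation; the tensor shapes, types, and memory size are illustrative assumptions and are not taken from the diff.

// Illustrative only: build a K*Q matmul node and request F32 accumulation,
// mirroring the ggml_mul_mat_set_prec() call enabled for Qwen2 above.
// Shapes/types are made up for the example.
#include "ggml.h"

int main(void) {
    struct ggml_init_params ip = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(ip);

    // toy stand-ins for K and Q: head dim 64, 8 "tokens" each
    struct ggml_tensor * k = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, 64, 8);
    struct ggml_tensor * q = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 8);

    struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);

    // without this, some backends may accumulate the matmul at reduced
    // precision, which can overflow to NaN for attention logits;
    // GGML_PREC_F32 requests F32 accumulation for this node only
    ggml_mul_mat_set_prec(kq, GGML_PREC_F32);

    ggml_free(ctx);
    return 0;
}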