Compare commits

...

15 Commits

Author SHA1 Message Date
Rithik Sharma
665abc6097 add fast mat-vec kernels for i-quants (#22344) 2026-04-27 08:25:45 -07:00
Igor Rudenko
4414c04b9a Additional test for common/gemma4 : handle parsing edge cases (#22420)
* Additional test for common/gemma4 : handle parsing edge cases

* Move tests to Gemma 4 test group
2026-04-27 16:36:59 +02:00
unraido
ceaf47c4b1 fix: rpc-server cache may not work in Windows environments (#22394)
* fix: create directory and log cache file name.

* Remove GGML_LOG_INFO conditional compilation.

---------

Co-authored-by: kotaro <kotaro.kusunoki@gmail.com>
2026-04-27 17:25:09 +03:00
rankaiyx
42401c72b8 Fix type casting for unaccounted memory calculation (#22424) 2026-04-27 14:31:13 +02:00
Georgi Gerganov
e940b3d468 download : prefer q8_0 when q4_k not available (#22428) 2026-04-27 14:30:29 +02:00
ynankani
0f1bb602dd model : remove duplicate wo_s scale after build_attn (Qwen3, LLaMA) (#22421)
Signed-off-by: Yash Nankani <ynankani@nvidia.com>
2026-04-27 09:58:48 +02:00
Sigbjørn Skjæret
d13540becd convert : remove input_scale for dequantized fp8 modelopt (#22356) 2026-04-27 08:45:01 +02:00
Adrien Gallouët
f84270ea10 ggml : use 64 bytes aligned tile buffers (#21058)
| Model                            | Test   |   t/s OLD |   t/s NEW |   Speedup |
|:---------------------------------|:-------|----------:|----------:|----------:|
| qwen35 0.8B BF16                 | pp512  |    584.59 |    595.41 |      1.02 |
| qwen35 0.8B BF16                 | tg128  |     52.23 |     52.82 |      1.01 |
| qwen35 0.8B IQ2_M - 2.7 bpw      | pp512  |    260.64 |    261.70 |      1.00 |
| qwen35 0.8B IQ2_M - 2.7 bpw      | tg128  |     81.17 |     80.89 |      1.00 |
| qwen35 0.8B IQ2_XXS - 2.0625 bpw | pp512  |    302.36 |    302.56 |      1.00 |
| qwen35 0.8B IQ2_XXS - 2.0625 bpw | tg128  |     84.93 |     85.12 |      1.00 |
| qwen35 0.8B IQ3_XXS - 3.0625 bpw | pp512  |    263.22 |    260.01 |      0.99 |
| qwen35 0.8B IQ3_XXS - 3.0625 bpw | tg128  |     80.29 |     78.94 |      0.98 |
| qwen35 0.8B IQ4_NL - 4.5 bpw     | pp512  |    728.65 |    742.09 |      1.02 |
| qwen35 0.8B IQ4_NL - 4.5 bpw     | tg128  |     82.39 |     84.46 |      1.03 |
| qwen35 0.8B IQ4_XS - 4.25 bpw    | pp512  |    681.33 |    677.06 |      0.99 |
| qwen35 0.8B IQ4_XS - 4.25 bpw    | tg128  |     80.18 |     79.28 |      0.99 |
| qwen35 0.8B Q2_K_M               | pp512  |    413.28 |    415.94 |      1.01 |
| qwen35 0.8B Q2_K_M               | tg128  |     81.90 |     82.78 |      1.01 |
| qwen35 0.8B Q3_K_M               | pp512  |    493.17 |    495.08 |      1.00 |
| qwen35 0.8B Q3_K_M               | tg128  |     82.75 |     83.23 |      1.01 |
| qwen35 0.8B Q3_K_S               | pp512  |    429.35 |    427.64 |      1.00 |
| qwen35 0.8B Q3_K_S               | tg128  |     86.69 |     87.02 |      1.00 |
| qwen35 0.8B Q4_0                 | pp512  |    783.46 |    782.32 |      1.00 |
| qwen35 0.8B Q4_0                 | tg128  |     88.23 |     87.90 |      1.00 |
| qwen35 0.8B Q4_1                 | pp512  |    741.71 |    729.76 |      0.98 |
| qwen35 0.8B Q4_1                 | tg128  |     85.44 |     86.01 |      1.01 |
| qwen35 0.8B Q4_K_M               | pp512  |    676.24 |    681.31 |      1.01 |
| qwen35 0.8B Q4_K_M               | tg128  |     76.59 |     77.06 |      1.01 |
| qwen35 0.8B Q4_K_S               | pp512  |    683.12 |    688.81 |      1.01 |
| qwen35 0.8B Q4_K_S               | tg128  |     80.50 |     81.19 |      1.01 |
| qwen35 0.8B Q5_K_M               | pp512  |    635.33 |    642.11 |      1.01 |
| qwen35 0.8B Q5_K_M               | tg128  |     72.07 |     72.49 |      1.01 |
| qwen35 0.8B Q5_K_S               | pp512  |    660.95 |    658.18 |      1.00 |
| qwen35 0.8B Q5_K_S               | tg128  |     72.19 |     72.95 |      1.01 |
| qwen35 0.8B Q6_K                 | pp512  |    647.97 |    638.84 |      0.99 |
| qwen35 0.8B Q6_K                 | tg128  |     72.83 |     72.49 |      1.00 |
| qwen35 0.8B Q8_0                 | pp512  |    805.01 |    785.49 |      0.98 |
| qwen35 0.8B Q8_0                 | tg128  |     70.10 |     70.13 |      1.00 |

Signed-off-by: Adrien Gallouët <angt@huggingface.co>
2026-04-27 09:30:55 +03:00
Max Krasnyansky
5594d13224 common: fix missing exports in llama-common (#22340)
* common: refactor common/debug to move abort_on_nan into base_callback_data

Passing bool abort_on_nan as a template parameter for common_debug_cb_eval is unnecessary and creates an issue with LTO.
It should just be a member of base_callback_data instead.

* cont : cleanup

* common : use pimpl in debug.h to reduce header dependencies

Move common_debug_cb_user_data's data members (std::regex,
std::vector<uint8_t>) into a private impl struct in debug.cpp.

This removes the includes of common.h and <regex> from debug.h,
reducing transitive dependencies for any translation unit that
includes the header.

Assisted-by: llama.cpp:local pi

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
2026-04-27 08:06:39 +03:00
Georgi Gerganov
f535774325 pr2wt : symlink .pi (#22386) 2026-04-26 19:49:26 +03:00
Rithik Sharma
06a811d085 add performance-portable tuning for register-tile and subgroup matmul (#22241) 2026-04-26 09:26:28 -07:00
Gaurav Garg
78433f606f Fix recurrent state serialization for partial reads and writes (#22362)
The previous code worked only for full tensor reads and writes, and was hitting the `GGML_ASSERT(size == ggml_nbytes(tensor));` assert when tested with llama-server.
2026-04-26 13:34:40 +02:00
Johannes Gäßler
7ec36aa861 Github: set meta backend code owner (#22388) 2026-04-26 13:34:13 +02:00
Oliver Simons
b1a5bd4e0c CUDA: better coalesce data-access for contiguous concat (#22330)
Also, distribute all elements across CTAs evenly instead of launching
one CTA per dim
2026-04-26 09:21:45 +02:00
Sigbjørn Skjæret
0c6ee1cade ggml-cpu : re-enable fast gelu_quick_f16 (#22339) 2026-04-26 09:28:14 +03:00
24 changed files with 856 additions and 238 deletions

View File

@@ -53,28 +53,29 @@
/examples/speculative/ @ggerganov
/ggml/cmake/ @ggerganov
/ggml/include/ @ggerganov
/ggml/src/ggml-backend-meta.cpp @JohannesGaessler
/ggml/src/ggml-cann/ @ggml-org/ggml-cann
/ggml/src/ggml-common.h @ggerganov
/ggml/src/ggml-cpu/ @ggerganov
/ggml/src/ggml-cpu/spacemit/ @alex-spacemit
/ggml/src/ggml-cuda/ @ggml-org/ggml-cuda
/ggml/src/ggml-cuda/fattn-wmma* @IMbackK
/ggml/src/ggml-hip/ @IMbackK
/ggml/src/ggml-cuda/vendors/hip.h @IMbackK
/ggml/src/ggml-cuda/fattn-wmma* @IMbackK
/ggml/src/ggml-hexagon/ @ggml-org/ggml-hexagon
/ggml/src/ggml-hip/ @IMbackK
/ggml/src/ggml-impl.h @ggerganov
/ggml/src/ggml-metal/ @ggml-org/ggml-metal
/ggml/src/ggml-opencl/ @ggml-org/ggml-opencl
/ggml/src/ggml-hexagon/ @ggml-org/ggml-hexagon
/ggml/src/ggml-openvino/ @cavusmustafa @wine99
/ggml/src/ggml-opt.cpp @JohannesGaessler
/ggml/src/ggml-quants.* @ggerganov
/ggml/src/ggml-rpc/ @ggml-org/ggml-rpc
/ggml/src/ggml-sycl/ @ggml-org/ggml-sycl
/ggml/src/ggml-threading.* @ggerganov
/ggml/src/ggml-vulkan/ @ggml-org/ggml-vulkan
/ggml/src/ggml-virtgpu/ @kpouget
/ggml/src/ggml-vulkan/ @ggml-org/ggml-vulkan
/ggml/src/ggml-webgpu/ @ggml-org/ggml-webgpu
/ggml/src/ggml-zdnn/ @ggml-org/ggml-zdnn @Andreas-Krebbel @AlekseiNikiforovIBM
/ggml/src/ggml-openvino/ @cavusmustafa @wine99
/ggml/src/ggml.c @ggerganov
/ggml/src/ggml.cpp @ggerganov
/ggml/src/gguf.cpp @JohannesGaessler @Green-Sky

View File

@@ -1,9 +1,38 @@
#include "debug.h"
#include "common.h"
#include "log.h"
#include <cmath>
#include <regex>
#include <string>
#include <vector>
struct common_debug_cb_user_data::impl {
std::vector<uint8_t> data;
std::vector<std::regex> tensor_filters;
bool abort_on_nan{false};
};
common_debug_cb_user_data::common_debug_cb_user_data() : pimpl(std::make_unique<impl>()) {}
common_debug_cb_user_data::~common_debug_cb_user_data() = default;
common_debug_cb_user_data::common_debug_cb_user_data(common_params & params, const std::vector<std::string> & filter_patterns, bool abort_on_nan)
: pimpl(std::make_unique<impl>())
{
for (const auto & pattern : filter_patterns) {
try {
std::string anchored_pattern = "^" + pattern;
pimpl->tensor_filters.emplace_back(anchored_pattern, std::regex::optimize);
} catch (const std::regex_error & e) {
throw std::runtime_error("Invalid regex pattern '" + pattern + "': " + e.what());
}
}
pimpl->abort_on_nan = abort_on_nan;
params.cb_eval = common_debug_cb_eval;
params.cb_eval_user_data = this;
}
static std::string common_ggml_ne_string(const ggml_tensor * t) {
std::string str;
@@ -47,8 +76,7 @@ static float common_ggml_get_float_value(const uint8_t * data,
#define INDENT " "
template <bool abort>
void common_debug_print_tensor(uint8_t * data, ggml_type type, const int64_t * ne, const size_t * nb, int64_t n) {
static void common_debug_print_tensor(uint8_t * data, ggml_type type, const int64_t * ne, const size_t * nb, int64_t n, bool abort_on_nan) {
GGML_ASSERT(n > 0);
float sum = 0;
for (int64_t i3 = 0; i3 < ne[3]; i3++) {
@@ -94,7 +122,7 @@ void common_debug_print_tensor(uint8_t * data, ggml_type type, const int64_t * n
LOG(INDENT "sum = %f\n", sum);
}
if constexpr (abort) {
if (abort_on_nan) {
if (std::isnan(sum)) {
LOG("encountered NaN - aborting\n");
exit(0);
@@ -112,8 +140,9 @@ void common_debug_print_tensor(uint8_t * data, ggml_type type, const int64_t * n
* @param user_data user data to pass at each call back
* @return true to receive data or continue the graph, false otherwise
*/
template <bool abort_on_nan> bool common_debug_cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
auto * cb_data = (base_callback_data *) user_data;
bool common_debug_cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
auto * cb_data = (common_debug_cb_user_data *) user_data;
auto * pimpl = cb_data->pimpl.get();
const struct ggml_tensor * src0 = t->src[0];
const struct ggml_tensor * src1 = t->src[1];
@@ -122,10 +151,10 @@ template <bool abort_on_nan> bool common_debug_cb_eval(struct ggml_tensor * t, b
return true; // Always retrieve data
}
bool matches_filter = cb_data->tensor_filters.empty();
bool matches_filter = pimpl->tensor_filters.empty();
if (!matches_filter) {
for (const auto & filter : cb_data->tensor_filters) {
for (const auto & filter : pimpl->tensor_filters) {
if (std::regex_search(t->name, filter)) {
matches_filter = true;
break;
@@ -148,20 +177,14 @@ template <bool abort_on_nan> bool common_debug_cb_eval(struct ggml_tensor * t, b
if (!is_host) {
auto n_bytes = ggml_nbytes(t);
cb_data->data.resize(n_bytes);
ggml_backend_tensor_get(t, cb_data->data.data(), 0, n_bytes);
pimpl->data.resize(n_bytes);
ggml_backend_tensor_get(t, pimpl->data.data(), 0, n_bytes);
}
if (!ggml_is_quantized(t->type) && matches_filter) {
uint8_t * data = is_host ? (uint8_t *) t->data : cb_data->data.data();
common_debug_print_tensor<abort_on_nan>(data, t->type, t->ne, t->nb, 3);
uint8_t * data = is_host ? (uint8_t *) t->data : pimpl->data.data();
common_debug_print_tensor(data, t->type, t->ne, t->nb, 3, pimpl->abort_on_nan);
}
return true;
}
// Explicit template instantiations
template bool common_debug_cb_eval<false>(ggml_tensor *, bool, void *);
template bool common_debug_cb_eval<true>(ggml_tensor *, bool, void *);
template void common_debug_print_tensor<false>(uint8_t *, ggml_type, const int64_t *, const size_t *, int64_t);
template void common_debug_print_tensor<true>(uint8_t *, ggml_type, const int64_t *, const size_t *, int64_t);

View File

@@ -1,43 +1,31 @@
#pragma once
#include "common.h"
#include <memory>
#include <string>
#include <vector>
#include <regex>
// common debug functions and structs
// Print a tensor's detailed data
// data - the tensor's data in byte format
// type - the tensor's quantization type
// ne - the tensor dimensions array
// nb - the tensor strides array
// n - the number of rows/columns to fully print
template <bool abort_on_nan> void common_debug_print_tensor(uint8_t * data, ggml_type type, const int64_t * ne, const size_t * nb, int64_t n);
struct common_params;
// Intended to use as callback for ggml_backend_sched_eval_callback
// prints tensors that are processed in the computation graph
// by default prints all tensors, but can be configured by creating a `base_callback_data` instance with
// non-empty filter_patterns. See examples/debug.ccp for possible usage patterns
// The template parameter determines whether an error should be thrown whenever a NaN is encountered
// by default prints all tensors, but can be configured by creating a `common_debug_cb_user_data` instance with
// non-empty filter_patterns. See examples/debug.cpp for possible usage patterns
// `common_debug_cb_user_data` contains `abort_on_nan` flag that determines whether an error should be thrown whenever a NaN is encountered
// in a tensor (useful for stopping debug sessions on first erroneous tensor)
// The callback data will be passed as the third parameter (user_data)
template <bool abort_on_nan> bool common_debug_cb_eval(struct ggml_tensor * t, bool ask, void * user_data);
struct base_callback_data {
std::vector<uint8_t> data;
std::vector<std::regex> tensor_filters;
bool common_debug_cb_eval(struct ggml_tensor * t, bool ask, void * user_data);
base_callback_data() = default;
struct common_debug_cb_user_data {
struct impl;
std::unique_ptr<impl> pimpl;
base_callback_data(common_params & params, const std::vector<std::string> & filter_patterns) {
for (const auto & pattern : filter_patterns) {
try {
std::string anchored_pattern = "^" + pattern;
tensor_filters.emplace_back(anchored_pattern, std::regex::optimize);
} catch (const std::regex_error & e) {
throw std::runtime_error("Invalid regex pattern '" + pattern + "': " + e.what());
}
}
params.cb_eval = common_debug_cb_eval<false>;
params.cb_eval_user_data = this;
}
common_debug_cb_user_data();
~common_debug_cb_user_data();
common_debug_cb_user_data(const common_debug_cb_user_data &) = delete;
common_debug_cb_user_data & operator=(const common_debug_cb_user_data &) = delete;
common_debug_cb_user_data(common_params & params, const std::vector<std::string> & filter_patterns, bool abort_on_nan = false);
};
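
The pattern applied in this header is the classic pimpl idiom. Below is a minimal, self-contained sketch of it (names simplified, not the actual llama.cpp sources): the "header" half exposes only an opaque `impl` pointer, so in the real split only the .cpp half would need `<regex>`.

```cpp
#include <cstdint>
#include <memory>
#include <regex>
#include <stdexcept>
#include <string>
#include <vector>

// --- what would live in the header ---
struct debug_cb_user_data {
    struct impl;                    // incomplete here; defined in the source file
    std::unique_ptr<impl> pimpl;

    debug_cb_user_data(const std::vector<std::string> & patterns, bool abort_on_nan);
    ~debug_cb_user_data();          // out-of-line: unique_ptr needs a complete impl to delete
};

// --- what would live in the .cpp ---
struct debug_cb_user_data::impl {
    std::vector<std::regex> tensor_filters;
    std::vector<uint8_t>    data;
    bool                    abort_on_nan = false;
};

debug_cb_user_data::debug_cb_user_data(const std::vector<std::string> & patterns, bool abort_on_nan)
    : pimpl(std::make_unique<impl>()) {
    for (const auto & p : patterns) {
        try {
            pimpl->tensor_filters.emplace_back("^" + p, std::regex::optimize);
        } catch (const std::regex_error & e) {
            throw std::runtime_error("Invalid regex pattern '" + p + "': " + e.what());
        }
    }
    pimpl->abort_on_nan = abort_on_nan;
}

debug_cb_user_data::~debug_cb_user_data() = default;
```

Clients that only construct and pass the struct around no longer pull in `<regex>` or common.h transitively.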

View File

@@ -627,7 +627,7 @@ static hf_cache::hf_file find_best_model(const hf_cache::hf_files & files,
if (!tag.empty()) {
tags.push_back(tag);
} else {
tags = {"Q4_K_M", "Q4_0"};
tags = {"Q4_K_M", "Q8_0"};
}
for (const auto & t : tags) {

View File

@@ -856,7 +856,7 @@ void common_memory_breakdown_print(const struct llama_context * ctx) {
ggml_backend_dev_memory(dev, &free, &total);
const size_t self = mb.model + mb.context + mb.compute;
const size_t unaccounted = total - self - free;
const int64_t unaccounted = static_cast<int64_t>(total) - static_cast<int64_t>(free) - static_cast<int64_t>(self);
table_data.push_back({
template_gpu,
@@ -867,7 +867,7 @@ void common_memory_breakdown_print(const struct llama_context * ctx) {
std::to_string(mb.model / MiB),
std::to_string(mb.context / MiB),
std::to_string(mb.compute / MiB),
std::to_string(unaccounted / MiB)});
std::to_string(unaccounted / static_cast<int64_t>(MiB))});
}
// print memory breakdown for host:
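
For context on why the cast matters: with `size_t` operands the subtraction wraps around whenever `self + free > total`, reporting a huge bogus "unaccounted" figure instead of a small negative one. A tiny standalone illustration (the byte values are made up):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    const size_t total = 8ull << 30;   // 8 GiB reported by the device
    const size_t free_ = 6ull << 30;   // 6 GiB currently free
    const size_t self  = 3ull << 30;   // 3 GiB attributed to model/context/compute

    const size_t  wrapped = total - self - free_;                                // underflows
    const int64_t fixed   = (int64_t) total - (int64_t) free_ - (int64_t) self;  // -1 GiB

    std::printf("unsigned: %zu bytes\nsigned:   %lld bytes\n", wrapped, (long long) fixed);
    return 0;
}
```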

View File

@@ -272,6 +272,22 @@ class ModelBase:
return tensors
@staticmethod
def _scale_is_trivial(scale: Tensor) -> bool:
return scale.numel() <= 1 and abs(float(scale.float().sum()) - 1.0) < 1e-6
def _write_scale_tensor(self, scale_name: str, scale: Tensor):
if not self._scale_is_trivial(scale):
scale_f32 = scale.float().numpy().flatten()
logger.info(f" + {scale_name} (per-tensor scale, shape [{scale_f32.size}])")
self.gguf_writer.add_tensor(scale_name, scale_f32)
def _write_scales_tensor(self, scale_name: str, scales: list[float]):
if not np.allclose(scales, 1.0, atol=1e-6):
scale_vals = np.array(scales, dtype=np.float32)
logger.info(f" + {scale_name} (per-expert scale, shape [{len(scales)}])")
self.gguf_writer.add_tensor(scale_name, scale_vals)
def dequant_model(self):
# If all quantized tensors were already handled (e.g. pure NVFP4), skip
if self._is_nvfp4 and not any(k.endswith((".weight_scale", ".weight_scale_inv")) for k in self.model_tensors):
@@ -494,7 +510,7 @@ class ModelBase:
s = self.model_tensors[name]
self.model_tensors[weight_name] = lambda w=w, s=s: dequant_simple(w(), s(), None)
tensors_to_remove.append(name)
if name.endswith((".k_scale", ".v_scale")):
if name.endswith((".input_scale", ".k_scale", ".v_scale")):
tensors_to_remove.append(name)
elif quant_method is not None:
raise NotImplementedError(f"Quant method is not yet supported: {quant_method!r}")
@@ -602,10 +618,6 @@ class ModelBase:
raw = np.concatenate([d_grouped, qs_grouped], axis=-1).reshape(out_features, n_super * 36)
return raw, [out_features, n_super * 64]
@staticmethod
def _nvfp4_scale2_is_trivial(scale2: Tensor) -> bool:
return scale2.numel() <= 1 and abs(float(scale2.float().sum()) - 1.0) < 1e-6
def _repack_nvfp4(self, name: str, weight: Tensor, scale: Tensor, scale2: Tensor, input_scale: Tensor):
if "language_model." in name:
name = name.replace("language_model.", "")
@@ -616,19 +628,8 @@ class ModelBase:
logger.info(f"Repacked {new_name} with shape {shape} and quantization NVFP4")
self.gguf_writer.add_tensor(new_name, raw, raw_dtype=gguf.GGMLQuantizationType.NVFP4)
# Emit per-tensor scale2 as a separate F32 tensor when non-trivial
if not self._nvfp4_scale2_is_trivial(scale2):
scale2_f32 = scale2.float().numpy().flatten()
scale_name = new_name.replace(".weight", ".scale")
logger.info(f" + {scale_name} (per-tensor NVFP4 scale2, shape [{scale2_f32.size}])")
self.gguf_writer.add_tensor(scale_name, scale2_f32)
# Emit per-tensor input_scale as a separate F32 tensor when non-trivial
if not self._nvfp4_scale2_is_trivial(input_scale):
input_scale_f32 = input_scale.float().numpy().flatten()
input_scale_name = new_name.replace(".weight", ".input_scale")
logger.info(f" + {input_scale_name} (per-tensor NVFP4 input_scale, shape [{input_scale_f32.size}])")
self.gguf_writer.add_tensor(input_scale_name, input_scale_f32)
self._write_scale_tensor(new_name.replace(".weight", ".scale"), scale2)
self._write_scale_tensor(new_name.replace(".weight", ".input_scale"), input_scale)
def _generate_nvfp4_tensors(self):
# Per-layer expert merging to avoid holding all experts in memory
@@ -719,21 +720,11 @@ class ModelBase:
logger.info(f"Repacked {new_name} with shape [{len(experts)}, {shape[0]}, {shape[1]}] and quantization NVFP4")
self.gguf_writer.add_tensor(new_name, merged, raw_dtype=gguf.GGMLQuantizationType.NVFP4)
# Emit per-expert scale2 tensor if any expert has non-trivial scale2
scales.sort(key=lambda x: x[0])
scale_vals = np.array([s[1] for s in scales], dtype=np.float32)
if not np.allclose(scale_vals, 1.0, atol=1e-6):
scale_name = new_name.replace(".weight", ".scale")
logger.info(f" + {scale_name} (per-expert NVFP4 scale2, shape [{len(scales)}])")
self.gguf_writer.add_tensor(scale_name, scale_vals)
self._write_scales_tensor(new_name.replace(".weight", ".scale"), [s[1] for s in scales])
# Emit per-expert input_scale tensor if any expert has non-trivial input_scale
input_scales.sort(key=lambda x: x[0])
input_scale_vals = np.array([s[1] for s in input_scales], dtype=np.float32)
if not np.allclose(input_scale_vals, 1.0, atol=1e-6):
input_scale_name = new_name.replace(".weight", ".input_scale")
logger.info(f" + {input_scale_name} (per-expert NVFP4 input_scale, shape [{len(input_scales)}])")
self.gguf_writer.add_tensor(input_scale_name, input_scale_vals)
self._write_scales_tensor(new_name.replace(".weight", ".input_scale"), [s[1] for s in input_scales])
del experts, merged

View File

@@ -202,10 +202,14 @@ static bool run(llama_context * ctx, const common_params & params) {
print_tokenized_prompt(ctx, tokens, params.prompt);
if (params.save_logits) {
output_data output {ctx, model, params};
std::filesystem::path model_path{params.model.path};
std::string model_name{model_path.stem().string()};
save_output_data(output, model_name, params.logits_output_dir);
try {
output_data output {ctx, model, params};
std::filesystem::path model_path{params.model.path};
std::string model_name{model_path.stem().string()};
save_output_data(output, model_name, params.logits_output_dir);
} catch (const std::exception & e) {
LOG_ERR("%s : error saving logits: %s\n", __func__, e.what());
}
}
return true;
@@ -223,7 +227,7 @@ int main(int argc, char ** argv) {
llama_backend_init();
llama_numa_init(params.numa);
std::optional<base_callback_data> cb_data;
std::optional<common_debug_cb_user_data> cb_data;
if (!params.save_logits) {
cb_data.emplace(params, params.tensor_filter);
}

View File

@@ -3,7 +3,6 @@
#include "debug.h"
#include "log.h"
#include "llama.h"
#include "llama-cpp.h"
#include <clocale>
#include <string>
@@ -38,7 +37,7 @@ static bool run(llama_context * ctx, const common_params & params) {
int main(int argc, char ** argv) {
std::setlocale(LC_NUMERIC, "C");
base_callback_data cb_data;
common_debug_cb_user_data cb_data;
common_params params;
@@ -53,7 +52,7 @@ int main(int argc, char ** argv) {
// pass the callback to the backend scheduler
// it will be executed for each node during the graph computation
params.cb_eval = common_debug_cb_eval<false>;
params.cb_eval = common_debug_cb_eval;
params.cb_eval_user_data = &cb_data;
params.warmup = false;

View File

@@ -1205,40 +1205,57 @@ static void ggml_backend_meta_buffer_set_tensor(ggml_backend_buffer_t buffer, gg
if (split_state.n_segments != 1) {
GGML_ASSERT(split_state.axis >= 0 && split_state.axis < GGML_MAX_DIMS);
GGML_ASSERT(offset == 0);
GGML_ASSERT(size == ggml_nbytes(tensor));
GGML_ASSERT(tensor->ne[3] == 1);
size_t offset_data = 0;
std::vector<size_t> simple_offsets(n_bufs, 0);
if (split_state.axis == GGML_BACKEND_SPLIT_AXIS_0) {
GGML_ASSERT(tensor->ne[2] == 1);
const size_t row_stride = tensor->nb[1];
GGML_ASSERT(offset % row_stride == 0);
GGML_ASSERT(size % row_stride == 0);
const int64_t r_start = offset / row_stride;
const int64_t r_count = size / row_stride;
GGML_ASSERT(r_start + r_count <= tensor->ne[1]);
const int64_t blck_size = ggml_blck_size(tensor->type);
for (size_t s = 0; s < split_state.n_segments; s++) {
for (size_t j = 0; j < n_bufs; j++) {
ggml_tensor * simple_tensor = ggml_backend_meta_buffer_simple_tensor(tensor, j);
GGML_ASSERT(split_state.ne[s*n_bufs + j] % blck_size == 0);
const size_t nbytes = split_state.ne[s*n_bufs + j]/blck_size * tensor->nb[0];
ggml_backend_tensor_set_2d(simple_tensor, (const char *) data + offset_data, simple_offsets[j], nbytes,
tensor->ne[1], simple_tensor->nb[1], tensor->nb[1]);
ggml_backend_tensor_set_2d(simple_tensor, (const char *) data + offset_data,
simple_offsets[j] + r_start * simple_tensor->nb[1], nbytes,
r_count, simple_tensor->nb[1], tensor->nb[1]);
offset_data += nbytes;
simple_offsets[j] += nbytes;
}
}
GGML_ASSERT(offset_data*tensor->ne[1] == size);
GGML_ASSERT(offset_data*r_count == size);
return;
}
GGML_ASSERT(split_state.axis == GGML_BACKEND_SPLIT_AXIS_1);
const size_t row_stride = tensor->nb[2];
GGML_ASSERT(offset % row_stride == 0);
GGML_ASSERT(size % row_stride == 0);
const int64_t r_start = offset / row_stride;
const int64_t r_count = size / row_stride;
GGML_ASSERT(r_start + r_count <= tensor->ne[2]);
for (size_t s = 0; s < split_state.n_segments; s++) {
for (size_t j = 0; j < n_bufs; j++) {
ggml_tensor * simple_tensor = ggml_backend_meta_buffer_simple_tensor(tensor, j);
const size_t nbytes = split_state.ne[s*n_bufs + j] * tensor->nb[1];
ggml_backend_tensor_set_2d(simple_tensor, (const char *) data + offset_data, simple_offsets[j], nbytes,
tensor->ne[2], simple_tensor->nb[2], tensor->nb[2]);
ggml_backend_tensor_set_2d(simple_tensor, (const char *) data + offset_data,
simple_offsets[j] + r_start * simple_tensor->nb[2], nbytes,
r_count, simple_tensor->nb[2], tensor->nb[2]);
offset_data += nbytes;
simple_offsets[j] += nbytes;
}
}
GGML_ASSERT(offset_data*tensor->ne[2] == size);
GGML_ASSERT(offset_data*r_count == size);
return;
}
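
The essence of the change above is that `offset`/`size` no longer have to cover the whole tensor: the byte range is translated into a row range along the split axis, provided it covers whole rows. A hedged, standalone sketch of that translation (the helper and its names are hypothetical):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

struct row_range { int64_t start; int64_t count; };

// Map a partial byte range [offset, offset + size) onto whole rows of stride row_stride.
static row_range bytes_to_rows(size_t offset, size_t size, size_t row_stride, int64_t n_rows) {
    assert(offset % row_stride == 0);   // must start on a row boundary
    assert(size   % row_stride == 0);   // must cover whole rows
    const row_range r { (int64_t)(offset / row_stride), (int64_t)(size / row_stride) };
    assert(r.start + r.count <= n_rows);
    return r;
}
```

Each per-buffer segment is then copied starting at `r.start * simple_tensor->nb[...]` for `r.count` rows, which is exactly what the updated `ggml_backend_tensor_set_2d`/`get_2d` calls do.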
@@ -1295,40 +1312,57 @@ static void ggml_backend_meta_buffer_get_tensor(ggml_backend_buffer_t buffer, co
if (split_state.n_segments != 1) {
GGML_ASSERT(split_state.axis >= 0 && split_state.axis < GGML_MAX_DIMS);
GGML_ASSERT(offset == 0);
GGML_ASSERT(size == ggml_nbytes(tensor));
GGML_ASSERT(tensor->ne[3] == 1);
size_t offset_data = 0;
std::vector<size_t> simple_offsets(n_bufs, 0);
if (split_state.axis == GGML_BACKEND_SPLIT_AXIS_0) {
GGML_ASSERT(tensor->ne[2] == 1);
const size_t row_stride = tensor->nb[1];
GGML_ASSERT(offset % row_stride == 0);
GGML_ASSERT(size % row_stride == 0);
const int64_t r_start = offset / row_stride;
const int64_t r_count = size / row_stride;
GGML_ASSERT(r_start + r_count <= tensor->ne[1]);
const int64_t blck_size = ggml_blck_size(tensor->type);
for (size_t s = 0; s < split_state.n_segments; s++) {
for (size_t j = 0; j < n_bufs; j++) {
const ggml_tensor * simple_tensor = ggml_backend_meta_buffer_simple_tensor(tensor, j);
GGML_ASSERT(split_state.ne[s*n_bufs + j] % blck_size == 0);
const size_t nbytes = split_state.ne[s*n_bufs + j]/blck_size * tensor->nb[0];
ggml_backend_tensor_get_2d(simple_tensor, (char *) data + offset_data, simple_offsets[j], nbytes,
tensor->ne[1], simple_tensor->nb[1], tensor->nb[1]);
ggml_backend_tensor_get_2d(simple_tensor, (char *) data + offset_data,
simple_offsets[j] + r_start * simple_tensor->nb[1], nbytes,
r_count, simple_tensor->nb[1], tensor->nb[1]);
offset_data += nbytes;
simple_offsets[j] += nbytes;
}
}
GGML_ASSERT(offset_data*tensor->ne[1] == size);
GGML_ASSERT(offset_data*r_count == size);
return;
}
GGML_ASSERT(split_state.axis == GGML_BACKEND_SPLIT_AXIS_1);
const size_t row_stride = tensor->nb[2];
GGML_ASSERT(offset % row_stride == 0);
GGML_ASSERT(size % row_stride == 0);
const int64_t r_start = offset / row_stride;
const int64_t r_count = size / row_stride;
GGML_ASSERT(r_start + r_count <= tensor->ne[2]);
for (size_t s = 0; s < split_state.n_segments; s++) {
for (size_t j = 0; j < n_bufs; j++) {
const ggml_tensor * simple_tensor = ggml_backend_meta_buffer_simple_tensor(tensor, j);
const size_t nbytes = split_state.ne[s*n_bufs + j] * tensor->nb[1];
ggml_backend_tensor_get_2d(simple_tensor, (char *) data + offset_data, simple_offsets[j], nbytes,
tensor->ne[2], simple_tensor->nb[2], tensor->nb[2]);
ggml_backend_tensor_get_2d(simple_tensor, (char *) data + offset_data,
simple_offsets[j] + r_start * simple_tensor->nb[2], nbytes,
r_count, simple_tensor->nb[2], tensor->nb[2]);
offset_data += nbytes;
simple_offsets[j] += nbytes;
}
}
GGML_ASSERT(offset_data*tensor->ne[2] == size);
GGML_ASSERT(offset_data*r_count == size);
return;
}

View File

@@ -2005,12 +2005,12 @@ void tinygemm_kernel_amx(int M, int N, int KB, const void * RESTRICT _A, const v
const int lda = KB * sizeof(TA);
//const int ldb = KB * sizeof(TB);
static thread_local packed_B_t Tile0[TILE_N * TILE_K];
static thread_local packed_B_t Tile1[TILE_N * TILE_K];
static thread_local int8_t Tile23[TILE_M * TILE_K];
alignas(64) static thread_local packed_B_t Tile0[TILE_N * TILE_K];
alignas(64) static thread_local packed_B_t Tile1[TILE_N * TILE_K];
alignas(64) static thread_local int8_t Tile23[TILE_M * TILE_K];
static thread_local int32_t TileC0[TILE_M * TILE_N * 4];
static thread_local int32_t TileC1[TILE_M * TILE_N * 4];
alignas(64) static thread_local int32_t TileC0[TILE_M * TILE_N * 4];
alignas(64) static thread_local int32_t TileC1[TILE_M * TILE_N * 4];
// double buffering C to interleave avx512 and amx
int32_t * C_cur = TileC0;
@@ -2187,21 +2187,21 @@ void tinygemm_kernel_amx(int M, int N, int KB, const void * RESTRICT _A, const v
const int m1 = std::max(M - TILE_M, 0);
//const int lda = KB * sizeof(TA);
static thread_local int8_t Tile0[TILE_N * TILE_K];
static thread_local int8_t Tile1[TILE_N * TILE_K];
static thread_local int8_t Tile23[TILE_M * TILE_K];
alignas(64) static thread_local int8_t Tile0[TILE_N * TILE_K];
alignas(64) static thread_local int8_t Tile1[TILE_N * TILE_K];
alignas(64) static thread_local int8_t Tile23[TILE_M * TILE_K];
// mat mul result for each group
static thread_local int32_t Tile4[TILE_M * TILE_N];
static thread_local int32_t Tile5[TILE_M * TILE_N];
static thread_local int32_t Tile6[TILE_M * TILE_N];
static thread_local int32_t Tile7[TILE_M * TILE_N];
alignas(64) static thread_local int32_t Tile4[TILE_M * TILE_N];
alignas(64) static thread_local int32_t Tile5[TILE_M * TILE_N];
alignas(64) static thread_local int32_t Tile6[TILE_M * TILE_N];
alignas(64) static thread_local int32_t Tile7[TILE_M * TILE_N];
// sum of each QK_K block, contains 8 groups, int32
static thread_local int32_t Sumi4[TILE_M * TILE_N];
static thread_local int32_t Sumi5[TILE_M * TILE_N];
static thread_local int32_t Sumi6[TILE_M * TILE_N];
static thread_local int32_t Sumi7[TILE_M * TILE_N];
alignas(64) static thread_local int32_t Sumi4[TILE_M * TILE_N];
alignas(64) static thread_local int32_t Sumi5[TILE_M * TILE_N];
alignas(64) static thread_local int32_t Sumi6[TILE_M * TILE_N];
alignas(64) static thread_local int32_t Sumi7[TILE_M * TILE_N];
const int k_group_size = std::is_same<TB, block_q6_K>::value ? 16 : 32;
for (int i = 0; i < KB; ++i) {

View File

@@ -1036,12 +1036,12 @@ inline static float ggml_gelu_quick_f32(float x) {
return x*(1.0f/(1.0f+expf(GELU_QUICK_COEF*x)));
}
//inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
// const uint16_t * i16 = (const uint16_t *) x;
// for (int i = 0; i < n; ++i) {
// y[i] = ggml_table_gelu_quick_f16[i16[i]];
// }
//}
inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
const uint16_t * i16 = (const uint16_t *) x;
for (int i = 0; i < n; ++i) {
y[i] = ggml_table_gelu_quick_f16[i16[i]];
}
}
#ifdef GGML_GELU_QUICK_FP16
inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
@@ -1060,13 +1060,6 @@ inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float *
}
#endif
inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
for (int i = 0; i < n; ++i) {
float v = GGML_CPU_FP16_TO_FP32(x[i]);
y[i] = GGML_CPU_FP32_TO_FP16(v*(1.0f/(1.0f+expf(GELU_QUICK_COEF*v))));
}
}
// Sigmoid Linear Unit (SiLU) function
inline static float ggml_silu_f32(float x) {
return x/(1.0f + expf(-x));

View File

@@ -1,96 +1,79 @@
#include "concat.cuh"
// contiguous kernels
static __global__ void concat_f32_dim0(const float * x, const float * y, float * dst, const int ne0, const int ne00) {
int nidx = threadIdx.x + blockIdx.x * blockDim.x;
if (nidx >= ne0) {
return;
}
template <int dim>
static __global__ void __launch_bounds__(CUDA_CONCAT_BLOCK_SIZE) concat_f32_cont(const float * x,
const float * y,
float * dst,
int64_t ne00,
int64_t ne01,
int64_t ne02,
int64_t ne0,
int64_t ne1,
int64_t ne2) {
static_assert(dim >= 0 && dim <= 2, "dim must be in [0, 2]");
int offset_dst =
nidx +
blockIdx.y * ne0 +
blockIdx.z * ne0 * gridDim.y;
const int64_t n = ne0 * ne1 * ne2;
if (nidx < ne00) { // src0
int offset_src =
nidx +
blockIdx.y * ne00 +
blockIdx.z * ne00 * gridDim.y;
dst[offset_dst] = x[offset_src];
} else {
int offset_src =
(nidx - ne00) +
blockIdx.y * (ne0 - ne00) +
blockIdx.z * (ne0 - ne00) * gridDim.y;
dst[offset_dst] = y[offset_src];
for (int64_t i = (int64_t) blockIdx.x * blockDim.x + threadIdx.x; i < n; i += (int64_t) blockDim.x * gridDim.x) {
if constexpr (dim == 0) {
const int64_t row = i / ne0;
const int64_t i0 = i - row * ne0;
if (i0 < ne00) {
dst[i] = x[row * ne00 + i0];
} else {
dst[i] = y[row * (ne0 - ne00) + (i0 - ne00)];
}
} else if constexpr (dim == 1) {
const int64_t dst_plane = ne0 * ne1;
const int64_t src0_plane = ne0 * ne01;
const int64_t src1_plane = dst_plane - src0_plane;
const int64_t i2 = i / dst_plane;
const int64_t i01 = i - i2 * dst_plane;
if (i01 < src0_plane) {
dst[i] = x[i2 * src0_plane + i01];
} else {
dst[i] = y[i2 * src1_plane + (i01 - src0_plane)];
}
} else {
const int64_t src0_size = ne0 * ne1 * ne02;
if (i < src0_size) {
dst[i] = x[i];
} else {
dst[i] = y[i - src0_size];
}
}
}
}
static __global__ void concat_f32_dim1(const float * x, const float * y, float * dst, const int ne0, const int ne01) {
int nidx = threadIdx.x + blockIdx.x * blockDim.x;
if (nidx >= ne0) {
return;
}
static void concat_f32_cuda(const float * x,
const float * y,
float * dst,
int64_t ne00,
int64_t ne01,
int64_t ne02,
int64_t ne0,
int64_t ne1,
int64_t ne2,
int dim,
cudaStream_t stream) {
const int64_t n = ne0 * ne1 * ne2;
const int num_blocks = (n + CUDA_CONCAT_BLOCK_SIZE - 1) / CUDA_CONCAT_BLOCK_SIZE;
int offset_dst =
nidx +
blockIdx.y * ne0 +
blockIdx.z * ne0 * gridDim.y;
if (blockIdx.y < (unsigned)ne01) { // src0
int offset_src =
nidx +
blockIdx.y * ne0 +
blockIdx.z * ne0 * ne01;
dst[offset_dst] = x[offset_src];
} else {
int offset_src =
nidx +
(blockIdx.y - ne01) * ne0 +
blockIdx.z * ne0 * (gridDim.y - ne01);
dst[offset_dst] = y[offset_src];
}
}
static __global__ void concat_f32_dim2(const float * x, const float * y, float * dst, const int ne0, const int ne02) {
int nidx = threadIdx.x + blockIdx.x * blockDim.x;
if (nidx >= ne0) {
return;
}
int offset_dst =
nidx +
blockIdx.y * ne0 +
blockIdx.z * ne0 * gridDim.y;
if (blockIdx.z < (unsigned)ne02) { // src0
int offset_src =
nidx +
blockIdx.y * ne0 +
blockIdx.z * ne0 * gridDim.y;
dst[offset_dst] = x[offset_src];
} else {
int offset_src =
nidx +
blockIdx.y * ne0 +
(blockIdx.z - ne02) * ne0 * gridDim.y;
dst[offset_dst] = y[offset_src];
}
}
static void concat_f32_cuda(const float * x, const float * y, float * dst, int ne00, int ne01, int ne02, int ne0, int ne1, int ne2, int dim, cudaStream_t stream) {
int num_blocks = (ne0 + CUDA_CONCAT_BLOCK_SIZE - 1) / CUDA_CONCAT_BLOCK_SIZE;
dim3 gridDim(num_blocks, ne1, ne2);
if (dim == 0) {
concat_f32_dim0<<<gridDim, CUDA_CONCAT_BLOCK_SIZE, 0, stream>>>(x, y, dst, ne0, ne00);
concat_f32_cont<0>
<<<num_blocks, CUDA_CONCAT_BLOCK_SIZE, 0, stream>>>(x, y, dst, ne00, ne01, ne02, ne0, ne1, ne2);
return;
}
if (dim == 1) {
concat_f32_dim1<<<gridDim, CUDA_CONCAT_BLOCK_SIZE, 0, stream>>>(x, y, dst, ne0, ne01);
concat_f32_cont<1>
<<<num_blocks, CUDA_CONCAT_BLOCK_SIZE, 0, stream>>>(x, y, dst, ne00, ne01, ne02, ne0, ne1, ne2);
return;
}
concat_f32_dim2<<<gridDim, CUDA_CONCAT_BLOCK_SIZE, 0, stream>>>(x, y, dst, ne0, ne02);
concat_f32_cont<2><<<num_blocks, CUDA_CONCAT_BLOCK_SIZE, 0, stream>>>(x, y, dst, ne00, ne01, ne02, ne0, ne1, ne2);
}
// non-contiguous kernel (slow)
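
For reference on the contiguous path above, the `dim == 0` branch of the new kernel reduces to the following host-side index math (a standalone sketch, not the CUDA code itself): each flat output index `i` is split into a row and a column, and the column decides whether the element comes from `x` or `y`.

```cpp
#include <cstdint>
#include <vector>

// Reference concat along dim 0: dst has ne0 columns per row, x contributes the first ne00.
// The caller is expected to size dst to ne0 * n_rows.
static void concat_dim0_ref(const std::vector<float> & x, const std::vector<float> & y,
                            std::vector<float> & dst,
                            int64_t ne00, int64_t ne0, int64_t n_rows) {
    for (int64_t i = 0; i < ne0 * n_rows; ++i) {
        const int64_t row = i / ne0;
        const int64_t i0  = i - row * ne0;
        dst[i] = (i0 < ne00) ? x[row * ne00 + i0]
                             : y[row * (ne0 - ne00) + (i0 - ne00)];
    }
}
```

Because the mapping is pure integer math on a flat index, the kernel can walk it with a 1D grid-stride loop and keep all CTAs evenly loaded, instead of launching one CTA per (row, plane) position.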

View File

@@ -1101,7 +1101,7 @@ bool rpc_server::set_tensor(const std::vector<uint8_t> & input) {
fs::path cache_file = fs::path(cache_dir) / hash_str;
std::ofstream ofs(cache_file, std::ios::binary);
ofs.write((const char *)data, size);
GGML_LOG_INFO("[%s] saved to '%s'\n", __func__, cache_file.c_str());
GGML_LOG_INFO("[%s] saved to '%s'\n", __func__, cache_file.string().c_str());
}
ggml_backend_tensor_set(tensor, data, offset, size);
return true;
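
The `.string().c_str()` change matters because on Windows `std::filesystem::path::c_str()` returns a `const wchar_t *`, which does not match the `%s` format specifier. A small portable sketch (the path here is chosen arbitrarily for illustration):

```cpp
#include <cstdio>
#include <filesystem>

int main() {
    const std::filesystem::path cache_file =
        std::filesystem::temp_directory_path() / "rpc" / "cache.bin";
    // path::c_str() is wchar_t* on Windows; .string() yields a narrow string on every platform.
    std::printf("saved to '%s'\n", cache_file.string().c_str());
    return 0;
}
```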

View File

@@ -26,20 +26,23 @@
// Matrix multiplication parameters
// Register tiling parameters
#define WEBGPU_MUL_MAT_TILE_M 8
#define WEBGPU_MUL_MAT_TILE_N 8
#define WEBGPU_MUL_MAT_TILE_M 4
#define WEBGPU_MUL_MAT_TILE_N 4
#define WEBGPU_MUL_MAT_WG_SIZE_M 8
#define WEBGPU_MUL_MAT_WG_SIZE_N 8
#define WEBGPU_MUL_MAT_TILE_K 32
#define WEBGPU_MUL_MAT_REG_TILE_K_FLOAT 8
#define WEBGPU_MUL_MAT_REG_TILE_K_QUANT 32
// Subgroup matrix parameters
// The number of subgroups in the M dimension
#define WEBGPU_MUL_MAT_SUBGROUP_M 2
// The number of subgroups in the N dimension
#define WEBGPU_MUL_MAT_SUBGROUP_N 2
#define WEBGPU_MUL_MAT_SUBGROUP_N 4
// The number of subgroup matrices each subgroup accumulates over
#define WEBGPU_MUL_MAT_SUBGROUP_MATRIX_M 4
#define WEBGPU_MUL_MAT_SUBGROUP_MATRIX_N 2
#define WEBGPU_MUL_MAT_SUBGROUP_TILE_K_FLOAT 32
#define WEBGPU_MUL_MAT_SUBGROUP_TILE_K_QUANT 32
// Matrix-vector multiplication parameters
#define WEBGPU_MUL_MAT_VEC_WG_SIZE 256
@@ -1612,6 +1615,24 @@ class ggml_webgpu_shader_lib {
defines.push_back("MUL_ACC_" + type_upper);
defines.push_back("U32_DEQUANT_HELPERS");
defines.push_back("SRC0_INNER_TYPE=u32");
switch (context.src0->type) {
case GGML_TYPE_IQ1_S:
case GGML_TYPE_IQ1_M:
case GGML_TYPE_IQ2_S:
case GGML_TYPE_IQ3_S:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
defines.push_back(type_upper + "_GRID");
break;
case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_IQ2_XS:
case GGML_TYPE_IQ3_XXS:
defines.push_back(type_upper + "_GRID");
defines.push_back(type_upper + "_TABLES");
break;
default:
break;
}
break;
}
}
@@ -1734,13 +1755,24 @@ class ggml_webgpu_shader_lib {
// VEC/SCALAR controls
defines.push_back(key.vectorized ? "VEC" : "SCALAR");
const bool is_quant = ggml_is_quantized(context.src0->type);
uint32_t tile_k;
if (key.use_subgroup_matrix) {
tile_k = is_quant ? WEBGPU_MUL_MAT_SUBGROUP_TILE_K_QUANT
: WEBGPU_MUL_MAT_SUBGROUP_TILE_K_FLOAT;
} else {
tile_k = is_quant ? WEBGPU_MUL_MAT_REG_TILE_K_QUANT
: WEBGPU_MUL_MAT_REG_TILE_K_FLOAT;
}
// Tiles
defines.push_back("TILE_M=" + std::to_string(WEBGPU_MUL_MAT_TILE_M) + "u");
defines.push_back("TILE_N=" + std::to_string(WEBGPU_MUL_MAT_TILE_N) + "u");
defines.push_back("TILE_K=" + std::to_string(WEBGPU_MUL_MAT_TILE_K) + "u");
// Subgroup matrix specifics
if (key.use_subgroup_matrix) {
defines.push_back("TILE_K=" + std::to_string(tile_k) + "u");
defines.push_back("MAX_SUBGROUP_SIZE=" + std::to_string(context.max_subgroup_size) + "u");
defines.push_back("SUBGROUP_M=" + std::to_string(WEBGPU_MUL_MAT_SUBGROUP_M) + "u");
defines.push_back("SUBGROUP_N=" + std::to_string(WEBGPU_MUL_MAT_SUBGROUP_N) + "u");
@@ -1760,12 +1792,13 @@ class ggml_webgpu_shader_lib {
if (!key.use_subgroup_matrix) {
defines.push_back("WORKGROUP_SIZE_M=" + std::to_string(WEBGPU_MUL_MAT_WG_SIZE_M) + "u");
defines.push_back("WORKGROUP_SIZE_N=" + std::to_string(WEBGPU_MUL_MAT_WG_SIZE_N) + "u");
defines.push_back("TILE_K=" + std::to_string(tile_k) + "u");
}
auto processed = preprocessor.preprocess(shader_src, defines);
auto decisions = std::make_shared<ggml_webgpu_mul_mat_shader_decisions>();
decisions->tile_k = WEBGPU_MUL_MAT_TILE_K;
decisions->tile_k = tile_k;
decisions->tile_m = WEBGPU_MUL_MAT_TILE_M;
decisions->tile_n = WEBGPU_MUL_MAT_TILE_N;
decisions->use_subgroup_matrix = key.use_subgroup_matrix;
@@ -1962,10 +1995,15 @@ class ggml_webgpu_shader_lib {
defines.push_back("SCALAR");
// mul_mat_id is register-tile only.
const uint32_t tile_k = ggml_is_quantized(context.src0->type)
? WEBGPU_MUL_MAT_REG_TILE_K_QUANT
: WEBGPU_MUL_MAT_REG_TILE_K_FLOAT;
// Tiles
defines.push_back("TILE_M=" + std::to_string(WEBGPU_MUL_MAT_TILE_M) + "u");
defines.push_back("TILE_N=" + std::to_string(WEBGPU_MUL_MAT_TILE_N) + "u");
defines.push_back("TILE_K=" + std::to_string(WEBGPU_MUL_MAT_TILE_K) + "u");
defines.push_back("TILE_K=" + std::to_string(tile_k) + "u");
defines.push_back("WORKGROUP_SIZE_M=" + std::to_string(WEBGPU_MUL_MAT_WG_SIZE_M) + "u");
defines.push_back("WORKGROUP_SIZE_N=" + std::to_string(WEBGPU_MUL_MAT_WG_SIZE_N) + "u");
@@ -1976,7 +2014,7 @@ class ggml_webgpu_shader_lib {
auto processed = preprocessor.preprocess(wgsl_mul_mat_id, defines);
auto decisions = std::make_shared<ggml_webgpu_mul_mat_shader_decisions>();
decisions->tile_k = WEBGPU_MUL_MAT_TILE_K;
decisions->tile_k = tile_k;
decisions->tile_m = WEBGPU_MUL_MAT_TILE_M;
decisions->tile_n = WEBGPU_MUL_MAT_TILE_N;
decisions->wg_size_m = WEBGPU_MUL_MAT_WG_SIZE_M;

View File

@@ -1391,6 +1391,17 @@ static webgpu_encoded_op ggml_webgpu_mul_mat(webgpu_context & ctx,
case GGML_TYPE_Q2_K:
use_fast = true;
break;
case GGML_TYPE_IQ1_S:
case GGML_TYPE_IQ1_M:
case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_IQ2_XS:
case GGML_TYPE_IQ2_S:
case GGML_TYPE_IQ3_XXS:
case GGML_TYPE_IQ3_S:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
use_fast = is_vec;
break;
default:
break;
}

View File

@@ -812,6 +812,520 @@ fn main(
}
#endif
#ifdef MUL_ACC_IQ1_S
#define BLOCK_SIZE 256
#define BLOCK_SIZE_BYTES 50
#define THREADS_PER_BLOCK 16
let tid = thread_id % THREADS_PER_BLOCK;
let block_group = thread_id / THREADS_PER_BLOCK;
let num_block_groups: u32 = WG_SIZE / THREADS_PER_BLOCK;
let sub_blk = tid / 2u;
let half = tid % 2u;
let slot0 = half * 2u;
let y_offset = sub_blk * 32u + slot0 * 8u;
let num_blocks = params.k / BLOCK_SIZE;
for (var block = block_group; block < num_blocks; block += num_block_groups) {
let x_base = src1_idx_base + block * BLOCK_SIZE + y_offset;
var x_block: array<f32, 16>;
for (var i = 0u; i < 16u; i++) {
x_block[i] = f32(src1[x_base + i]);
}
for (var row = 0u; row < OUTPUTS_PER_WG; row++) {
let output_row = row_base + row;
if (output_row < params.m) {
let block_byte_base = (src0_batch_offset + output_row * params.stride_01 + block) * BLOCK_SIZE_BYTES;
let d = f32(load_f16_at_src0(block_byte_base));
let qh = load_u32_at_src0(block_byte_base + 34u + sub_blk * 2u) & 0xFFFFu;
let dl = d * f32(2u * ((qh >> 12u) & 7u) + 1u);
let delta = select(IQ1_DELTA, -IQ1_DELTA, (qh & 0x8000u) != 0u);
let qs_w = load_u32_at_src0(block_byte_base + 2u + sub_blk * 4u);
var row_sum = 0.0;
for (var ll = 0u; ll < 2u; ll++) {
let l = slot0 + ll;
let qs_byte = get_byte(qs_w, l);
let ig = (qs_byte | (((qh >> (3u * l)) & 7u) << 8u)) * 8u;
let gw = iq1_grid[ig / 16u];
let bit_base = (ig % 16u) * 2u;
for (var j = 0u; j < 8u; j++) {
let g = (gw >> (bit_base + j * 2u)) & 3u;
let gs = select(f32(g), f32(g) - 4.0, (g & 2u) != 0u);
row_sum += dl * (gs + delta) * x_block[ll * 8u + j];
}
}
acc[row] += row_sum;
}
}
}
#endif
#ifdef MUL_ACC_IQ1_M
#define BLOCK_SIZE 256
#define BLOCK_SIZE_BYTES 56
#define THREADS_PER_BLOCK 16
let tid = thread_id % THREADS_PER_BLOCK;
let block_group = thread_id / THREADS_PER_BLOCK;
let num_block_groups: u32 = WG_SIZE / THREADS_PER_BLOCK;
let sub_blk = tid / 2u;
let half = tid % 2u;
let slot0 = half * 2u;
let y_offset = sub_blk * 32u + slot0 * 8u;
let num_blocks = params.k / BLOCK_SIZE;
for (var block = block_group; block < num_blocks; block += num_block_groups) {
let x_base = src1_idx_base + block * BLOCK_SIZE + y_offset;
var x_block: array<f32, 16>;
for (var i = 0u; i < 16u; i++) {
x_block[i] = f32(src1[x_base + i]);
}
for (var row = 0u; row < OUTPUTS_PER_WG; row++) {
let output_row = row_base + row;
if (output_row < params.m) {
let block_byte_base = (src0_batch_offset + output_row * params.stride_01 + block) * BLOCK_SIZE_BYTES;
let sc_lo = load_u32_at_src0(block_byte_base + 48u);
let sc_hi = load_u32_at_src0(block_byte_base + 52u);
let sc0 = sc_lo & 0xFFFFu;
let sc1 = (sc_lo >> 16u) & 0xFFFFu;
let sc2 = sc_hi & 0xFFFFu;
let sc3 = (sc_hi >> 16u) & 0xFFFFu;
let d_bits = (sc0 >> 12u) | ((sc1 >> 8u) & 0xF0u) | ((sc2 >> 4u) & 0xF00u) | (sc3 & 0xF000u);
let d = f32(bitcast<vec2<f16>>(d_bits)[0]);
let sc_u16 = select(select(sc2, sc3, sub_blk >= 6u),
select(sc0, sc1, sub_blk >= 2u),
sub_blk < 4u);
let qs_w = load_u32_at_src0(block_byte_base + sub_blk * 4u);
let qh = load_u32_at_src0(block_byte_base + 32u + sub_blk * 2u) & 0xFFFFu;
let qh_lo = qh & 0xFFu;
let qh_hi = (qh >> 8u) & 0xFFu;
var row_sum = 0.0;
for (var ll = 0u; ll < 2u; ll++) {
let l = slot0 + ll;
let bit_off = 6u * (sub_blk % 2u) + 3u * (l / 2u);
let sub_scale = (sc_u16 >> bit_off) & 0x7u;
let dl = d * f32(2u * sub_scale + 1u);
let qh_byte = select(qh_lo, qh_hi, l >= 2u);
let ll2 = l % 2u;
let grid_idx = get_byte(qs_w, l) | (((qh_byte >> (4u * ll2)) & 7u) << 8u);
let delta = select(IQ1_DELTA, -IQ1_DELTA, ((qh_byte >> (3u + 4u * ll2)) & 1u) != 0u);
let ig = grid_idx * 8u;
let gw = iq1_grid[ig / 16u];
let bit_base = (ig % 16u) * 2u;
for (var j = 0u; j < 8u; j++) {
let g = (gw >> (bit_base + j * 2u)) & 3u;
let gs = select(f32(g), f32(g) - 4.0, (g & 2u) != 0u);
row_sum += dl * (gs + delta) * x_block[ll * 8u + j];
}
}
acc[row] += row_sum;
}
}
}
#endif
#ifdef MUL_ACC_IQ2_XXS
#define BLOCK_SIZE 256
#define BLOCK_SIZE_BYTES 66
#define THREADS_PER_BLOCK 16
let tid = thread_id % THREADS_PER_BLOCK;
let block_group = thread_id / THREADS_PER_BLOCK;
let num_block_groups: u32 = WG_SIZE / THREADS_PER_BLOCK;
let sub_blk = tid / 2u;
let half = tid % 2u;
let slot0 = half * 2u;
let y_offset = sub_blk * 32u + slot0 * 8u;
let num_blocks = params.k / BLOCK_SIZE;
for (var block = block_group; block < num_blocks; block += num_block_groups) {
let x_base = src1_idx_base + block * BLOCK_SIZE + y_offset;
var x_block: array<f32, 16>;
for (var i = 0u; i < 16u; i++) {
x_block[i] = f32(src1[x_base + i]);
}
for (var row = 0u; row < OUTPUTS_PER_WG; row++) {
let output_row = row_base + row;
if (output_row < params.m) {
let block_byte_base = (src0_batch_offset + output_row * params.stride_01 + block) * BLOCK_SIZE_BYTES;
let d = f32(load_f16_at_src0(block_byte_base));
let aux_lo = load_u32_at_src0(block_byte_base + 2u + sub_blk * 8u);
let aux_hi = load_u32_at_src0(block_byte_base + 2u + sub_blk * 8u + 4u);
let ls = aux_hi >> 28u;
let db = d * (0.5 + f32(ls)) * 0.25;
var row_sum = 0.0;
for (var ll = 0u; ll < 2u; ll++) {
let l = slot0 + ll;
let grid_idx = (aux_lo >> (8u * l)) & 0xFFu;
let signs_idx = (aux_hi >> (7u * l)) & 0x7Fu;
let signs = (ksigns_iq2xs[signs_idx / 4u] >> ((signs_idx % 4u) * 8u)) & 0xFFu;
let gw_lo = iq2xxs_grid[grid_idx * 2u];
let gw_hi = iq2xxs_grid[grid_idx * 2u + 1u];
for (var j = 0u; j < 8u; j++) {
let gw = select(gw_hi, gw_lo, j < 4u);
let b = f32((gw >> ((j & 3u) * 8u)) & 0xFFu);
let s = select(1.0, -1.0, ((signs >> j) & 1u) != 0u);
row_sum += db * b * s * x_block[ll * 8u + j];
}
}
acc[row] += row_sum;
}
}
}
#endif
#ifdef MUL_ACC_IQ2_XS
#define BLOCK_SIZE 256
#define BLOCK_SIZE_BYTES 74
#define THREADS_PER_BLOCK 16
let tid = thread_id % THREADS_PER_BLOCK;
let block_group = thread_id / THREADS_PER_BLOCK;
let num_block_groups: u32 = WG_SIZE / THREADS_PER_BLOCK;
let sub_blk = tid / 2u;
let half = tid % 2u;
let slot0 = half * 2u;
let y_offset = sub_blk * 32u + slot0 * 8u;
let num_blocks = params.k / BLOCK_SIZE;
for (var block = block_group; block < num_blocks; block += num_block_groups) {
let x_base = src1_idx_base + block * BLOCK_SIZE + y_offset;
var x_block: array<f32, 16>;
for (var i = 0u; i < 16u; i++) {
x_block[i] = f32(src1[x_base + i]);
}
for (var row = 0u; row < OUTPUTS_PER_WG; row++) {
let output_row = row_base + row;
if (output_row < params.m) {
let block_byte_base = (src0_batch_offset + output_row * params.stride_01 + block) * BLOCK_SIZE_BYTES;
let d = f32(load_f16_at_src0(block_byte_base));
let qs_lo = load_u32_at_src0(block_byte_base + 2u + sub_blk * 8u);
let qs_hi = load_u32_at_src0(block_byte_base + 2u + sub_blk * 8u + 4u);
let scales_word = load_u32_at_src0(block_byte_base + 66u + (sub_blk / 4u) * 4u);
let scales_byte = get_byte(scales_word, sub_blk % 4u);
var row_sum = 0.0;
for (var ll = 0u; ll < 2u; ll++) {
let l = slot0 + ll;
let qs_word = select(qs_hi, qs_lo, l < 2u);
let half2 = (l % 2u) * 16u;
let qs_val = (qs_word >> half2) & 0xFFFFu;
let grid_idx = qs_val & 0x1FFu;
let signs_idx = (qs_val >> 9u) & 0x7Fu;
let sub_scale = (scales_byte >> (4u * (l / 2u))) & 0xFu;
let db = d * (0.5 + f32(sub_scale)) * 0.25;
let signs = (ksigns_iq2xs[signs_idx / 4u] >> ((signs_idx % 4u) * 8u)) & 0xFFu;
let gw_lo = iq2xs_grid[grid_idx * 2u];
let gw_hi = iq2xs_grid[grid_idx * 2u + 1u];
for (var j = 0u; j < 8u; j++) {
let gw = select(gw_hi, gw_lo, j < 4u);
let b = f32((gw >> ((j & 3u) * 8u)) & 0xFFu);
let s = select(1.0, -1.0, ((signs >> j) & 1u) != 0u);
row_sum += db * b * s * x_block[ll * 8u + j];
}
}
acc[row] += row_sum;
}
}
}
#endif
#ifdef MUL_ACC_IQ2_S
#define BLOCK_SIZE 256
#define BLOCK_SIZE_BYTES 82
#define THREADS_PER_BLOCK 16
let tid = thread_id % THREADS_PER_BLOCK;
let block_group = thread_id / THREADS_PER_BLOCK;
let num_block_groups: u32 = WG_SIZE / THREADS_PER_BLOCK;
let sub_blk = tid / 2u;
let half = tid % 2u;
let slot0 = half * 2u;
let y_offset = sub_blk * 32u + slot0 * 8u;
let num_blocks = params.k / BLOCK_SIZE;
for (var block = block_group; block < num_blocks; block += num_block_groups) {
let x_base = src1_idx_base + block * BLOCK_SIZE + y_offset;
var x_block: array<f32, 16>;
for (var i = 0u; i < 16u; i++) {
x_block[i] = f32(src1[x_base + i]);
}
for (var row = 0u; row < OUTPUTS_PER_WG; row++) {
let output_row = row_base + row;
if (output_row < params.m) {
let block_byte_base = (src0_batch_offset + output_row * params.stride_01 + block) * BLOCK_SIZE_BYTES;
let d = f32(load_f16_at_src0(block_byte_base));
let qs_w = load_u32_at_src0(block_byte_base + 2u + sub_blk * 4u);
let sg_w = load_u32_at_src0(block_byte_base + 34u + sub_blk * 4u);
let qh_word = load_u32_at_src0(block_byte_base + 66u + (sub_blk / 4u) * 4u);
let qh_byte = get_byte(qh_word, sub_blk % 4u);
let sc_word = load_u32_at_src0(block_byte_base + 74u + (sub_blk / 4u) * 4u);
let scales_byte = get_byte(sc_word, sub_blk % 4u);
var row_sum = 0.0;
for (var ll = 0u; ll < 2u; ll++) {
let l = slot0 + ll;
let qs_byte = get_byte(qs_w, l);
let sign_byte = get_byte(sg_w, l);
let grid_idx = qs_byte | (((qh_byte >> (2u * l)) & 3u) << 8u);
let sub_scale = (scales_byte >> (4u * (l / 2u))) & 0xFu;
let db = d * (0.5 + f32(sub_scale)) * 0.25;
let gw_lo = iq2s_grid[grid_idx * 2u];
let gw_hi = iq2s_grid[grid_idx * 2u + 1u];
for (var j = 0u; j < 8u; j++) {
let gw = select(gw_hi, gw_lo, j < 4u);
let b = f32((gw >> ((j & 3u) * 8u)) & 0xFFu);
let s = select(1.0, -1.0, ((sign_byte >> j) & 1u) != 0u);
row_sum += db * b * s * x_block[ll * 8u + j];
}
}
acc[row] += row_sum;
}
}
}
#endif
#ifdef MUL_ACC_IQ3_XXS
#define BLOCK_SIZE 256
#define BLOCK_SIZE_BYTES 98
#define THREADS_PER_BLOCK 16
let tid = thread_id % THREADS_PER_BLOCK;
let block_group = thread_id / THREADS_PER_BLOCK;
let num_block_groups: u32 = WG_SIZE / THREADS_PER_BLOCK;
let sub_blk = tid / 2u;
let half = tid % 2u;
let slot0 = half * 2u;
let y_offset = sub_blk * 32u + slot0 * 8u;
let num_blocks = params.k / BLOCK_SIZE;
for (var block = block_group; block < num_blocks; block += num_block_groups) {
let x_base = src1_idx_base + block * BLOCK_SIZE + y_offset;
var x_block: array<f32, 16>;
for (var i = 0u; i < 16u; i++) {
x_block[i] = f32(src1[x_base + i]);
}
for (var row = 0u; row < OUTPUTS_PER_WG; row++) {
let output_row = row_base + row;
if (output_row < params.m) {
let block_byte_base = (src0_batch_offset + output_row * params.stride_01 + block) * BLOCK_SIZE_BYTES;
let d = f32(load_f16_at_src0(block_byte_base));
let qs_lo = load_u32_at_src0(block_byte_base + 2u + sub_blk * 8u);
let qs_hi = load_u32_at_src0(block_byte_base + 2u + sub_blk * 8u + 4u);
let aux = load_u32_at_src0(block_byte_base + 66u + sub_blk * 4u);
let ls = aux >> 28u;
let db = d * (0.5 + f32(ls)) * 0.5;
var row_sum = 0.0;
for (var ll = 0u; ll < 2u; ll++) {
let l = slot0 + ll;
let qs_word = select(qs_hi, qs_lo, l < 2u);
let byte_pos = (l % 2u) * 2u;
let grid_idx_0 = (qs_word >> (byte_pos * 8u)) & 0xFFu;
let grid_idx_1 = (qs_word >> ((byte_pos + 1u) * 8u)) & 0xFFu;
let signs_idx = (aux >> (7u * l)) & 0x7Fu;
let signs = (ksigns_iq2xs[signs_idx / 4u] >> ((signs_idx % 4u) * 8u)) & 0xFFu;
let grid1 = iq3xxs_grid[grid_idx_0];
let grid2 = iq3xxs_grid[grid_idx_1];
for (var j = 0u; j < 4u; j++) {
let b1 = f32((grid1 >> (j * 8u)) & 0xFFu);
let b2 = f32((grid2 >> (j * 8u)) & 0xFFu);
let s1 = select(1.0, -1.0, ((signs >> j) & 1u) != 0u);
let s2 = select(1.0, -1.0, ((signs >> (j + 4u)) & 1u) != 0u);
row_sum += db * b1 * s1 * x_block[ll * 8u + j];
row_sum += db * b2 * s2 * x_block[ll * 8u + j + 4u];
}
}
acc[row] += row_sum;
}
}
}
#endif
#ifdef MUL_ACC_IQ3_S
#define BLOCK_SIZE 256
#define BLOCK_SIZE_BYTES 110
#define THREADS_PER_BLOCK 16
let tid = thread_id % THREADS_PER_BLOCK;
let block_group = thread_id / THREADS_PER_BLOCK;
let num_block_groups: u32 = WG_SIZE / THREADS_PER_BLOCK;
let sub_blk = tid / 2u;
let half = tid % 2u;
let slot0 = half * 2u;
let y_offset = sub_blk * 32u + slot0 * 8u;
let num_blocks = params.k / BLOCK_SIZE;
for (var block = block_group; block < num_blocks; block += num_block_groups) {
let x_base = src1_idx_base + block * BLOCK_SIZE + y_offset;
var x_block: array<f32, 16>;
for (var i = 0u; i < 16u; i++) {
x_block[i] = f32(src1[x_base + i]);
}
for (var row = 0u; row < OUTPUTS_PER_WG; row++) {
let output_row = row_base + row;
if (output_row < params.m) {
let block_byte_base = (src0_batch_offset + output_row * params.stride_01 + block) * BLOCK_SIZE_BYTES;
let d = f32(load_f16_at_src0(block_byte_base));
let qs_lo = load_u32_at_src0(block_byte_base + 2u + sub_blk * 8u);
let qs_hi = load_u32_at_src0(block_byte_base + 2u + sub_blk * 8u + 4u);
let qh_word = load_u32_at_src0(block_byte_base + 66u + (sub_blk / 4u) * 4u);
let qh_byte = get_byte(qh_word, sub_blk % 4u);
let sg_w = load_u32_at_src0(block_byte_base + 74u + sub_blk * 4u);
let sc_word = load_u32_at_src0(block_byte_base + 106u);
let scales_byte = get_byte(sc_word, sub_blk / 2u);
let sub_scale = (scales_byte >> (4u * (sub_blk % 2u))) & 0xFu;
let db = d * (1.0 + 2.0 * f32(sub_scale));
var row_sum = 0.0;
for (var ll = 0u; ll < 2u; ll++) {
let l = slot0 + ll;
let qs_word = select(qs_hi, qs_lo, l < 2u);
let byte_pos = (l % 2u) * 2u;
let qs0 = (qs_word >> (byte_pos * 8u)) & 0xFFu;
let qs1 = (qs_word >> ((byte_pos + 1u) * 8u)) & 0xFFu;
let grid_idx_1 = qs0 | (((qh_byte >> (2u * l)) & 1u) << 8u);
let grid_idx_2 = qs1 | (((qh_byte >> (2u * l + 1u)) & 1u) << 8u);
let sign_byte = get_byte(sg_w, l);
let grid1 = iq3s_grid[grid_idx_1];
let grid2 = iq3s_grid[grid_idx_2];
for (var j = 0u; j < 4u; j++) {
let b1 = f32((grid1 >> (j * 8u)) & 0xFFu);
let b2 = f32((grid2 >> (j * 8u)) & 0xFFu);
let s1 = select(1.0, -1.0, ((sign_byte >> j) & 1u) != 0u);
let s2 = select(1.0, -1.0, ((sign_byte >> (j + 4u)) & 1u) != 0u);
row_sum += db * b1 * s1 * x_block[ll * 8u + j];
row_sum += db * b2 * s2 * x_block[ll * 8u + j + 4u];
}
}
acc[row] += row_sum;
}
}
}
#endif
#ifdef MUL_ACC_IQ4_NL
#define BLOCK_SIZE 32
#define BLOCK_SIZE_BYTES 18
#define THREADS_PER_BLOCK 4
#define ELEMS_PER_THREAD (BLOCK_SIZE/THREADS_PER_BLOCK)
let num_blocks = params.k / BLOCK_SIZE;
let thread_within_block = thread_id % THREADS_PER_BLOCK;
for (var block = thread_id / THREADS_PER_BLOCK; block < num_blocks; block += WG_SIZE / THREADS_PER_BLOCK) {
let x_base = src1_idx_base + block * BLOCK_SIZE + thread_within_block * 4u;
var x_block: array<f32, ELEMS_PER_THREAD>;
for (var i = 0u; i < ELEMS_PER_THREAD / 2u; i++) {
x_block[i] = f32(src1[x_base + i]);
x_block[i + 4u] = f32(src1[x_base + i + 16u]);
}
for (var row = 0u; row < OUTPUTS_PER_WG; row++) {
let output_row = row_base + row;
if (output_row < params.m) {
let block_byte_base = (src0_batch_offset + output_row * params.stride_01 + block) * BLOCK_SIZE_BYTES;
let d = f32(load_f16_at_src0(block_byte_base));
var row_sum = 0.0;
let q_packed = load_u32_at_src0(block_byte_base + 2u + 4u * thread_within_block);
for (var byte_idx = 0u; byte_idx < 4u; byte_idx++) {
let q_byte = get_byte(q_packed, byte_idx);
let q_lo = f32(kvalues_iq4nl[q_byte & 0xFu]) * d;
let q_hi = f32(kvalues_iq4nl[(q_byte >> 4u) & 0xFu]) * d;
row_sum += q_lo * x_block[byte_idx];
row_sum += q_hi * x_block[byte_idx + 4u];
}
acc[row] += row_sum;
}
}
}
#endif
#ifdef MUL_ACC_IQ4_XS
#define BLOCK_SIZE 256
#define BLOCK_SIZE_BYTES 136
#define THREADS_PER_BLOCK 16
let tid = thread_id % THREADS_PER_BLOCK;
let block_group = thread_id / THREADS_PER_BLOCK;
let num_block_groups: u32 = WG_SIZE / THREADS_PER_BLOCK;
let sub_blk = tid / 2u;
let half = tid % 2u;
let y_offset = sub_blk * 32u + half * 16u;
let num_blocks = params.k / BLOCK_SIZE;
for (var block = block_group; block < num_blocks; block += num_block_groups) {
let x_base = src1_idx_base + block * BLOCK_SIZE + y_offset;
var x_block: array<f32, 16>;
for (var i = 0u; i < 16u; i++) {
x_block[i] = f32(src1[x_base + i]);
}
for (var row = 0u; row < OUTPUTS_PER_WG; row++) {
let output_row = row_base + row;
if (output_row < params.m) {
let block_byte_base = (src0_batch_offset + output_row * params.stride_01 + block) * BLOCK_SIZE_BYTES;
let d = f32(load_f16_at_src0(block_byte_base));
let scales_h = load_u16_at_src0(block_byte_base + 2u);
let scales_l_word = load_u32_at_src0(block_byte_base + 4u);
let sl_byte = get_byte(scales_l_word, sub_blk / 2u);
let sl = (sl_byte >> (4u * (sub_blk % 2u))) & 0xFu;
let sh_bits = (scales_h >> (2u * sub_blk)) & 3u;
let ls = i32(sl | (sh_bits << 4u));
let dl = d * f32(ls - 32);
let qs_byte_off = 8u + sub_blk * 16u;
let q_w0 = load_u32_at_src0(block_byte_base + qs_byte_off);
let q_w1 = load_u32_at_src0(block_byte_base + qs_byte_off + 4u);
let q_w2 = load_u32_at_src0(block_byte_base + qs_byte_off + 8u);
let q_w3 = load_u32_at_src0(block_byte_base + qs_byte_off + 12u);
var row_sum = 0.0;
for (var i = 0u; i < 16u; i++) {
let q_word = select(
select(q_w0, q_w1, i >= 4u),
select(q_w2, q_w3, i >= 12u),
i >= 8u);
let q_byte = get_byte(q_word, i % 4u);
let nib = select(q_byte & 0xFu, (q_byte >> 4u) & 0xFu, half == 1u);
row_sum += f32(kvalues_iq4nl[nib]) * dl * x_block[i];
}
acc[row] += row_sum;
}
}
}
#endif
#ifdef USE_SUBGROUP_REDUCTION
for (var row = 0u; row < OUTPUTS_PER_WG; row++) {
let subgroup_total = subgroupAdd(acc[row]);

View File

@@ -68,11 +68,19 @@ dir=$(basename $(pwd))
git branch -D pr/$PR 2> /dev/null
git worktree add -b pr/$PR ../$dir-pr-$PR pr/$PR/$head_ref 2> /dev/null
og_path=$(pwd)
wt_path=$(cd ../$dir-pr-$PR && pwd)
echo "git worktree created in $wt_path"
cd $wt_path
# pi agent setup in the worktree
if [[ -f "$og_path/.pi/SYSTEM.md" && ! -f ".pi/SYSTEM.md" ]]; then
mkdir -p .pi
ln -sfn "$og_path/.pi/SYSTEM.md" .pi/SYSTEM.md
fi
git branch --set-upstream-to=pr/$PR/$head_ref
git pull --ff-only || {
echo "error: failed to pull pr/$PR"

View File

@@ -72,9 +72,6 @@ llm_build_llama<embed>::llm_build_llama(const llama_model & model, const llm_gra
cur = build_attn(inp_attn,
model.layers[il].wo, model.layers[il].wo_b, model.layers[il].wo_s,
Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
if (model.layers[il].wo_s) {
cur = ggml_mul(ctx0, cur, model.layers[il].wo_s);
}
cb(cur, "attn_out", il);
}
if (il == n_layer - 1 && inp_out_ids) {

View File

@@ -58,9 +58,6 @@ llm_build_qwen3::llm_build_qwen3(const llama_model & model, const llm_graph_para
cur = build_attn(inp_attn,
model.layers[il].wo, model.layers[il].wo_b, model.layers[il].wo_s,
Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
if (model.layers[il].wo_s) {
cur = ggml_mul(ctx0, cur, model.layers[il].wo_s);
}
}
if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);

View File

@@ -58,9 +58,6 @@ llm_build_qwen3moe::llm_build_qwen3moe(const llama_model & model, const llm_grap
cur = build_attn(inp_attn,
model.layers[il].wo, model.layers[il].wo_b, model.layers[il].wo_s,
Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
if (model.layers[il].wo_s) {
cur = ggml_mul(ctx0, cur, model.layers[il].wo_s);
}
}
if (il == n_layer - 1 && inp_out_ids) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);

View File

@@ -2249,6 +2249,46 @@ static void test_template_output_peg_parsers(bool detailed_debug) {
.reasoning_format(COMMON_REASONING_FORMAT_AUTO)
.expect(message_assist)
.run();
{
// additional tests for https://github.com/ggml-org/llama.cpp/pull/21760
auto tmpls = read_templates("models/templates/google-gemma-4-31B-it.jinja");
common_chat_msg tool_call_msg = simple_assist_msg(
"Let me check.", "", "special_function", "{\"arg1\": 1}","c0");
common_chat_msg tool_msg;
tool_msg.role = "tool";
tool_msg.tool_name = "special_function";
tool_msg.tool_call_id = "c0";
tool_msg.content = "{\"r\":\"ok\"}";
{
common_chat_templates_inputs inputs;
inputs.messages = { message_user, tool_call_msg, tool_msg };
inputs.tools = { special_function_tool };
inputs.add_generation_prompt = true;
auto params = common_chat_templates_apply(tmpls.get(), inputs);
if (!string_ends_with(params.prompt, "<turn|>\n<|turn>model\n")) {
throw std::runtime_error("Missing generation prompt for Gemma 4");
}
}
{
common_chat_templates_inputs inputs;
inputs.messages = { message_user, tool_call_msg, tool_msg };
inputs.tools = { special_function_tool };
inputs.add_generation_prompt = false;
auto params = common_chat_templates_apply(tmpls.get(), inputs);
if (string_ends_with(params.prompt, "<|turn>model\n")) {
throw std::runtime_error("Gemma 4: generation prompt was modified despite add_generation_prompt=false");
}
}
}
}
{

View File

@@ -72,7 +72,7 @@ int main(int argc, char ** argv) {
mtmd::context_ptr ctx_mtmd;
common_init_result_ptr llama_init;
base_callback_data cb_data;
common_debug_cb_user_data cb_data;
llama_init = common_init_from_params(params);
{
@@ -89,7 +89,7 @@ int main(int argc, char ** argv) {
{
// always enable debug callback
mparams.cb_eval_user_data = &cb_data;
mparams.cb_eval = common_debug_cb_eval<false>;
mparams.cb_eval = common_debug_cb_eval;
}
ctx_mtmd.reset(mtmd_init_from_file(clip_path, model, mparams));
if (!ctx_mtmd.get()) {

View File

@@ -90,7 +90,7 @@ struct mtmd_cli_context {
int n_threads = 1;
llama_pos n_past = 0;
base_callback_data cb_data;
common_debug_cb_user_data cb_data;
mtmd_cli_context(common_params & params) : llama_init(common_init_from_params(params)) {
model = llama_init->model();
@@ -145,7 +145,7 @@ struct mtmd_cli_context {
mparams.image_max_tokens = params.image_max_tokens;
if (std::getenv("MTMD_DEBUG_GRAPH") != nullptr) {
mparams.cb_eval_user_data = &cb_data;
mparams.cb_eval = common_debug_cb_eval<false>;
mparams.cb_eval = common_debug_cb_eval;
}
ctx_vision.reset(mtmd_init_from_file(clip_path, model, mparams));
if (!ctx_vision.get()) {

View File

@@ -317,7 +317,7 @@ int main(int argc, char * argv[]) {
const char * cache_dir = nullptr;
std::string cache_dir_str;
if (params.use_cache) {
cache_dir_str = fs_get_cache_directory() + "rpc/";
cache_dir_str = fs_get_cache_directory() + "rpc" + DIRECTORY_SEPARATOR;
if (!fs_create_directory_with_parents(cache_dir_str)) {
fprintf(stderr, "Failed to create cache directory: %s\n", cache_dir_str.c_str());
return 1;