Skip to content

Commit 309ef24

Browse files
committed
Fix unnecessary high llama-3 VRAM use
1 parent 9bcf695 commit 309ef24

File tree

1 file changed

+1
-1
lines changed

1 file changed

+1
-1
lines changed

ggml-vulkan.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -5288,7 +5288,7 @@ static void ggml_vk_preallocate_buffers_graph(ggml_backend_vk_context * ctx, ggm
 
     bool mmp = (use_src0 && use_src1 && src1_type == GGML_TYPE_F32) ? ggml_vk_get_mul_mat_mat_pipeline(ctx, src0_type, y_non_contig ? GGML_TYPE_F16 : src1->type) != nullptr : false;
 
-    const bool qx_needs_dequant = use_src0 && (mmp || x_non_contig);
+    const bool qx_needs_dequant = use_src0 && (!mmp || x_non_contig);
     const bool qy_needs_dequant = use_src1 && ((src1->type != GGML_TYPE_F16 && !y_f32_kernel) || y_non_contig);
 
     int split_k;

0 commit comments

Comments (0)