Skip to content

Commit 63e5fbb

Browse files
author
bedwards
committed
upgrade llama.cpp to b1742
1 parent 4922cac commit 63e5fbb

File tree

2 files changed

+3
-3
lines changed

2 files changed

+3
-3
lines changed

binding.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -637,7 +637,7 @@ void *llama_allocate_params(const char *prompt, int seed, int threads, int token
637 637
params->prompt_cache_ro = prompt_cache_ro;
638 638
params->sparams.top_k = top_k;
639 639
params->sparams.top_p = top_p;
640-
params->memory_f16 = memory_f16;
640+
// params->memory_f16 = memory_f16;
641 641
params->sparams.temp = temp;
642 642
params->use_mmap = mmap;
643 643
params->use_mlock = mlock;
@@ -712,7 +712,7 @@ void *load_model(const char *fname, int n_ctx, int n_seed, bool memory_f16, bool
712 712

713 713
lparams.n_ctx = n_ctx;
714 714
lparams.seed = n_seed;
715-
lparams.f16_kv = memory_f16;
715+
// lparams.f16_kv = memory_f16;
716 716
lparams.embedding = embeddings;
717 717
mparams.use_mlock = mlock;
718 718
mparams.n_gpu_layers = n_gpu_layers;

llama.cpp

0 commit comments

Comments (0)