llama.cpp updated to b2692
guinmoon committed Apr 18, 2024
Parent: 20d85da · Commit: 56c69dd
Showing 46 changed files with 51,153 additions and 10,624 deletions.
8 changes: 4 additions & 4 deletions Package.swift
@@ -23,16 +23,16 @@ let package = Package(
         name: "llmfarm_core_cpp",
         sources: ["ggml/ggml.c","exception_helper.cpp","ggml/ggml-quants.c","ggml/ggml-alloc.c","ggml/ggml-backend.c","ggml/ggml-metal.m",
                   "ggml/common.cpp","ggml/sampling.cpp","ggml/train.cpp","ggml/build-info.cpp",
-                  "gpt_helpers.cpp","gpt_spm.cpp","package_helper.m","grammar-parser.cpp","exception_helper_objc.mm","ggml/common_old.cpp",
-                  "llava/llava.cpp","llava/clip.cpp",
-                  "llava/llava-cli.cpp",
-                  "finetune/finetune.cpp","finetune/export-lora.cpp","llama/llama.cpp",
+                  "gpt_helpers.cpp","gpt_spm.cpp","package_helper.m","ggml/grammar-parser.cpp","exception_helper_objc.mm","ggml/common_old.cpp",
+                  "llava/llava.cpp","llava/clip.cpp","llava/llava-cli.cpp","llama/unicode.cpp","llama/unicode-data.cpp","ggml/sgemm.cpp",
+                  "ggml/json-schema-to-grammar.cpp","finetune/finetune.cpp","finetune/export-lora.cpp","llama/llama.cpp",
                   "ggml/ggml_d925ed.c","ggml/ggml_d925ed-alloc.c","ggml/ggml_d925ed-metal.m","rwkv/rwkv.cpp",
                   "ggml/ggml_dadbed9.c","ggml/k_quants_dadbed9.c","ggml/ggml-alloc_dadbed9.c","ggml/ggml-metal_dadbed9.m",
                   "gptneox/gptneox.cpp","gpt2/gpt2.cpp","replit/replit.cpp","starcoder/starcoder.cpp","llama/llama_dadbed9.cpp"
         ],
         resources: [
             .copy("tokenizers"),
-            .process("ggml-metal.metal"),
+            .copy("metal")
         ],
         publicHeadersPath: "spm-headers",
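Note on the source list: relative to the previously vendored snapshot, this llama.cpp drop splits tokenizer handling into llama/unicode.cpp and llama/unicode-data.cpp, adds the sgemm.cpp CPU matrix-multiply kernels and ggml/json-schema-to-grammar.cpp, and moves grammar-parser.cpp under ggml/. SwiftPM compiles only the files listed in `sources`, so each new translation unit has to be added by hand or the target fails at link time. A minimal sketch of the target's shape (abbreviated; the diff above is the authoritative list):

    // Sketch only: every C/C++/Objective-C file in this mixed-language target
    // must be enumerated explicitly; new upstream files are not auto-discovered.
    .target(
        name: "llmfarm_core_cpp",
        sources: [
            "llama/llama.cpp",
            "llama/unicode.cpp",               // new: tokenizer unicode handling
            "llama/unicode-data.cpp",          // new: generated unicode tables
            "ggml/sgemm.cpp",                  // new: CPU matmul kernels
            "ggml/json-schema-to-grammar.cpp", // new: JSON schema -> GBNF grammar
            // ... remaining sources as listed in the diff
        ],
        resources: [
            .copy("tokenizers"),
            .copy("metal")   // ship the Metal shader directory as-is
        ],
        publicHeadersPath: "spm-headers"
    )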
2 changes: 1 addition & 1 deletion Sources/llmfarm_core/LLaMa.swift
@@ -107,7 +107,7 @@ public class LLaMa: LLMBase {
             model_params.use_mmap = false
         }

-        llama_backend_init(false)
+        llama_backend_init()

         self.model = llama_load_model_from_file(path, model_params)
         if self.model == nil{
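This tracks an upstream API break rather than a behavior change: in current llama.cpp, llama_backend_init() takes no arguments, and the NUMA flag it used to accept moved into a separate llama_numa_init() call. A sketch of the updated initialization sequence as seen from Swift (passing GGML_NUMA_STRATEGY_DISABLED reproduces the old `false`; the call can be omitted entirely when no NUMA tuning is wanted):

    llama_backend_init()                           // no NUMA argument since the API split
    llama_numa_init(GGML_NUMA_STRATEGY_DISABLED)   // optional; replaces the removed bool flag
    let model = llama_load_model_from_file(path, model_params)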
2 changes: 1 addition & 1 deletion Sources/llmfarm_core/LLaMa_MModal.swift
@@ -20,7 +20,7 @@ public class LLaMa_MModal: LLaMa {
 #if os(iOS)
         self.clip_ctx = clip_model_load(contextParams.clip_model, 1, 0 );
 #else
-        self.clip_ctx = clip_model_load(contextParams.clip_model, 1,contextParams.use_metal ? 1: 0 );
+        self.clip_ctx = clip_model_load(contextParams.clip_model, 1,contextParams.use_metal ? 1: 0);
 #endif
         return true
     }
2 changes: 0 additions & 2 deletions Sources/llmfarm_core_cpp/finetune/export-lora.cpp
@@ -8,8 +8,6 @@
 #include <string>
 #include <thread>

-static const size_t tensor_alignment = 32;
-
 struct lora_info {
     std::string filename;
     float scale;
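The dropped tensor_alignment constant is the 32-byte boundary ggml expects tensor data to sit on; it was presumably removed because the updated shared sources already provide it, leaving this file's own copy redundant. For illustration only, the round-up arithmetic it feeds (a hypothetical Swift helper mirroring ggml's GGML_PAD macro; the name is ours, not the library's):

    // Hypothetical helper: round a byte size up to the next multiple of the
    // 32-byte tensor alignment, as ggml's GGML_PAD macro does.
    func padToAlignment(_ size: Int, alignment: Int = 32) -> Int {
        return ((size + alignment - 1) / alignment) * alignment
    }
    // padToAlignment(100) == 128; padToAlignment(64) == 64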
244 changes: 122 additions & 122 deletions Sources/llmfarm_core_cpp/finetune/finetune.cpp

Large diffs are not rendered by default.
