From 7fb809b94bb8b3d31c2db9582bca42dd62216284 Mon Sep 17 00:00:00 2001
From: Concedo <39025047+LostRuins@users.noreply.github.com>
Date: Thu, 7 Sep 2023 14:19:42 +0800
Subject: [PATCH] fixed auto rope scaling (+1 squashed commits)

Squashed commits:

[b1767874] wip
---
 expose.cpp          | 33 ++++++++++++++++-----------------
 gpttype_adapter.cpp | 12 ++++++------
 koboldcpp.py        |  2 +-
 model_adapter.cpp   | 14 ++++++++++++--
 model_adapter.h     |  9 +++++++--
 5 files changed, 42 insertions(+), 28 deletions(-)

diff --git a/expose.cpp b/expose.cpp
index df75fac1f880b..d385ffcb7b221 100644
--- a/expose.cpp
+++ b/expose.cpp
@@ -27,6 +27,7 @@ extern "C"

     //return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt)
     static FileFormat file_format = FileFormat::BADFORMAT;
+    static FileFormatExtraMeta file_format_meta;

     bool load_model(const load_model_inputs inputs)
     {
@@ -36,11 +37,9 @@ extern "C"
         int forceversion = inputs.forceversion;

-        if(forceversion==0)
-        {
-            file_format = check_file_format(model.c_str());
-        }
-        else
+        file_format = check_file_format(model.c_str(),&file_format_meta);
+
+        if(forceversion!=0)
         {
             printf("\nWARNING: FILE FORMAT FORCED TO VER %d\nIf incorrect, loading may fail or crash.\n",forceversion);
             file_format = (FileFormat)forceversion;
         }
@@ -64,7 +63,7 @@ extern "C"
         if(file_format==FileFormat::GPTJ_1 || file_format==FileFormat::GPTJ_2 || file_format==FileFormat::GPTJ_3 || file_format==FileFormat::GPTJ_4 || file_format==FileFormat::GPTJ_5)
         {
             printf("\n---\nIdentified as GPT-J model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-            ModelLoadResult lr = gpttype_load_model(inputs, file_format);
+            ModelLoadResult lr = gpttype_load_model(inputs, file_format, file_format_meta);
             if (lr == ModelLoadResult::RETRY_LOAD)
             {
                 if(file_format==FileFormat::GPTJ_1)
@@ -73,14 +72,14 @@ extern "C"
                     //otherwise if we tried 3 first, then try 2
                     file_format = FileFormat::GPTJ_4;
                     printf("\n---\nRetrying as GPT-J model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-                    lr = gpttype_load_model(inputs, file_format);
+                    lr = gpttype_load_model(inputs, file_format, file_format_meta);
                 }

                 if (lr == ModelLoadResult::RETRY_LOAD)
                 {
                     file_format = FileFormat::GPTJ_3;
                     printf("\n---\nRetrying as GPT-J model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-                    lr = gpttype_load_model(inputs, file_format);
+                    lr = gpttype_load_model(inputs, file_format, file_format_meta);
                 }

                 //lastly try format 2
@@ -88,7 +87,7 @@ extern "C"
                 {
                     file_format = FileFormat::GPTJ_2;
                     printf("\n---\nRetrying as GPT-J model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-                    lr = gpttype_load_model(inputs, file_format);
+                    lr = gpttype_load_model(inputs, file_format, file_format_meta);
                 }
             }

@@ -104,18 +103,18 @@ extern "C"
         else if(file_format==FileFormat::GPT2_1||file_format==FileFormat::GPT2_2||file_format==FileFormat::GPT2_3||file_format==FileFormat::GPT2_4)
         {
             printf("\n---\nIdentified as GPT-2 model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-            ModelLoadResult lr = gpttype_load_model(inputs, file_format);
+            ModelLoadResult lr = gpttype_load_model(inputs, file_format, file_format_meta);
             if (lr == ModelLoadResult::RETRY_LOAD)
             {
                 file_format = FileFormat::GPT2_3;
                 printf("\n---\nRetrying as GPT-2 model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-                lr = gpttype_load_model(inputs, file_format);
+                lr = gpttype_load_model(inputs, file_format, file_format_meta);
             }
             if (lr == ModelLoadResult::RETRY_LOAD)
             {
                 file_format = FileFormat::GPT2_2;
                 printf("\n---\nRetrying as GPT-2 model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-                lr = gpttype_load_model(inputs, file_format);
+                lr = gpttype_load_model(inputs, file_format, file_format_meta);
             }
             if (lr == ModelLoadResult::FAIL || lr == ModelLoadResult::RETRY_LOAD)
             {
@@ -129,27 +128,27 @@ extern "C"
         else if(file_format==FileFormat::NEOX_1 || file_format==FileFormat::NEOX_2 || file_format==FileFormat::NEOX_3 || file_format==FileFormat::NEOX_4 || file_format==FileFormat::NEOX_5 || file_format==FileFormat::NEOX_6 || file_format==FileFormat::NEOX_7)
         {
             printf("\n---\nIdentified as GPT-NEO-X model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-            ModelLoadResult lr = gpttype_load_model(inputs, file_format);
+            ModelLoadResult lr = gpttype_load_model(inputs, file_format, file_format_meta);
             if (lr == ModelLoadResult::RETRY_LOAD)
             {
                 if(file_format==FileFormat::NEOX_2)
                 {
                     file_format = FileFormat::NEOX_3;
                     printf("\n---\nRetrying as GPT-NEO-X model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-                    lr = gpttype_load_model(inputs, file_format);
+                    lr = gpttype_load_model(inputs, file_format, file_format_meta);
                 }
                 else
                 {
                     file_format = FileFormat::NEOX_5;
                     printf("\n---\nRetrying as GPT-NEO-X model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-                    lr = gpttype_load_model(inputs, file_format);
+                    lr = gpttype_load_model(inputs, file_format, file_format_meta);
                 }
             }
             if (lr == ModelLoadResult::RETRY_LOAD)
             {
                 file_format = FileFormat::NEOX_1;
                 printf("\n---\nRetrying as GPT-NEO-X model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-                lr = gpttype_load_model(inputs, file_format);
+                lr = gpttype_load_model(inputs, file_format, file_format_meta);
             }
             if (lr == ModelLoadResult::FAIL || lr == ModelLoadResult::RETRY_LOAD)
             {
@@ -178,7 +177,7 @@ extern "C"
         {
             printf("\n---\nIdentified as LLAMA model: (ver %d)\nAttempting to Load...\n---\n", file_format);
         }
-        ModelLoadResult lr = gpttype_load_model(inputs, file_format);
+        ModelLoadResult lr = gpttype_load_model(inputs, file_format, file_format_meta);
         if (lr == ModelLoadResult::FAIL || lr == ModelLoadResult::RETRY_LOAD)
         {
             return false;
diff --git a/gpttype_adapter.cpp b/gpttype_adapter.cpp
index 4a010c7de4f16..a8ebbd25f6f8b 100644
--- a/gpttype_adapter.cpp
+++ b/gpttype_adapter.cpp
@@ -393,7 +393,7 @@ static std::string RemoveBell(const std::string & input) //removes the bell char
     return word2;
 }

-ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in_file_format)
+ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in_file_format, FileFormatExtraMeta file_format_meta)
 {
     ggml_time_init();

@@ -438,11 +438,11 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
     {
         //approximate NTK aware ctx
         auto effectivenctx = params.n_ctx;
-        // if((file_format == FileFormat::GGUF_LLAMA || file_format==FileFormat::GGUF_FALCON) && llama_ctx_v4->model.hparams.n_ctx_train>2048)
-        // {
-        //     float factor = llama_ctx_v4->model.hparams.n_ctx_train/2048;
-        //     effectivenctx = effectivenctx/factor;
-        // }
+        if((file_format == FileFormat::GGUF_LLAMA || file_format==FileFormat::GGUF_FALCON) && file_format_meta.n_ctx_train > 2048)
+        {
+            float factor = file_format_meta.n_ctx_train/2048;
+            effectivenctx = effectivenctx/factor;
+        }
         rope_freq_base = (effectivenctx <= 3072 ? 26000.0f : (effectivenctx <= 4096 ? 32000.0f : (effectivenctx <= 6144 ? 54000.0f : (effectivenctx <= 8192 ? 82684.0f : (effectivenctx <= 12288 ? 140000.0f : 200000.0f)))));
     }

diff --git a/koboldcpp.py b/koboldcpp.py
index f1b1a70b7ef3d..a090c1a8dea68 100755
--- a/koboldcpp.py
+++ b/koboldcpp.py
@@ -330,7 +330,7 @@ def utfprint(str):
 maxhordelen = 256
 modelbusy = threading.Lock()
 defaultport = 5001
-KcppVersion = "1.42.1"
+KcppVersion = "1.43"
 showdebug = True
 showsamplerwarning = True
 showmaxctxwarning = True
diff --git a/model_adapter.cpp b/model_adapter.cpp
index 0f4d61e8e1047..5c3a3f95fb14e 100644
--- a/model_adapter.cpp
+++ b/model_adapter.cpp
@@ -80,7 +80,7 @@ void print_tok_vec(std::vector<float> &embd)
 }

 //return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt)
- FileFormat check_file_format(const std::string & fname)
+ FileFormat check_file_format(const std::string & fname, FileFormatExtraMeta * fileformatmeta)
 {
     std::vector<char> f_buf(1024*1024);

@@ -266,7 +266,7 @@ void print_tok_vec(std::vector<float> &embd)
             auto keyidx = gguf_find_key(ctx, "general.architecture");
             std::string modelarch = "";
             if (keyidx != -1) { modelarch = gguf_get_val_str(ctx, keyidx); }
-            gguf_free(ctx);
+
             if(modelarch=="llama")
             {
                 fileformat = FileFormat::GGUF_LLAMA;
@@ -280,6 +280,16 @@ void print_tok_vec(std::vector<float> &embd)
             {
                 printf("\nERROR: Detected unimplemented GGUF Arch: %s\n",modelarch.c_str());
             }
+
+            if(modelarch!="" && fileformatmeta!=nullptr)
+            {
+                std::string fkey = modelarch+".context_length";
+                auto keyidx = gguf_find_key(ctx, fkey.c_str());
+                if (keyidx != -1) {
+                    fileformatmeta->n_ctx_train = gguf_get_val_u32(ctx, keyidx);
+                }
+            }
+            gguf_free(ctx);
         }

         if(fin.is_open())
diff --git a/model_adapter.h b/model_adapter.h
index 5618d3592ae80..fa596700a8df1 100644
--- a/model_adapter.h
+++ b/model_adapter.h
@@ -51,6 +51,11 @@ enum FileFormat

 };

+struct FileFormatExtraMeta
+{
+    int n_ctx_train = 2048;
+};
+
 enum ModelLoadResult
 {
     FAIL = 0,
@@ -58,7 +63,7 @@ enum ModelLoadResult
     RETRY_LOAD = 2, //used if it's suspected that the model is an older format
 };

-ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in_file_format);
+ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in_file_format, FileFormatExtraMeta file_format_meta);
 generation_outputs gpttype_generate(const generation_inputs inputs, generation_outputs &output);
 bool gpttype_generate_abort();
 const std::string & gpttype_get_pending_output();
@@ -73,7 +78,7 @@ std::vector<int> LongestCommonSubseq(const std::vector<int> x, const std::vector
 bool ArrStartWith(const std::vector<int> targetArray, const std::vector<int> searchSeq);
 int ArrFindIndexOf(const std::vector<int> targetArray, const std::vector<int> searchSeq);

-FileFormat check_file_format(const std::string & fname);
+FileFormat check_file_format(const std::string & fname, FileFormatExtraMeta * fileformatmeta);

 void ContextFastForward(std::vector<int> &current_context_tokens, std::vector<int> &embd_inp, int &n_past, std::vector<int> &last_n_tokens,
                         const int nctx, std::vector<int> &smartcontext, const bool useSmartContext, const bool requireFullSubset);
\ No newline at end of file
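
Note: the auto rope scaling heuristic this patch wires up boils down to the following standalone sketch. The names pick_rope_freq_base and ExtraMeta are illustrative stand-ins; the actual koboldcpp code computes this inline in gpttype_load_model using FileFormatExtraMeta.

// Minimal sketch of the NTK-aware rope_freq_base heuristic from the
// gpttype_adapter.cpp hunk above. Names here are illustrative, not the
// real koboldcpp API.
#include <cstdio>

// Mirrors FileFormatExtraMeta: without GGUF metadata, a 2048-token
// training context is assumed.
struct ExtraMeta { int n_ctx_train = 2048; };

static float pick_rope_freq_base(int requested_n_ctx, const ExtraMeta &meta)
{
    float effectivenctx = (float)requested_n_ctx;
    if (meta.n_ctx_train > 2048)
    {
        // As in the patch, this divides two ints, so factor is truncated
        // (n_ctx_train 4096 -> 2, 3072 -> 1).
        float factor = meta.n_ctx_train / 2048;
        effectivenctx = effectivenctx / factor;
    }
    // Same threshold table as the patch: a larger effective context
    // selects a larger rope frequency base.
    return (effectivenctx <= 3072 ? 26000.0f : (effectivenctx <= 4096 ? 32000.0f : (effectivenctx <= 6144 ? 54000.0f : (effectivenctx <= 8192 ? 82684.0f : (effectivenctx <= 12288 ? 140000.0f : 200000.0f)))));
}

int main()
{
    ExtraMeta llama2;
    llama2.n_ctx_train = 4096; // e.g. a model whose GGUF reports 4096
    // 8192 requested on a 4096-trained model -> effective 4096 -> 32000
    printf("%.0f\n", pick_rope_freq_base(8192, llama2));
    // 8192 requested with the default 2048 assumption -> 82684
    printf("%.0f\n", pick_rope_freq_base(8192, ExtraMeta{}));
    return 0;
}

This is why the n_ctx_train plumbing matters: before the patch, a model trained at 4096 tokens loaded with an 8192 context fell into the 82684.0f bucket, roughly double the base its halved effective context calls for.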
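Likewise, the metadata probe added to check_file_format reduces to the pattern below. This is a sketch against ggml's gguf_* API as it existed around this patch; read_n_ctx_train is an illustrative name and error handling is kept minimal.

// Sketch of pulling the trained context length out of a GGUF file's
// "<arch>.context_length" key, as the model_adapter.cpp hunk does.
#include <string>
#include "ggml.h" // the gguf_* API shipped in ggml.h at this point

int read_n_ctx_train(const char * fname)
{
    gguf_init_params params = { /*no_alloc*/ true, /*ctx*/ nullptr }; // metadata only, skip tensor data

    gguf_context * ctx = gguf_init_from_file(fname, params);
    if (ctx == nullptr)
    {
        return 2048; // unreadable file: keep the FileFormatExtraMeta default
    }

    int n_ctx_train = 2048;
    int keyidx = gguf_find_key(ctx, "general.architecture");
    if (keyidx != -1)
    {
        std::string modelarch = gguf_get_val_str(ctx, keyidx);
        // e.g. "llama.context_length" or "falcon.context_length"
        std::string fkey = modelarch + ".context_length";
        keyidx = gguf_find_key(ctx, fkey.c_str());
        if (keyidx != -1)
        {
            n_ctx_train = (int)gguf_get_val_u32(ctx, keyidx);
        }
    }
    // Freed only after every key has been read.
    gguf_free(ctx);
    return n_ctx_train;
}

The ordering is the subtle part of the diff: the old code called gguf_free(ctx) immediately after reading general.architecture, so the patch moves that free to the end of the block so the context-length key can still be read.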