add lora in minicpmv-cli
tc-mb committed Sep 29, 2024
1 parent 936faf8 commit f21723a
Showing 2 changed files with 24 additions and 0 deletions.
5 changes: 5 additions & 0 deletions convert_lora_to_gguf.py
@@ -225,6 +225,9 @@ def get_base_tensor_name(lora_tensor_name: str) -> str:
base_name = lora_tensor_name.replace("base_model.model.", "")
base_name = base_name.replace(".lora_A.weight", ".weight")
base_name = base_name.replace(".lora_B.weight", ".weight")
# base_name = lora_tensor_name.replace("base_model.model.llm.", "")
# base_name = base_name.replace(".lora_A.default.weight", ".weight")
# base_name = base_name.replace(".lora_B.default.weight", ".weight")
return base_name


@@ -340,6 +343,8 @@ def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
base_name = get_base_tensor_name(name)
is_lora_a = ".lora_A.weight" in name
is_lora_b = ".lora_B.weight" in name
# is_lora_a = ".lora_A.default.weight" in name
# is_lora_b = ".lora_B.default.weight" in name
if not is_lora_a and not is_lora_b:
if ".base_layer.weight" in name:
continue
19 changes: 19 additions & 0 deletions examples/llava/minicpmv-cli.cpp
@@ -211,6 +211,25 @@ static struct llava_context * llava_init_context(gpt_params * params) {
return NULL;
}

// // load and optionally apply lora adapters
// for (auto & la : params->lora_adapters) {
// llama_lora_adapter_container loaded_la;
// loaded_la.path = la.path;
// loaded_la.scale = la.scale;
// loaded_la.adapter = llama_lora_adapter_init(model, la.path.c_str());
// if (loaded_la.adapter == nullptr) {
// fprintf(stderr, "%s: error: failed to apply lora adapter '%s'\n", __func__, la.path.c_str());
// // llama_free(lctx);
// // llama_free_model(model);
// // return iparams;
// return NULL;
// }
// iparams.lora_adapters.push_back(loaded_la); // copy to list of loaded adapters
// }
// if (!params->lora_init_without_apply) {
// llama_lora_adapters_apply(ctx_llama, iparams.lora_adapters);
// }

auto ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));

ctx_llava->ctx_llama = ctx_llama;
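
The LoRA-loading block above is added commented out and still refers to iparams, which does not exist in llava_init_context. Below is a minimal sketch of how the same llama_lora_adapter_* calls might be enabled in this function, collecting the loaded adapters in a local vector instead of iparams; the local variable name and the cleanup calls on failure are assumptions, not part of the commit.

// Sketch only (not part of the commit): the commented block above, adapted
// so it could compile inside llava_init_context. model and ctx_llama are the
// model/context created earlier in this function; the local lora_adapters
// vector and the cleanup on failure are assumptions.
std::vector<llama_lora_adapter_container> lora_adapters;
for (auto & la : params->lora_adapters) {
    llama_lora_adapter_container loaded_la;
    loaded_la.path    = la.path;
    loaded_la.scale   = la.scale;
    loaded_la.adapter = llama_lora_adapter_init(model, la.path.c_str());
    if (loaded_la.adapter == nullptr) {
        fprintf(stderr, "%s: error: failed to apply lora adapter '%s'\n", __func__, la.path.c_str());
        llama_free(ctx_llama);
        llama_free_model(model);
        return NULL;
    }
    lora_adapters.push_back(loaded_la); // keep track of loaded adapters
}
if (!params->lora_init_without_apply) {
    llama_lora_adapters_apply(ctx_llama, lora_adapters);
}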
