Skip to content

Commit

Permalink
remove the extern "C", MINICPMV_API
Browse files Browse the repository at this point in the history
  • Loading branch information
tc-mb committed Jul 23, 2024
1 parent fcde997 commit 6fd0937
Show file tree
Hide file tree
Showing 3 changed files with 19 additions and 16 deletions.
2 changes: 1 addition & 1 deletion examples/llava/minicpmv-cli.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ static struct minicpmv_context * minicpmv_init(gpt_params * params, const std::s
LOG_TEE("\n%s: llava init in %8.2f ms.\n", __func__, t_llava_init_ms);

const int64_t t_process_image_start_us = ggml_time_us();
process_image(ctx_llava, image_embed_slices, params, n_past);
process_image(ctx_llava, embeds, params, n_past);
const int64_t t_process_image_end_us = ggml_time_us();
float t_process_image_ms = (t_process_image_end_us - t_process_image_start_us) / 1000.0;
LOG_TEE("\n%s: llama process image in %8.2f ms.\n", __func__, t_process_image_ms);
Expand Down
21 changes: 12 additions & 9 deletions examples/llava/minicpmv-wrapper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,10 @@
#include <cstdlib>
#include <vector>

// Opaque holder for a UHD (multi-slice) image embedding: a 2-D grid of
// llava_image_embed pointers. image_embeds[0][0] is evaluated first as the
// main <image>; rows >= 1, when present, are evaluated inside a <slice>
// block (see process_image in minicpmv-wrapper.cpp). Entries are raw
// pointers — ownership/freeing is handled elsewhere (TODO confirm: likely
// llava_image_embed_free by the creator).
struct uhd_image_embed {
std::vector<std::vector<struct llava_image_embed *>> image_embeds;
};

struct llama_model * llava_init(gpt_params * params) {
llama_backend_init();
llama_numa_init(params->numa);
Expand Down Expand Up @@ -64,8 +68,7 @@ struct clip_ctx * clip_init_context(gpt_params * params) {
if (prompt.empty()) {
prompt = "describe the image in detail.";
}
struct clip_image_size * load_image_size = clip_image_size_init();
auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1, load_image_size);
auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);
return ctx_clip;
}

Expand Down Expand Up @@ -108,22 +111,22 @@ bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch,
return eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
}

void process_image(struct minicpmv_context * ctx_llava, std::vector<std::vector<struct llava_image_embed *>> image_embed_slices, gpt_params * params, int &n_past) {
void process_image(struct minicpmv_context * ctx_llava, struct uhd_image_embed * image_embed_slices, gpt_params * params, int &n_past) {
std::string system_prompt;

system_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n";
LOG_TEE("%s: image token past: %d\n", __func__, n_past);
eval_string(ctx_llava->ctx_llama, (system_prompt+"<image>").c_str(), params->n_batch, &n_past, false);
llava_eval_image_embed(ctx_llava->ctx_llama, image_embed_slices[0][0], params->n_batch, &n_past);
llava_eval_image_embed(ctx_llava->ctx_llama, image_embed_slices->image_embeds[0][0], params->n_batch, &n_past);
eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
if (image_embed_slices.size() > 1) {
if (image_embed_slices->image_embeds.size() > 1) {
eval_string(ctx_llava->ctx_llama, std::string("<slice>").c_str(), params->n_batch, &n_past, false);
for (size_t i = 1; i < image_embed_slices.size(); ++i) {
for (size_t j = 0; j < image_embed_slices[i].size(); ++j) {
for (size_t i = 1; i < image_embed_slices->image_embeds.size(); ++i) {
for (size_t j = 0; j < image_embed_slices->image_embeds[i].size(); ++j) {
eval_string(ctx_llava->ctx_llama, std::string("<image>").c_str(), params->n_batch, &n_past, false);
llava_eval_image_embed(ctx_llava->ctx_llama, image_embed_slices[i][j], params->n_batch, &n_past);
llava_eval_image_embed(ctx_llava->ctx_llama, image_embed_slices->image_embeds[i][j], params->n_batch, &n_past);
eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
if (j == image_embed_slices[i].size() - 1) {
if (j == image_embed_slices->image_embeds[i].size() - 1) {
eval_string(ctx_llava->ctx_llama, std::string("\n").c_str(), params->n_batch, &n_past, false);
}
}
Expand Down
12 changes: 6 additions & 6 deletions examples/llava/minicpmv-wrapper.h
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,12 @@
# define MINICPMV_API
#endif

bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past);
bool eval_id(struct llama_context * ctx_llama, int id, int * n_past);
bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos);
void process_image(struct minicpmv_context * ctx_llava, struct uhd_image_embed * image_embed_slices, gpt_params * params, int &n_past);
const char * sample(struct llama_sampling_context * ctx_sampling, struct llama_context * ctx_llama, int * n_past);

#ifdef __cplusplus
extern "C" {
#endif
Expand All @@ -36,12 +42,6 @@ MINICPMV_API void llava_free(struct minicpmv_context * ctx_llava);
MINICPMV_API struct clip_ctx * clip_init_context(gpt_params * params);
MINICPMV_API struct uhd_image_embed * minicpmv_image_embed(gpt_params * params, const std::string & fname);

MINICPMV_API bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past);
MINICPMV_API bool eval_id(struct llama_context * ctx_llama, int id, int * n_past);
MINICPMV_API bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos);
MINICPMV_API void process_image(struct minicpmv_context * ctx_llava, std::vector<std::vector<struct llava_image_embed *>> image_embed_slices, gpt_params * params, int &n_past);
MINICPMV_API const char * sample(struct llama_sampling_context * ctx_sampling, struct llama_context * ctx_llama, int * n_past);

#ifdef __cplusplus
}
#endif
Expand Down

0 comments on commit 6fd0937

Please sign in to comment.