
Commit

fix issues for merging
tc-mb committed Jul 17, 2024
1 parent 3e6348b commit c5b6851
Showing 8 changed files with 48 additions and 24 deletions.
1 change: 0 additions & 1 deletion .gitignore
@@ -117,7 +117,6 @@ poetry.toml
/tests/test-tokenizer-0
/tests/test-tokenizer-1-bpe
/tests/test-tokenizer-1-spm
/openbmb

# Scripts
!/scripts/install-oneapi.bat
Binary file removed examples/llava/assets/xiaomi14pro_test.jpeg
31 changes: 23 additions & 8 deletions examples/llava/clip.cpp
@@ -554,7 +554,7 @@ struct clip_ctx {
ggml_gallocr_t compute_alloc = NULL;
};

static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch * imgs, std::pair<int, int> load_image_size = {448, 448}, bool is_inf = false) {
static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch * imgs, struct load_image_size * load_image_size, bool is_inf = false) {
if (!ctx->has_vision_encoder) {
LOG_TEE("This gguf file seems to have no vision encoder\n");
return nullptr;
@@ -567,8 +567,12 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
int image_size_width = image_size;
int image_size_height = image_size;
if (ctx->has_minicpmv_projector) {
image_size_width = load_image_size.first;
image_size_height = load_image_size.second;
if (load_image_size == nullptr) {
load_image_size = load_image_size_init();
}
LOG_TEE("%s : %d %d\n", __func__, load_image_size->image_size_width, load_image_size->image_size_height);
image_size_width = load_image_size->image_size_width;
image_size_height = load_image_size->image_size_height;
if (is_inf){
image_size_width = imgs->data->nx;
image_size_height = imgs->data->ny;
@@ -995,7 +999,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
}

// read and create ggml_context containing the tensors and their data
struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1, std::pair<int, int> load_image_size) {
struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1, struct load_image_size * load_image_size) {
struct ggml_context * meta = NULL;

struct gguf_init_params params = {
@@ -1464,6 +1468,13 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1, s
return new_clip;
}

struct load_image_size * load_image_size_init() {
struct load_image_size * load_image_size = new struct load_image_size();
load_image_size->image_size_width = 448;
load_image_size->image_size_height = 448;
return load_image_size;
}

struct clip_image_u8 * clip_image_u8_init() {
return new clip_image_u8();
}
Expand Down Expand Up @@ -2058,7 +2069,7 @@ static std::vector<std::vector<float>> get_2d_sincos_pos_embed(int embed_dim, co
return pos_embed_2d;
}

bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec, std::pair<int, int> load_image_size) {
bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec, struct load_image_size * load_image_size) {
if (!ctx->has_vision_encoder) {
LOG_TEE("This gguf file seems to have no vision encoder\n");
return false;
@@ -2070,7 +2081,7 @@ bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f3
return clip_image_batch_encode(ctx, n_threads, &imgs, vec, load_image_size);
}

bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs, float * vec, std::pair<int, int> load_image_size) {
bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs, float * vec, struct load_image_size * load_image_size) {
if (!ctx->has_vision_encoder) {
LOG_TEE("This gguf file seems to have no vision encoder\n");
return false;
@@ -2148,8 +2159,12 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
// -> https://huggingface.co/Qwen/Qwen-VL/tree/main
// -> https://huggingface.co/Qwen/Qwen-VL/blob/0547ed36a86561e2e42fecec8fd0c4f6953e33c4/visual.py#L23
struct ggml_tensor * pos_embed = ggml_graph_get_tensor(gf, "pos_embed");
int pos_w = load_image_size.first/patch_size;
int pos_h = load_image_size.second/patch_size;
if (load_image_size == nullptr) {
load_image_size = load_image_size_init();
}
LOG_TEE("%s : %d %d\n", __func__, load_image_size->image_size_width, load_image_size->image_size_height);
int pos_w = load_image_size->image_size_width/patch_size;
int pos_h = load_image_size->image_size_height/patch_size;
int embed_dim = 4096;
auto pos_embed_t = get_2d_sincos_pos_embed(embed_dim, std::make_pair(pos_w, pos_h));
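
Note: the hunks above swap the std::pair<int, int> size argument for a C-compatible struct, with a null pointer falling back to the 448 x 448 default allocated by load_image_size_init(). A minimal caller-side sketch of the new convention follows; the model path, resolution, and variable names are illustrative assumptions, not part of this commit.

    // Sketch only: exercises the API introduced in this commit.
    struct load_image_size * lis = load_image_size_init();   // heap-allocated, defaults to 448 x 448
    lis->image_size_width  = 672;                             // hypothetical source resolution
    lis->image_size_height = 448;
    struct clip_ctx * ctx = clip_model_load("mmproj-model-f16.gguf", /*verbosity=*/ 1, lis);
    // clip_image_encode(ctx, n_threads, img_f32, embd, lis); // or pass nullptr to use the default
    delete lis;                                               // ownership stays with the caller in this sketch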

12 changes: 8 additions & 4 deletions examples/llava/clip.h
@@ -3,7 +3,6 @@

#include <stddef.h>
#include <stdint.h>
#include <utility>

#ifdef LLAMA_SHARED
# if defined(_WIN32) && !defined(__MINGW32__)
@@ -27,6 +26,10 @@ extern "C" {

struct clip_ctx;

struct load_image_size {
int image_size_width;
int image_size_height;
};
struct clip_image_u8_batch {
struct clip_image_u8 * data;
size_t size;
Expand All @@ -37,7 +40,7 @@ struct clip_image_f32_batch {
size_t size;
};

CLIP_API struct clip_ctx * clip_model_load (const char * fname, int verbosity, std::pair<int, int> load_image_size = {448, 448});
CLIP_API struct clip_ctx * clip_model_load (const char * fname, int verbosity, struct load_image_size * load_image_size = nullptr);
CLIP_API struct clip_ctx * clip_model_load_cpu(const char * fname, int verbosity);

CLIP_API void clip_free(struct clip_ctx * ctx);
@@ -56,6 +59,7 @@ CLIP_API const int32_t * clip_image_grid(const struct clip_ctx * ctx);
CLIP_API int clip_n_patches (const struct clip_ctx * ctx);
CLIP_API int clip_n_mmproj_embd(const struct clip_ctx * ctx);

CLIP_API struct load_image_size * load_image_size_init();
CLIP_API struct clip_image_u8 * clip_image_u8_init ();
CLIP_API struct clip_image_f32 * clip_image_f32_init();

@@ -76,8 +80,8 @@ CLIP_API void uhd_normalize_image_u8_to_f32(struct clip_ctx * ctx, const clip_im

CLIP_API struct ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx);

CLIP_API bool clip_image_encode (struct clip_ctx * ctx, int n_threads, struct clip_image_f32 * img, float * vec, std::pair<int, int> load_image_size = {448, 448});
CLIP_API bool clip_image_batch_encode(struct clip_ctx * ctx, int n_threads, const struct clip_image_f32_batch * imgs, float * vec, std::pair<int, int> load_image_size = {448, 448});
CLIP_API bool clip_image_encode (struct clip_ctx * ctx, int n_threads, struct clip_image_f32 * img, float * vec, struct load_image_size * load_image_size = nullptr);
CLIP_API bool clip_image_batch_encode(struct clip_ctx * ctx, int n_threads, const struct clip_image_f32_batch * imgs, float * vec, struct load_image_size * load_image_size = nullptr);

CLIP_API bool clip_model_quantize(const char * fname_inp, const char * fname_out, int itype);

14 changes: 9 additions & 5 deletions examples/llava/llava.cpp
@@ -31,6 +31,9 @@ struct clip_image_grid_shape {
int second;
};

struct uhd_image_embed {
std::vector<std::vector<struct llava_image_embed *>> image_embeds;
};
/**
* Selects the best resolution from a list of possible resolutions based on the original size.
*
@@ -410,7 +413,7 @@ void llava_image_embed_free(struct llava_image_embed * embed) {
free(embed);
}

static bool encode_image_with_clip_uhd(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_pos, std::pair<int, int> load_image_size) {
static bool encode_image_with_clip_uhd(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_pos, struct load_image_size * load_image_size) {
// std::vector<clip_image_f32*> img_res_v;
// format VectN x H x W x RGB (N x 448 x 448 x 3)
clip_image_f32 * img_res_v = clip_image_f32_init();
@@ -683,9 +686,10 @@ struct uhd_image_embed * llava_image_embed_make_with_bytes_uhd(struct clip_ctx *
float* image_embed = NULL;
int n_image_pos = 0;
int patch_size=14;
std::pair<int, int> load_image_size;
load_image_size.first = imgs[i][j]->nx;
load_image_size.second = imgs[i][j]->ny;
struct load_image_size * load_image_size = load_image_size_init();
load_image_size->image_size_width = imgs[i][j]->nx;
load_image_size->image_size_height = imgs[i][j]->ny;
LOG_TEE("%s : %d %d\n", __func__, load_image_size->image_size_width, load_image_size->image_size_height);
bool image_embed_result = llava_image_embed_make_with_clip_img_uhd(ctx_clip, n_threads, only_v2_5_reshape_by_patch(imgs[i][j], patch_size), &image_embed, &n_image_pos, load_image_size);
if (!image_embed_result) {
LOG_TEE("%s: coulnd't embed the image\n", __func__);
@@ -701,7 +705,7 @@ struct uhd_image_embed * llava_image_embed_make_with_bytes_uhd(struct clip_ctx *
return results;
}

bool llava_image_embed_make_with_clip_img_uhd(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out, std::pair<int, int> load_image_size) {
bool llava_image_embed_make_with_clip_img_uhd(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out, struct load_image_size * load_image_size) {
float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)*6); // TODO: base on gridsize/llava model
if (!image_embd) {
LOG_TEE("Unable to allocate memory for image embeddings\n");
8 changes: 3 additions & 5 deletions examples/llava/llava.h
@@ -18,15 +18,13 @@
#endif

struct clip_ctx;

struct uhd_image_embed {
std::vector<std::vector<struct llava_image_embed *>> image_embeds;
};
struct uhd_image_embed;

#ifdef __cplusplus
extern "C" {
#endif

struct uhd_image_embed;
struct llava_image_embed {
float * embed;
int n_image_pos;
@@ -47,7 +45,7 @@ LLAVA_API void llava_image_embed_free(struct llava_image_embed * embed);
/** build an image embed from image file bytes */
LLAVA_API struct uhd_image_embed * llava_image_embed_make_with_bytes_uhd(struct clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img);
/** build an image embed from a path to an image filename */
LLAVA_API bool llava_image_embed_make_with_clip_img_uhd(struct clip_ctx * ctx_clip, int n_threads, const struct clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out, std::pair<int, int> load_image_size = {448, 448});
LLAVA_API bool llava_image_embed_make_with_clip_img_uhd(struct clip_ctx * ctx_clip, int n_threads, const struct clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out, struct load_image_size * load_image_size = nullptr);
LLAVA_API bool llava_image_embed_make_with_clip_img_ollama(struct clip_ctx * ctx_clip, int n_threads, const struct clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out);
LLAVA_API struct uhd_image_embed * llava_image_embed_make_with_filename_uhd(struct clip_ctx * ctx_clip, int n_threads, const char * image_path);
LLAVA_API void llava_image_embed_free_uhd(struct uhd_image_embed * embed);
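
The full uhd_image_embed definition leaves this header and is defined locally in the translation units that need its layout (llava.cpp above, minicpmv-cli.cpp below); since the struct holds a std::vector, keeping only an opaque forward declaration here keeps the header usable from plain C. A minimal consumer-side sketch, assuming a clip context and a loaded clip_image_u8 are already available:

    // Sketch only: the embed stays opaque to callers of the public API.
    struct uhd_image_embed * embeds =
        llava_image_embed_make_with_bytes_uhd(ctx_clip, n_threads, img);
    // ... feed the per-slice embeddings to the eval path ...
    llava_image_embed_free_uhd(embeds);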
4 changes: 4 additions & 0 deletions examples/llava/minicpmv-cli.cpp
@@ -10,6 +10,10 @@
#include <cstdlib>
#include <vector>

struct uhd_image_embed {
std::vector<std::vector<struct llava_image_embed *>> image_embeds;
};

static void show_additional_info(int /*argc*/, char ** argv) {
LOG_TEE("\n example usage: %s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
LOG_TEE(" note: a lower temperature value like 0.1 is recommended for better quality.\n");
2 changes: 1 addition & 1 deletion examples/llava/minicpmv_wrapper.cpp
@@ -58,7 +58,7 @@ struct clip_ctx * clip_init_context(gpt_params * params) {
if (prompt.empty()) {
prompt = "describe the image in detail.";
}
std::pair<int, int> load_image_size = std::make_pair(448, 448);
struct load_image_size * load_image_size = load_image_size_init();
auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1, load_image_size);
return ctx_clip;
}