From e83698b653dc332cc326f7aca2c8740eb5123e04 Mon Sep 17 00:00:00 2001 From: caitianchi Date: Thu, 8 Aug 2024 21:26:12 +0800 Subject: [PATCH] init --- Makefile | 20 +- examples/llava/llava.cpp | 4 +- examples/llava/minicpmv-cli.cpp | 334 +++++++-------- ...nicpmv2_0-convert-image-encoder-to-gguf.py | 405 ++++++++++++++++++ .../minicpmv-convert/minicpmv2_0-surgery.py | 48 +++ ...nicpmv2_5-convert-image-encoder-to-gguf.py | 2 +- 6 files changed, 636 insertions(+), 177 deletions(-) create mode 100644 examples/llava/minicpmv-convert/minicpmv2_0-convert-image-encoder-to-gguf.py create mode 100644 examples/llava/minicpmv-convert/minicpmv2_0-surgery.py diff --git a/Makefile b/Makefile index a157b1e1ceef8..e70564603b1ff 100644 --- a/Makefile +++ b/Makefile @@ -950,15 +950,21 @@ llama-llava-cli: examples/llava/llava-cli.cpp examples/llava/clip.h examples/lla $(CXX) $(CXXFLAGS) -c examples/llava/llava.cpp -o $(call GET_OBJ_FILE, examples/llava/llava.cpp) $(CXX) $(CXXFLAGS) $(filter-out %.h $< examples/llava/clip.cpp examples/llava/llava.cpp,$^) $(call GET_OBJ_FILE, $<) $(call GET_OBJ_FILE, examples/llava/clip.cpp) $(call GET_OBJ_FILE, examples/llava/llava.cpp) -o $@ $(LDFLAGS) -FFMPEG_CFLAGS := $(shell pkg-config --cflags libavformat libavcodec libavutil) -FFMPEG_LIBS := $(shell pkg-config --libs libavformat libavcodec libavutil) -lswscale +# FFMPEG_CFLAGS := $(shell pkg-config --cflags libavformat libavcodec libavutil) +# FFMPEG_LIBS := $(shell pkg-config --libs libavformat libavcodec libavutil) -lswscale + +# llama-minicpmv-cli: examples/llava/minicpmv-cli.cpp examples/llava/clip.h examples/llava/clip.cpp examples/llava/llava.h examples/llava/llava.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) +# $(CXX) $(CXXFLAGS) $(FFMPEG_CFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(FFMPEG_CFLAGS) -c examples/llava/clip.cpp -o $(call GET_OBJ_FILE, examples/llava/clip.cpp) -Wno-cast-qual +# $(CXX) $(CXXFLAGS) $(FFMPEG_CFLAGS) -c examples/llava/llava.cpp -o $(call GET_OBJ_FILE, examples/llava/llava.cpp) +# $(CXX) $(CXXFLAGS) $(FFMPEG_CFLAGS) $(filter-out %.h $< examples/llava/clip.cpp examples/llava/llava.cpp,$^) $(call GET_OBJ_FILE, $<) $(call GET_OBJ_FILE, examples/llava/clip.cpp) $(call GET_OBJ_FILE, examples/llava/llava.cpp) -o $@ $(LDFLAGS) $(FFMPEG_LIBS) llama-minicpmv-cli: examples/llava/minicpmv-cli.cpp examples/llava/clip.h examples/llava/clip.cpp examples/llava/llava.h examples/llava/llava.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) - $(CXX) $(CXXFLAGS) $(FFMPEG_CFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(FFMPEG_CFLAGS) -c examples/llava/clip.cpp -o $(call GET_OBJ_FILE, examples/llava/clip.cpp) -Wno-cast-qual - $(CXX) $(CXXFLAGS) $(FFMPEG_CFLAGS) -c examples/llava/llava.cpp -o $(call GET_OBJ_FILE, examples/llava/llava.cpp) - $(CXX) $(CXXFLAGS) $(FFMPEG_CFLAGS) $(filter-out %.h $< examples/llava/clip.cpp examples/llava/llava.cpp,$^) $(call GET_OBJ_FILE, $<) $(call GET_OBJ_FILE, examples/llava/clip.cpp) $(call GET_OBJ_FILE, examples/llava/llava.cpp) -o $@ $(LDFLAGS) $(FFMPEG_LIBS) - + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) -c examples/llava/clip.cpp -o $(call GET_OBJ_FILE, examples/llava/clip.cpp) -Wno-cast-qual + $(CXX) $(CXXFLAGS) -c examples/llava/llava.cpp -o $(call GET_OBJ_FILE, examples/llava/llava.cpp) + $(CXX) $(CXXFLAGS) $(filter-out %.h $< examples/llava/clip.cpp examples/llava/llava.cpp,$^) $(call GET_OBJ_FILE, $<) $(call GET_OBJ_FILE, examples/llava/clip.cpp) $(call GET_OBJ_FILE, examples/llava/llava.cpp) -o $@ $(LDFLAGS) + 
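# The rewritten rule above builds llama-minicpmv-cli without the FFmpeg/libswscale
# dependency that the commented-out video variant required, so the target now compiles
# with the stock toolchain alone. Illustrative usage (not part of the patch), built the
# same way as the other example targets:
#
#   make llama-minicpmv-cli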
llama-baby-llama: examples/baby-llama/baby-llama.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp index c97bd28f0b331..7432f4ca60595 100644 --- a/examples/llava/llava.cpp +++ b/examples/llava/llava.cpp @@ -252,7 +252,7 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli for (size_t i = 0; i < img_res_v.size; i++) { const int64_t t_img_enc_step_start_us = ggml_time_us(); image_embd_v[i] = (float *)malloc(clip_embd_nbytes(ctx_clip)); - int patch_size=14; + int patch_size = 14; load_image_size->width = img_res_v.data[i].nx; load_image_size->height = img_res_v.data[i].ny; clip_add_load_image_size(ctx_clip, load_image_size); @@ -261,7 +261,7 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli if (has_minicpmv_projector == 2) { encoded = clip_image_encode(ctx_clip, n_threads, only_v2_5_reshape_by_patch(&img_res_v.data[i], patch_size), image_embd_v[i]); } - else if (has_minicpmv_projector == 3) { + else { encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[i], image_embd_v[i]); } if (!encoded) { diff --git a/examples/llava/minicpmv-cli.cpp b/examples/llava/minicpmv-cli.cpp index 6f46c2a1419b5..809e5c9ecd23c 100644 --- a/examples/llava/minicpmv-cli.cpp +++ b/examples/llava/minicpmv-cli.cpp @@ -9,12 +9,12 @@ #include #include -extern "C" { - #include - #include - #include - #include -} +// extern "C" { +// #include +// #include +// #include +// #include +// } struct llava_context { struct clip_ctx * ctx_clip = NULL; @@ -28,133 +28,133 @@ struct clip_image_u8 { std::vector buf; }; -static std::vector extract_frames(const std::string& video_path) { - AVFormatContext* format_ctx = nullptr; - if (avformat_open_input(&format_ctx, video_path.c_str(), nullptr, nullptr) < 0) { - LOG_TEE("Could not open video file."); - return {}; - } - - if (avformat_find_stream_info(format_ctx, nullptr) < 0) { - LOG_TEE("Could not find stream information."); - avformat_close_input(&format_ctx); - return {}; - } - - const AVCodec* codec = nullptr; - AVCodecContext* codec_ctx = nullptr; - int video_stream_index = -1; - - // Find the video stream - for (size_t i = 0; i < format_ctx->nb_streams; ++i) { - if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { - codec = avcodec_find_decoder(format_ctx->streams[i]->codecpar->codec_id); - if (codec) { - video_stream_index = i; - break; - } - } - } - - if (video_stream_index == -1) { - LOG_TEE("Could not find video stream."); - avformat_close_input(&format_ctx); - return {}; - } - - codec_ctx = avcodec_alloc_context3(codec); - if (!codec_ctx) { - LOG_TEE("Could not allocate video codec context."); - avformat_close_input(&format_ctx); - return {}; - } - - if (avcodec_parameters_to_context(codec_ctx, format_ctx->streams[video_stream_index]->codecpar) < 0) { - LOG_TEE("Could not copy codec parameters to codec context."); - avcodec_free_context(&codec_ctx); - avformat_close_input(&format_ctx); - return {}; - } - - if (avcodec_open2(codec_ctx, codec, nullptr) < 0) { - LOG_TEE("Could not open codec."); - avcodec_free_context(&codec_ctx); - avformat_close_input(&format_ctx); - return {}; - } - - AVFrame* frame = av_frame_alloc(); - AVFrame* frame_rgb = av_frame_alloc(); - if (!frame || !frame_rgb) { - LOG_TEE("Could not allocate frames."); - av_frame_free(&frame); - av_frame_free(&frame_rgb); 
- avcodec_free_context(&codec_ctx); - avformat_close_input(&format_ctx); - return {}; - } - - int num_bytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, codec_ctx->width, codec_ctx->height, 1); - uint8_t* buffer = (uint8_t*)av_malloc(num_bytes * sizeof(uint8_t)); - av_image_fill_arrays(frame_rgb->data, frame_rgb->linesize, buffer, AV_PIX_FMT_RGB24, codec_ctx->width, codec_ctx->height, 1); - - struct SwsContext* sws_ctx = sws_getContext(codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt, - codec_ctx->width, codec_ctx->height, AV_PIX_FMT_RGB24, - SWS_BILINEAR, nullptr, nullptr, nullptr); - - std::vector frames; - - AVPacket packet; - int64_t last_pts = AV_NOPTS_VALUE; - int64_t total_frames = format_ctx->streams[video_stream_index]->nb_frames; - // LOG_TEE("total_frames: %lld\n", total_frames); - - int64_t frame_interval = (int64_t)codec_ctx->framerate.num / codec_ctx->framerate.den; - // LOG_TEE("frame_interval: %lld\n", frame_interval); - // LOG_TEE("codec_ctx->framerate.num: %lld\n", codec_ctx->framerate.num); - // LOG_TEE("codec_ctx->framerate.den: %lld\n", codec_ctx->framerate.den); - - float frame_len = 1.0 * total_frames / frame_interval; - LOG_TEE("frame_len: %f\n", frame_len); - if(frame_len > 15){ - frame_interval = (int64_t)(1.0 * total_frames / 15); - } - // LOG_TEE("frame_interval: %lld\n", frame_interval); - int frame_idx = 0; - while (av_read_frame(format_ctx, &packet) >= 0) { - if (packet.stream_index == video_stream_index) { - if (avcodec_send_packet(codec_ctx, &packet) == 0) { - for(;avcodec_receive_frame(codec_ctx, frame) == 0;frame_idx++) { - // int frame_idx = frame->pts/codec_ctx->framerate.den; - // LOG_TEE("frame_idx: %d %d\n", frame_idx, frame_idx % frame_interval); - if (frame->pts != last_pts && (frame_idx) % frame_interval == 0) { - sws_scale(sws_ctx, frame->data, frame->linesize, 0, codec_ctx->height, - frame_rgb->data, frame_rgb->linesize); - - clip_image_u8 * img = clip_image_u8_init(); - img->nx = codec_ctx->width; - img->ny = codec_ctx->height; - img->buf.resize(num_bytes); - std::copy(buffer, buffer + num_bytes, img->buf.begin()); - - frames.push_back(img); - last_pts = frame->pts; - } - } - } - } - av_packet_unref(&packet); - } - - av_free(buffer); - av_frame_free(&frame_rgb); - av_frame_free(&frame); - avcodec_free_context(&codec_ctx); - avformat_close_input(&format_ctx); - sws_freeContext(sws_ctx); - - return frames; -} +// static std::vector extract_frames(const std::string& video_path) { +// AVFormatContext* format_ctx = nullptr; +// if (avformat_open_input(&format_ctx, video_path.c_str(), nullptr, nullptr) < 0) { +// LOG_TEE("Could not open video file."); +// return {}; +// } + +// if (avformat_find_stream_info(format_ctx, nullptr) < 0) { +// LOG_TEE("Could not find stream information."); +// avformat_close_input(&format_ctx); +// return {}; +// } + +// const AVCodec* codec = nullptr; +// AVCodecContext* codec_ctx = nullptr; +// int video_stream_index = -1; + +// // Find the video stream +// for (size_t i = 0; i < format_ctx->nb_streams; ++i) { +// if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { +// codec = avcodec_find_decoder(format_ctx->streams[i]->codecpar->codec_id); +// if (codec) { +// video_stream_index = i; +// break; +// } +// } +// } + +// if (video_stream_index == -1) { +// LOG_TEE("Could not find video stream."); +// avformat_close_input(&format_ctx); +// return {}; +// } + +// codec_ctx = avcodec_alloc_context3(codec); +// if (!codec_ctx) { +// LOG_TEE("Could not allocate video codec context."); +// 
avformat_close_input(&format_ctx); +// return {}; +// } + +// if (avcodec_parameters_to_context(codec_ctx, format_ctx->streams[video_stream_index]->codecpar) < 0) { +// LOG_TEE("Could not copy codec parameters to codec context."); +// avcodec_free_context(&codec_ctx); +// avformat_close_input(&format_ctx); +// return {}; +// } + +// if (avcodec_open2(codec_ctx, codec, nullptr) < 0) { +// LOG_TEE("Could not open codec."); +// avcodec_free_context(&codec_ctx); +// avformat_close_input(&format_ctx); +// return {}; +// } + +// AVFrame* frame = av_frame_alloc(); +// AVFrame* frame_rgb = av_frame_alloc(); +// if (!frame || !frame_rgb) { +// LOG_TEE("Could not allocate frames."); +// av_frame_free(&frame); +// av_frame_free(&frame_rgb); +// avcodec_free_context(&codec_ctx); +// avformat_close_input(&format_ctx); +// return {}; +// } + +// int num_bytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, codec_ctx->width, codec_ctx->height, 1); +// uint8_t* buffer = (uint8_t*)av_malloc(num_bytes * sizeof(uint8_t)); +// av_image_fill_arrays(frame_rgb->data, frame_rgb->linesize, buffer, AV_PIX_FMT_RGB24, codec_ctx->width, codec_ctx->height, 1); + +// struct SwsContext* sws_ctx = sws_getContext(codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt, +// codec_ctx->width, codec_ctx->height, AV_PIX_FMT_RGB24, +// SWS_BILINEAR, nullptr, nullptr, nullptr); + +// std::vector frames; + +// AVPacket packet; +// int64_t last_pts = AV_NOPTS_VALUE; +// int64_t total_frames = format_ctx->streams[video_stream_index]->nb_frames; +// // LOG_TEE("total_frames: %lld\n", total_frames); + +// int64_t frame_interval = (int64_t)codec_ctx->framerate.num / codec_ctx->framerate.den; +// // LOG_TEE("frame_interval: %lld\n", frame_interval); +// // LOG_TEE("codec_ctx->framerate.num: %lld\n", codec_ctx->framerate.num); +// // LOG_TEE("codec_ctx->framerate.den: %lld\n", codec_ctx->framerate.den); + +// float frame_len = 1.0 * total_frames / frame_interval; +// LOG_TEE("frame_len: %f\n", frame_len); +// if(frame_len > 15){ +// frame_interval = (int64_t)(1.0 * total_frames / 15); +// } +// // LOG_TEE("frame_interval: %lld\n", frame_interval); +// int frame_idx = 0; +// while (av_read_frame(format_ctx, &packet) >= 0) { +// if (packet.stream_index == video_stream_index) { +// if (avcodec_send_packet(codec_ctx, &packet) == 0) { +// for(;avcodec_receive_frame(codec_ctx, frame) == 0;frame_idx++) { +// // int frame_idx = frame->pts/codec_ctx->framerate.den; +// // LOG_TEE("frame_idx: %d %d\n", frame_idx, frame_idx % frame_interval); +// if (frame->pts != last_pts && (frame_idx) % frame_interval == 0) { +// sws_scale(sws_ctx, frame->data, frame->linesize, 0, codec_ctx->height, +// frame_rgb->data, frame_rgb->linesize); + +// clip_image_u8 * img = clip_image_u8_init(); +// img->nx = codec_ctx->width; +// img->ny = codec_ctx->height; +// img->buf.resize(num_bytes); +// std::copy(buffer, buffer + num_bytes, img->buf.begin()); + +// frames.push_back(img); +// last_pts = frame->pts; +// } +// } +// } +// } +// av_packet_unref(&packet); +// } + +// av_free(buffer); +// av_frame_free(&frame_rgb); +// av_frame_free(&frame); +// avcodec_free_context(&codec_ctx); +// avformat_close_input(&format_ctx); +// sws_freeContext(sws_ctx); + +// return frames; +// } static void show_additional_info(int /*argc*/, char ** argv) { LOG_TEE("\n example usage: %s -m --mmproj [--video ] [--image ] [--image ] [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]); @@ -471,39 +471,39 @@ int main(int argc, char ** argv) { int n_past = 0; struct llava_context * 
ctx_llava = nullptr; - if (params.video.size() > 0){ - ctx_llava = llava_init_context(¶ms); - auto video = params.video; - std::vector frames = extract_frames(video.c_str()); - process_prompt(0, ctx_llava, ¶ms, n_past); - // LOG_TEE("frames.size: %zu\n", frames.size()); - for (size_t i = 0; i < frames.size(); ++i) { - auto embeds = video_image_embed(ctx_llava->ctx_clip, ¶ms, frames[i]); - process_input(ctx_llava, ¶ms, 1, "", n_past, embeds); - } - process_input(ctx_llava, ¶ms, 0, params.prompt.c_str(), n_past); - process_prompt(2, ctx_llava, ¶ms, n_past); - - struct llama_sampling_context * ctx_sampling = llama_sampling_init(params.sparams); - const int max_tgt_len = params.n_predict < 0 ? 8192 : params.n_predict; - std::string response = ""; - bool have_tmp = false; - for (int i = 0; i < max_tgt_len; i++) { - auto tmp = llama_loop(ctx_llava, ctx_sampling, n_past); - response += tmp; - if (strcmp(tmp, "") == 0){ - if(!have_tmp)continue; - else break; - } - have_tmp = true; - printf("%s", tmp); - if (strstr(response.c_str(), "")) break; // minicpm-v - - fflush(stdout); - } - llama_sampling_free(ctx_sampling); - } - else { + // if (params.video.size() > 0){ + // ctx_llava = llava_init_context(¶ms); + // auto video = params.video; + // std::vector frames = extract_frames(video.c_str()); + // process_prompt(0, ctx_llava, ¶ms, n_past); + // // LOG_TEE("frames.size: %zu\n", frames.size()); + // for (size_t i = 0; i < frames.size(); ++i) { + // auto embeds = video_image_embed(ctx_llava->ctx_clip, ¶ms, frames[i]); + // process_input(ctx_llava, ¶ms, 1, "", n_past, embeds); + // } + // process_input(ctx_llava, ¶ms, 0, params.prompt.c_str(), n_past); + // process_prompt(2, ctx_llava, ¶ms, n_past); + + // struct llama_sampling_context * ctx_sampling = llama_sampling_init(params.sparams); + // const int max_tgt_len = params.n_predict < 0 ? 
8192 : params.n_predict; + // std::string response = ""; + // bool have_tmp = false; + // for (int i = 0; i < max_tgt_len; i++) { + // auto tmp = llama_loop(ctx_llava, ctx_sampling, n_past); + // response += tmp; + // if (strcmp(tmp, "") == 0){ + // if(!have_tmp)continue; + // else break; + // } + // have_tmp = true; + // printf("%s", tmp); + // if (strstr(response.c_str(), "")) break; // minicpm-v + + // fflush(stdout); + // } + // llama_sampling_free(ctx_sampling); + // } + // else { if (params.image.size() > 1) { ctx_llava = llava_init_context(¶ms); process_prompt(0, ctx_llava, ¶ms, n_past); @@ -585,7 +585,7 @@ int main(int argc, char ** argv) { ctx_llava->model = NULL; llava_free(ctx_llava); - } + // } return 0; } \ No newline at end of file diff --git a/examples/llava/minicpmv-convert/minicpmv2_0-convert-image-encoder-to-gguf.py b/examples/llava/minicpmv-convert/minicpmv2_0-convert-image-encoder-to-gguf.py new file mode 100644 index 0000000000000..087512bd67fd9 --- /dev/null +++ b/examples/llava/minicpmv-convert/minicpmv2_0-convert-image-encoder-to-gguf.py @@ -0,0 +1,405 @@ +import argparse +import os +import json +import re + +import torch +import numpy as np +from gguf import * +import timm + +TEXT = "clip.text" +VISION = "clip.vision" + + +def k(raw_key: str, arch: str) -> str: + return raw_key.format(arch=arch) + + +def should_skip_tensor(name: str, has_text: bool, has_vision: bool, has_minicpmv: bool) -> bool: + if name in ( + "logit_scale", + "text_model.embeddings.position_ids", + "vision_model.embeddings.position_ids", + ): + return True + + if has_minicpmv and name in ["visual_projection.weight"]: + return True + + if name.startswith("v") and not has_vision: + return True + + if name.startswith("t") and not has_text: + return True + + return False + + +def get_tensor_name(name: str) -> str: + if "projection" in name: + return name + if "mm_projector" in name: + name = name.replace("model.mm_projector", "mm") + name = re.sub(r'mm\.mlp\.mlp', 'mm.model.mlp', name, count=1) + name = re.sub(r'mm\.peg\.peg', 'mm.model.peg', name, count=1) + return name + + return name.replace("text_model", "t").replace("vision_model", "v").replace("encoder.layers", "blk").replace("embeddings.", "").replace("_proj", "").replace("self_attn.", "attn_").replace("layer_norm", "ln").replace("layernorm", "ln").replace("mlp.fc1", "ffn_down").replace("mlp.fc2", "ffn_up").replace("embedding", "embd").replace("final", "post").replace("layrnorm", "ln") + + +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a significant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. 
+ """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + + list(range(ord("¡"), ord("¬") + 1)) + + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +ap = argparse.ArgumentParser() +ap.add_argument("-m", "--model-dir", help="Path to model directory cloned from HF Hub", required=True) +ap.add_argument("--use-f32", action="store_true", default=False, help="Use f32 instead of f16") +ap.add_argument("--text-only", action="store_true", required=False, + help="Save a text-only model. It can't be used to encode images") +ap.add_argument("--vision-only", action="store_true", required=False, + help="Save a vision-only model. It can't be used to encode texts") +ap.add_argument("--clip-model-is-vision", action="store_true", required=False, + help="The clip model is a pure vision model (ShareGPT4V vision extract for example)") +ap.add_argument("--clip-model-is-openclip", action="store_true", required=False, + help="The clip model is from openclip (for ViT-SO400M type))") +ap.add_argument("--minicpmv-projector", help="Path to minicpmv.projector file. If specified, save an image encoder for minicpmv models.") +ap.add_argument("--projector-type", help="Type of projector. Possible values: mlp, ldp, ldpv2", choices=["mlp", "ldp", "ldpv2"], default="mlp") +ap.add_argument("-o", "--output-dir", help="Directory to save GGUF files. Default is the original model directory", default=None) +# Example --image_mean 0.48145466 0.4578275 0.40821073 --image_std 0.26862954 0.26130258 0.27577711 +# Example --image_mean 0.5 0.5 0.5 --image_std 0.5 0.5 0.5 +default_image_mean = [0.48145466, 0.4578275, 0.40821073] +default_image_std = [0.26862954, 0.26130258, 0.27577711] +ap.add_argument('--image-mean', type=float, nargs='+', help='Mean of the images for normalization (overrides processor) ', default=None) +ap.add_argument('--image-std', type=float, nargs='+', help='Standard deviation of the images for normalization (overrides processor)', default=None) + +# with proper +args = ap.parse_args() + + +if args.text_only and args.vision_only: + print("--text-only and --image-only arguments cannot be specified at the same time.") + exit(1) + +if args.use_f32: + print("WARNING: Weights for the convolution op is always saved in f16, as the convolution op in GGML does not support 32-bit kernel weights yet.") + +# output in the same directory as the model if output_dir is None +dir_model = args.model_dir + +if args.clip_model_is_vision or not os.path.exists(dir_model + "/vocab.json") or args.clip_model_is_openclip: + vocab = None + tokens = None +else: + with open(dir_model + "/vocab.json", "r", encoding="utf-8") as f: + vocab = json.load(f) + tokens = [key for key in vocab] + +# possible data types +# ftype == 0 -> float32 +# ftype == 1 -> float16 +# +# map from ftype to string +ftype_str = ["f32", "f16"] + +ftype = 1 +if args.use_f32: + ftype = 0 + +# if args.clip_model_is_vision or args.clip_model_is_openclip: +# model = CLIPVisionModel.from_pretrained(dir_model) +# processor = None +# else: +# model = CLIPModel.from_pretrained(dir_model) +# processor = CLIPProcessor.from_pretrained(dir_model) +model = timm.create_model( + "vit_so400m_patch14_siglip_384.webli", + pretrained=False, + num_classes=0, + dynamic_img_size=True, + dynamic_img_pad=True, +) +processor = None +if model.attn_pool is not None: + model.attn_pool = torch.nn.Identity() + +model.blocks = model.blocks[:-1] 
+model.load_state_dict(torch.load(os.path.join(dir_model, "minicpmv.clip"))) + +fname_middle = None +has_text_encoder = True +has_vision_encoder = True +has_minicpmv_projector = False +if args.text_only: + fname_middle = "text-" + has_vision_encoder = False +elif args.minicpmv_projector is not None: + fname_middle = "mmproj-" + has_text_encoder = False + has_minicpmv_projector = True + minicpmv_version = 1 +elif args.vision_only: + fname_middle = "vision-" + has_text_encoder = False +else: + fname_middle = "" + +output_dir = args.output_dir if args.output_dir is not None else dir_model +os.makedirs(output_dir, exist_ok=True) +output_prefix = os.path.basename(output_dir).replace("ggml_", "") +fname_out = os.path.join(output_dir, f"{fname_middle}model-{ftype_str[ftype]}.gguf") +fout = GGUFWriter(path=fname_out, arch="clip") + +fout.add_bool("clip.has_text_encoder", has_text_encoder) +fout.add_bool("clip.has_vision_encoder", has_vision_encoder) +fout.add_bool("clip.has_minicpmv_projector", has_minicpmv_projector) +fout.add_file_type(ftype) +if args.text_only: + fout.add_description("text-only CLIP model") +elif args.vision_only and not has_minicpmv_projector: + fout.add_description("vision-only CLIP model") +elif has_minicpmv_projector: + fout.add_description("image encoder for MiniCPM-V") + # add projector type + fout.add_string("clip.projector_type", "resampler") + fout.add_int32("clip.minicpmv_version", minicpmv_version) +else: + fout.add_description("two-tower CLIP model") + +if has_vision_encoder: + # vision_model hparams + fout.add_uint32("clip.vision.image_size", 448) + fout.add_uint32("clip.vision.patch_size", 14) + fout.add_uint32(k(KEY_EMBEDDING_LENGTH, VISION), 1152) + fout.add_uint32(k(KEY_FEED_FORWARD_LENGTH, VISION), 4304) + fout.add_uint32("clip.vision.projection_dim", 0) + fout.add_uint32(k(KEY_ATTENTION_HEAD_COUNT, VISION), 16) + fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, VISION), 1e-6) + block_count = 26 + fout.add_uint32(k(KEY_BLOCK_COUNT, VISION), block_count) + + if processor is not None: + image_mean = processor.image_processor.image_mean if args.image_mean is None or args.image_mean == default_image_mean else args.image_mean + image_std = processor.image_processor.image_std if args.image_std is None or args.image_std == default_image_std else args.image_std + else: + image_mean = args.image_mean if args.image_mean is not None else default_image_mean + image_std = args.image_std if args.image_std is not None else default_image_std + fout.add_array("clip.vision.image_mean", image_mean) + fout.add_array("clip.vision.image_std", image_std) + +use_gelu = True +fout.add_bool("clip.use_gelu", use_gelu) + +def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): + """ + embed_dim: output dimension for each position + pos: a list of positions to be encoded: size (M,) + out: (M, D) + """ + assert embed_dim % 2 == 0 + omega = np.arange(embed_dim // 2, dtype=np.float32) + omega /= embed_dim / 2. + omega = 1. 
/ 10000 ** omega # (D/2,) + + pos = pos.reshape(-1) # (M,) + out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product + + emb_sin = np.sin(out) # (M, D/2) + emb_cos = np.cos(out) # (M, D/2) + + emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) + return emb + +def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): + assert embed_dim % 2 == 0 + + # use half of dimensions to encode grid_h + emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) + emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) + + emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) + return emb + + +# https://github.com/facebookresearch/mae/blob/efb2a8062c206524e35e47d04501ed4f544c0ae8/util/pos_embed.py#L20 +def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False): + """ + grid_size: int of the grid height and width + return: + pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) + """ + if isinstance(grid_size, int): + grid_h_size, grid_w_size = grid_size, grid_size + else: + grid_h_size, grid_w_size = grid_size[0], grid_size[1] + + grid_h = np.arange(grid_h_size, dtype=np.float32) + grid_w = np.arange(grid_w_size, dtype=np.float32) + grid = np.meshgrid(grid_w, grid_h) # here w goes first + grid = np.stack(grid, axis=0) + + grid = grid.reshape([2, 1, grid_h_size, grid_w_size]) + pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) + if cls_token: + pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0) + return pos_embed + +def _replace_name_resampler(s, v): + if re.match("resampler.pos_embed", s): + return { + s: v, + re.sub("pos_embed", "pos_embed_k", s): torch.from_numpy(get_2d_sincos_pos_embed(2304, (448//14, 448//14))), + } + if re.match("resampler.proj", s): + return { + re.sub("proj", "proj.weight", s): v.transpose(-1, -2).contiguous(), + } + if re.match("resampler.attn.in_proj_.*", s): + return { + re.sub("attn.in_proj_", "attn.q.", s): v.chunk(3, dim=0)[0], + re.sub("attn.in_proj_", "attn.k.", s): v.chunk(3, dim=0)[1], + re.sub("attn.in_proj_", "attn.v.", s): v.chunk(3, dim=0)[2], + } + return {s: v} + +if has_minicpmv_projector: + projector = torch.load(args.minicpmv_projector) + new_state_dict = {} + for k, v in projector.items(): + kvs = _replace_name_resampler(k, v) + for nk, nv in kvs.items(): + new_state_dict[nk] = nv + projector = new_state_dict + for name, data in projector.items(): + name = get_tensor_name(name) + data = data.squeeze().numpy() + + n_dims = len(data.shape) + if ftype == 1: + if name[-7:] == ".weight" and n_dims == 2: + print(" Converting to float16") + data = data.astype(np.float16) + ftype_cur = 1 + else: + print(" Converting to float32") + data = data.astype(np.float32) + ftype_cur = 0 + else: + if data.dtype != np.float32: + print(" Converting to float32") + data = data.astype(np.float32) + ftype_cur = 0 + + fout.add_tensor(name, data) + print(f"{name} - {ftype_str[ftype_cur]} - shape = {data.shape}") + + print("Projector tensors added\n") + +def _replace_name(s, v): + if re.match("blocks.([0-9]+).attn.qkv.weight", s): + return { + re.sub("blocks.([0-9]+).attn.qkv.weight", "vision_model.encoder.layers.\\1.self_attn.q_proj.weight", s): v.chunk(3, dim=0)[0], + re.sub("blocks.([0-9]+).attn.qkv.weight", "vision_model.encoder.layers.\\1.self_attn.k_proj.weight", s): v.chunk(3, dim=0)[1], + re.sub("blocks.([0-9]+).attn.qkv.weight", "vision_model.encoder.layers.\\1.self_attn.v_proj.weight", s): v.chunk(3, dim=0)[2], + } + if 
re.match("blocks.([0-9]+).attn.qkv.bias", s): + return { + re.sub("blocks.([0-9]+).attn.qkv.bias", "vision_model.encoder.layers.\\1.self_attn.q_proj.bias", s): v.chunk(3, dim=0)[0], + re.sub("blocks.([0-9]+).attn.qkv.bias", "vision_model.encoder.layers.\\1.self_attn.k_proj.bias", s): v.chunk(3, dim=0)[1], + re.sub("blocks.([0-9]+).attn.qkv.bias", "vision_model.encoder.layers.\\1.self_attn.v_proj.bias", s): v.chunk(3, dim=0)[2], + } + if re.match("pos_embed", s): + from timm.layers import resample_abs_pos_embed + s = re.sub("pos_embed", "vision_model.embeddings.position_embedding.weight", s) + v = resample_abs_pos_embed(v, (448//14, 448//14), num_prefix_tokens=0) + return {s: v} + + s = re.sub("patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight", s) + s = re.sub("patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias", s) + + # norm + s = re.sub("blocks.([0-9]+).norm([0-9]+).weight", "vision_model.encoder.layers.\\1.layer_norm\\2.weight", s) + s = re.sub("blocks.([0-9]+).norm([0-9]+).bias", "vision_model.encoder.layers.\\1.layer_norm\\2.bias", s) + + s = re.sub("blocks.([0-9]+).attn.proj.weight", "vision_model.encoder.layers.\\1.self_attn.out_proj.weight", s) + s = re.sub("blocks.([0-9]+).attn.proj.bias", "vision_model.encoder.layers.\\1.self_attn.out_proj.bias", s) + + s = re.sub("blocks.([0-9]+).mlp.fc([0-9]+).weight", "vision_model.encoder.layers.\\1.mlp.fc\\2.weight", s) + s = re.sub("blocks.([0-9]+).mlp.fc([0-9]+).bias", "vision_model.encoder.layers.\\1.mlp.fc\\2.bias", s) + + s = re.sub("norm.weight", "vision_model.post_layernorm.weight", s) + s = re.sub("norm.bias", "vision_model.post_layernorm.bias", s) + + return {s: v} + +state_dict = model.state_dict() +new_state_dict = {} +for k, v in state_dict.items(): + kvs = _replace_name(k, v) + for nk, nv in kvs.items(): + new_state_dict[nk] = nv +state_dict = new_state_dict +for name, data in state_dict.items(): + if should_skip_tensor(name, has_text_encoder, has_vision_encoder, has_minicpmv_projector): + # we don't need this + print(f"skipping parameter: {name}") + continue + + name = get_tensor_name(name) + data = data.squeeze().numpy() + + n_dims = len(data.shape) + + # ftype == 0 -> float32, ftype == 1 -> float16 + ftype_cur = 0 + if n_dims == 4: + print(f"tensor {name} is always saved in f16") + data = data.astype(np.float16) + ftype_cur = 1 + elif ftype == 1: + if name[-7:] == ".weight" and n_dims == 2: + print(" Converting to float16") + data = data.astype(np.float16) + ftype_cur = 1 + else: + print(" Converting to float32") + data = data.astype(np.float32) + ftype_cur = 0 + else: + if data.dtype != np.float32: + print(" Converting to float32") + data = data.astype(np.float32) + ftype_cur = 0 + + print(f"{name} - {ftype_str[ftype_cur]} - shape = {data.shape}") + fout.add_tensor(name, data) + + +fout.write_header_to_file() +fout.write_kv_data_to_file() +fout.write_tensors_to_file() +fout.close() + +print("Done. 
Output file: " + fname_out) diff --git a/examples/llava/minicpmv-convert/minicpmv2_0-surgery.py b/examples/llava/minicpmv-convert/minicpmv2_0-surgery.py new file mode 100644 index 0000000000000..cb6168e624e1d --- /dev/null +++ b/examples/llava/minicpmv-convert/minicpmv2_0-surgery.py @@ -0,0 +1,48 @@ +import argparse +import glob +import os +import torch +from transformers import AutoModel, AutoTokenizer + +ap = argparse.ArgumentParser() +ap.add_argument("-m", "--model", help="Path to MiniCPM-V-2.0 model") +args = ap.parse_args() + +# find the model part that includes the the multimodal projector weights +model = AutoModel.from_pretrained(args.model, trust_remote_code=True) +checkpoint = model.state_dict() + +# get a list of mm tensor names +mm_tensors = [k for k, v in checkpoint.items() if k.startswith("resampler")] + +# store these tensors in a new dictionary and torch.save them +projector = {name: checkpoint[name].float() for name in mm_tensors} +torch.save(projector, f"{args.model}/minicpmv.projector") + +clip_tensors = [k for k, v in checkpoint.items() if k.startswith("vpm")] +if len(clip_tensors) > 0: + clip = {name.replace("vpm.", ""): checkpoint[name].float() for name in clip_tensors} + torch.save(clip, f"{args.model}/minicpmv.clip") + + # added tokens should be removed to be able to convert Mistral models + if os.path.exists(f"{args.model}/added_tokens.json"): + with open(f"{args.model}/added_tokens.json", "w") as f: + f.write("{}\n") + +config = model.llm.config +config._name_or_path = "openbmb/CPM-2B" +config.auto_map = { + "AutoConfig": "configuration_minicpm.MiniCPMConfig", + "AutoModel": "modeling_minicpm.MiniCPMModel", + "AutoModelForCausalLM": "modeling_minicpm.MiniCPMForCausalLM", + "AutoModelForSeq2SeqLM": "modeling_minicpm.MiniCPMForCausalLM", + "AutoModelForSequenceClassification": "modeling_minicpm.MiniCPMForSequenceClassification" +} +model.llm.save_pretrained(f"{args.model}/model") +tok = AutoTokenizer.from_pretrained(args.model, trust_remote_code=True) +tok.save_pretrained(f"{args.model}/model") +# os.system(f"cp {args.model}/modeling_minicpm.py {args.model}/model/modeling_minicpm.py") + +print("Done!") +print(f"Now you can convert {args.model} to a regular LLaMA GGUF file.") +print(f"Also, use {args.model}/minicpmv.projector to prepare a minicpmv-encoder.gguf file.") diff --git a/examples/llava/minicpmv-convert/minicpmv2_5-convert-image-encoder-to-gguf.py b/examples/llava/minicpmv-convert/minicpmv2_5-convert-image-encoder-to-gguf.py index b320a0e86e945..fa361bea3bfe3 100644 --- a/examples/llava/minicpmv-convert/minicpmv2_5-convert-image-encoder-to-gguf.py +++ b/examples/llava/minicpmv-convert/minicpmv2_5-convert-image-encoder-to-gguf.py @@ -166,7 +166,7 @@ def bytes_to_unicode(): fname_middle = "mmproj-" has_text_encoder = False has_minicpmv_projector = True - minicpmv_version = 3 + minicpmv_version = 2 elif args.vision_only: fname_middle = "vision-" has_text_encoder = False