Merge branch 'sidestream' into frankenstein_2
Nexesenex committed Apr 11, 2024
2 parents a112d6c + a8dd6b3 commit f45d7eb
Showing 31 changed files with 3,653 additions and 2,892 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -32,6 +32,7 @@ models-mnt
/convert-llama2c-to-ggml
/embd-input-test
/embedding
/eval-callback
/gguf
/gguf-llama-simple
/gritlm
20 changes: 11 additions & 9 deletions common/common.cpp
@@ -1746,6 +1746,8 @@ struct llama_context_params llama_context_params_from_gpt_param
cparams.yarn_orig_ctx = params.yarn_orig_ctx;
cparams.pooling_type = params.pooling_type;
cparams.defrag_thold = params.defrag_thold;
cparams.cb_eval = params.cb_eval;
cparams.cb_eval_user_data = params.cb_eval_user_data;
cparams.offload_kqv = !params.no_kv_offload;

cparams.type_k = kv_cache_type_from_str(params.cache_type_k);
@@ -2193,7 +2195,7 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
params.sparams.logit_bias[llama_token_eos(model)] = -INFINITY;
}

{
if (params.warmup) {
LOG("warming up the model with an empty run\n");

std::vector<llama_token> tmp = { llama_token_bos(model), llama_token_eos(model), };
@@ -2213,23 +2215,23 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
std::vector<llama_token> llama_tokenize(
const struct llama_context * ctx,
const std::string & text,
bool add_bos,
bool special) {
return llama_tokenize(llama_get_model(ctx), text, add_bos, special);
bool add_special,
bool parse_special) {
return llama_tokenize(llama_get_model(ctx), text, add_special, parse_special);
}

std::vector<llama_token> llama_tokenize(
const struct llama_model * model,
const std::string & text,
bool add_bos,
bool special) {
bool add_special,
bool parse_special) {
// upper limit for the number of tokens
int n_tokens = text.length() + add_bos;
int n_tokens = text.length() + 2 * add_special;
std::vector<llama_token> result(n_tokens);
n_tokens = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos, special);
n_tokens = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
if (n_tokens < 0) {
result.resize(-n_tokens);
int check = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos, special);
int check = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
GGML_ASSERT(check == -n_tokens);
} else {
result.resize(n_tokens);
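The two llama_tokenize helpers above rename add_bos/special to add_special/parse_special, and the pre-allocation bound grows from text.length() + add_bos to text.length() + 2 * add_special, leaving room for both a leading BOS and a trailing EOS when special tokens are requested. A minimal sketch of a call site under the new names (illustrative only, not part of the commit; ctx, prompt and text are assumed to exist in the caller):

    // Chat-style input: add the model's special tokens (BOS/EOS as configured)
    // and parse special-token strings such as "<|im_start|>" into token ids.
    std::vector<llama_token> prompt_tokens =
        llama_tokenize(ctx, prompt, /*add_special=*/true, /*parse_special=*/true);

    // Verbatim text: no BOS/EOS added, special-token strings stay literal text.
    std::vector<llama_token> raw_tokens =
        llama_tokenize(ctx, text, /*add_special=*/false, /*parse_special=*/false);
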
12 changes: 8 additions & 4 deletions common/common.h
@@ -76,6 +76,9 @@ struct gpt_params {
int32_t yarn_orig_ctx = 0; // YaRN original context length
float defrag_thold = -1.0f; // KV cache defragmentation threshold

ggml_backend_sched_eval_callback cb_eval = nullptr;
void * cb_eval_user_data = nullptr;

ggml_numa_strategy numa = GGML_NUMA_STRATEGY_DISABLED;

llama_rope_scaling_type rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
@@ -172,6 +175,7 @@ struct gpt_params {
bool infill = false; // use infill mode
bool dump_kv_cache = false; // dump the KV cache contents for debugging purposes
bool no_kv_offload = false; // disable KV offloading
bool warmup = true; // warmup run

std::string cache_type_k = "f16"; // KV cache data type for the K
std::string cache_type_v = "f16"; // KV cache data type for the V
@@ -239,14 +243,14 @@ void llama_batch_add(
std::vector<llama_token> llama_tokenize(
const struct llama_context * ctx,
const std::string & text,
bool add_bos,
bool special = false);
bool add_special,
bool parse_special = false);

std::vector<llama_token> llama_tokenize(
const struct llama_model * model,
const std::string & text,
bool add_bos,
bool special = false);
bool add_special,
bool parse_special = false);

// tokenizes a token into a piece
// should work similar to Python's `tokenizer.id_to_piece`
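Together with the matching llama_context_params plumbing in common.cpp, the new cb_eval, cb_eval_user_data and warmup members let a tool observe graph evaluation. A minimal sketch, assuming the ggml_backend_sched_eval_callback signature bool (*)(struct ggml_tensor * t, bool ask, void * user_data) from ggml-backend.h; the callback name, log format and helper function are illustrative, not part of this commit:

    #include "common.h"  // gpt_params
    #include <cstdio>

    // Log every tensor the backend scheduler evaluates.
    static bool print_tensor_cb(struct ggml_tensor * t, bool ask, void * user_data) {
        (void) user_data;
        if (ask) {
            return true;   // yes, we want to see this tensor's data once it is computed
        }
        fprintf(stderr, "eval: %s (op=%s)\n", t->name, ggml_op_name(t->op));
        return true;       // returning false cancels the graph compute
    }

    static void configure_eval_logging(gpt_params & params) {
        params.cb_eval           = print_tensor_cb;
        params.cb_eval_user_data = nullptr;
        params.warmup            = false;  // new flag: skip the warm-up decode so it is not logged
    }
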
53 changes: 19 additions & 34 deletions convert-hf-to-gguf.py
@@ -227,15 +227,14 @@ def _get_part_names(self):
return ("pytorch_model.bin",)
return (f"pytorch_model-{n:05}-of-{self.num_parts:05}.bin" for n in range(1, self.num_parts + 1))

def _set_vocab_gpt2(self):
dir_model = self.dir_model
hparams = self.hparams
# used for GPT-2 BPE and WordPiece vocabs
def get_basic_vocab(self) -> tuple[list[str], list[int]]:
tokens: list[str] = []
toktypes: list[int] = []

from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(dir_model)
vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
vocab_size = self.hparams.get("vocab_size", len(tokenizer.vocab))
assert max(tokenizer.vocab.values()) < vocab_size

reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
@@ -255,11 +254,15 @@ def _set_vocab_gpt2(self):
tokens.append(reverse_vocab[i])
toktypes.append(gguf.TokenType.NORMAL)

return tokens, toktypes

def _set_vocab_gpt2(self) -> None:
tokens, toktypes = self.get_basic_vocab()
self.gguf_writer.add_tokenizer_model("gpt2")
self.gguf_writer.add_token_list(tokens)
self.gguf_writer.add_token_types(toktypes)

special_vocab = gguf.SpecialVocab(dir_model, load_merges=True)
special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
special_vocab.add_to_gguf(self.gguf_writer)

def _set_vocab_qwen(self):
@@ -2043,34 +2046,25 @@ def set_gguf_parameters(self):
self.gguf_writer.add_pooling_type(pooling_type)

def set_vocab(self):
# use huggingface vocab to get all tokens
vocab = LlamaHfVocab(self.dir_model, ignore_nonllama=True)
tokens, scores, toktypes = zip(*vocab.all_tokens())
assert len(tokens) == vocab.vocab_size
self.vocab_size = vocab.vocab_size
tokens, toktypes = self.get_basic_vocab()
self.vocab_size = len(tokens)

# we need this to validate the size of the token_type embeddings
# though currently we are passing all zeros to the token_type embeddings
n_token_types = len(set(toktypes))
self.gguf_writer.add_token_type_count(n_token_types)
self.gguf_writer.add_token_type_count(2) # "Sequence A" or "Sequence B"

# convert to phantom space vocab
def phantom(tok, typ):
if tok.startswith(b"[") and tok.endswith(b"]"):
def phantom(tok):
if tok.startswith("[") and tok.endswith("]"):
return tok
if tok.startswith(b"##"):
if tok.startswith("##"):
return tok[2:]
return b"\xe2\x96\x81" + tok
tokens = tuple(phantom(t, y) for t, y in zip(tokens, toktypes))

# set up bos and eos tokens (cls and sep)
self.gguf_writer.add_bos_token_id(vocab.tokenizer.cls_token_id)
self.gguf_writer.add_eos_token_id(vocab.tokenizer.sep_token_id)
return "\u2581" + tok
tokens = list(map(phantom, tokens))

# add vocab to gguf
self.gguf_writer.add_tokenizer_model("bert")
self.gguf_writer.add_token_list(tokens)
self.gguf_writer.add_token_scores(scores)
self.gguf_writer.add_token_types(toktypes)

# handle special tokens
@@ -2142,16 +2136,6 @@ def set_gguf_parameters(self):
super().set_gguf_parameters()
self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])

def get_tensors(self):
assert self.vocab_size is not None
for name, data in super().get_tensors():
# Nomic Embed's token embeddings tensor is padded, but llama.cpp wants tensor sizes to match exactly.
if name == 'embeddings.word_embeddings.weight' and data.shape[1] != self.vocab_size:
rounded_vocab_size = (self.vocab_size + 63) // 64 * 64
assert data.shape == (rounded_vocab_size, self.hparams["n_embd"])
data = data[:self.vocab_size, :]
yield name, data


@Model.register("GemmaForCausalLM")
class GemmaModel(Model):
@@ -2327,7 +2311,8 @@ def write_tensors(self):
data = data.astype(np.float32)

# if f16 desired, convert big float32 2-dim weight tensors to float16
if self.ftype == 1 and data_dtype == np.float32 and new_name.removesuffix(".weight").endswith((".ssm_in", ".ssm_out", "token_embd", "output")) and n_dims == 2:
new_weight_name = new_name[:-len(".weight")] if new_name.endswith(".weight") else ""
if self.ftype == 1 and data_dtype == np.float32 and new_weight_name.endswith((".ssm_in", ".ssm_out", "token_embd", "output")) and n_dims == 2:
data = data.astype(np.float16)

print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
2 changes: 2 additions & 0 deletions convert-persimmon-to-gguf.py
@@ -1,4 +1,6 @@
#!/usr/bin/env python3
from __future__ import annotations

import argparse
import os
import sys
23 changes: 11 additions & 12 deletions convert.py
@@ -33,7 +33,7 @@
import gguf

if TYPE_CHECKING:
from typing import TypeAlias
from typing_extensions import Self, TypeAlias

if hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1'):
faulthandler.register(signal.SIGUSR1)
@@ -517,17 +517,15 @@ class LlamaHfVocab(Vocab):
tokenizer_model = "llama"
name = "hfft"

def __init__(self, base_path: Path, ignore_nonllama: bool = False):
def __init__(self, base_path: Path):
fname_tokenizer = base_path / FAST_TOKENIZER_FILE
# if this fails, FileNotFoundError propagates to caller
with open(fname_tokenizer, encoding='utf-8') as f:
tokenizer_json = json.load(f)

# pre-check so we know if we need transformers
tokenizer_model: dict[str, Any] = tokenizer_json['model']
if ignore_nonllama:
pass # workaround incorrect use of this class for WordPiece
elif (
if (
tokenizer_model['type'] != 'BPE' or not tokenizer_model.get('byte_fallback', False)
or tokenizer_json['decoder']['type'] != 'Sequence'
):
@@ -647,16 +645,17 @@ def permute(weights: NDArray, n_head: int, n_head_kv: int) -> NDArray:


class Tensor(ABC):
ndarray: NDArray
data_type: DataType

@abstractmethod
def astype(self, data_type: DataType) -> Tensor: ...
def astype(self, data_type: DataType) -> Self: ...
@abstractmethod
def permute(self, n_head: int, n_head_kv: int) -> Tensor: ...
def permute(self, n_head: int, n_head_kv: int) -> Self: ...
@abstractmethod
def permute_part(self, n_part: int, n_head: int, n_head_kv: int) -> UnquantizedTensor: ...
def permute_part(self, n_part: int, n_head: int, n_head_kv: int) -> Self: ...
@abstractmethod
def part(self, n_part: int) -> UnquantizedTensor: ...
def part(self, n_part: int) -> Self: ...
@abstractmethod
def to_ggml(self) -> GGMLCompatibleTensor: ...

@@ -673,13 +672,13 @@ def __init__(self, ndarray: NDArray):
self.ndarray = ndarray
self.data_type = NUMPY_TYPE_TO_DATA_TYPE[ndarray.dtype]

def astype(self, data_type: DataType) -> Tensor:
def astype(self, data_type: DataType) -> UnquantizedTensor:
dtype = data_type.dtype
if self.data_type == DT_BF16:
self.ndarray = bf16_to_fp32(self.ndarray)
return UnquantizedTensor(self.ndarray.astype(dtype))

def to_ggml(self) -> UnquantizedTensor:
def to_ggml(self) -> Self:
return self

def permute_part(self, n_part: int, n_head: int, n_head_kv: int) -> UnquantizedTensor:
@@ -1351,7 +1350,7 @@ def load_some_model(path: Path) -> ModelPlus:
# Be extra-friendly and accept either a file or a directory:
if path.is_dir():
# Check if it's a set of safetensors files first
globs = ["model-00001-of-*.safetensors", "model.safetensors"]
globs = ["model-00001-of-*.safetensors", "model.safetensors", "consolidated.safetensors"]
files = [file for glob in globs for file in path.glob(glob)]
if not files:
# Try the PyTorch patterns too, with lower priority