Commit 1490cdd

change GPT-J and GPT2 KVs to use fp16 instead
LostRuins committed Apr 5, 2023
1 parent 57e9f92 · commit 1490cdd
Showing 5 changed files with 18 additions and 12 deletions.
Binary file modified: koboldcpp.dll (not shown)
Binary file modified: koboldcpp_blas.dll (not shown)

otherarch/gpt2_v1.cpp: 10 changes (6 additions & 4 deletions)
@@ -85,6 +85,8 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
 
     auto & ctx = model.ctx;
 
+    auto memory_type = GGML_V1_TYPE_F16;
+
     size_t ctx_size = 0;
 
     {
@@ -119,8 +121,8 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         ctx_size += n_layer*(4*n_embd*n_embd*ggml_v1_type_size(wtype)); // c_mlp_proj_w
         ctx_size += n_layer*( n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32)); // c_mlp_proj_b
 
-        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32); // memory_v
+        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_size(memory_type); // memory_k
+        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_size(memory_type); // memory_v
 
         ctx_size += (6 + 12*n_layer)*256; // object overhead
 
@@ -218,8 +220,8 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         const int n_mem = n_layer*n_ctx;
         const int n_elements = n_embd*n_mem;
 
-        model.memory_k = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_elements);
-        model.memory_v = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_elements);
+        model.memory_k = ggml_v1_new_tensor_1d(ctx, memory_type, n_elements);
+        model.memory_v = ggml_v1_new_tensor_1d(ctx, memory_type, n_elements);
 
         const size_t memory_size = ggml_v1_nbytes(model.memory_k) + ggml_v1_nbytes(model.memory_v);

otherarch/gptj_v1.cpp: 10 changes (6 additions & 4 deletions)
@@ -103,6 +103,8 @@ ModelLoadResult legacy_gptj_model_load(const std::string & fname, gptj_model_v1
 
     auto & ctx = model.ctx;
 
+    auto memory_type = GGML_V1_TYPE_F16;
+
     size_t ctx_size = 0;
 
     {
@@ -136,8 +138,8 @@ ModelLoadResult legacy_gptj_model_load(const std::string & fname, gptj_model_v1
         ctx_size += n_layer*(4*n_embd*n_embd*ggml_v1_type_sizef(wtype)); // c_mlp_proj_w_trans
         ctx_size += n_layer*( n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32)); // c_mlp_proj_b
 
-        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32); // memory_v
+        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(memory_type); // memory_k
+        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(memory_type); // memory_v
 
         ctx_size += (5 + 10*n_layer)*256; // object overhead
 
@@ -240,8 +242,8 @@ ModelLoadResult legacy_gptj_model_load(const std::string & fname, gptj_model_v1
         const int n_mem = n_layer*n_ctx;
         const int n_elements = n_embd*n_mem;
 
-        model.memory_k = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_elements);
-        model.memory_v = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_elements);
+        model.memory_k = ggml_v1_new_tensor_1d(ctx, memory_type, n_elements);
+        model.memory_v = ggml_v1_new_tensor_1d(ctx, memory_type, n_elements);
 
         const size_t memory_size = ggml_v1_nbytes(model.memory_k) + ggml_v1_nbytes(model.memory_v);

otherarch/gptj_v2.cpp: 10 changes (6 additions & 4 deletions)
@@ -103,6 +103,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
 
     auto & ctx = model.ctx;
 
+    auto memory_type = GGML_TYPE_F16;
+
     size_t ctx_size = 0;
 
     {
@@ -136,8 +138,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w
         ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b
 
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_v
+        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k
+        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v
 
         ctx_size += (5 + 10*n_layer)*256; // object overhead
 
@@ -234,8 +236,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         const int n_mem = n_layer*n_ctx;
         const int n_elements = n_embd*n_mem;
 
-        model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
-        model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
+        model.memory_k = ggml_new_tensor_1d(ctx, memory_type, n_elements);
+        model.memory_v = ggml_new_tensor_1d(ctx, memory_type, n_elements);
 
        const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
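
Since all three loaders now allocate memory_k and memory_v with a 2-byte element type instead of 4-byte floats, the KV cache footprint roughly halves. The following is a minimal standalone sketch of that arithmetic; it reuses the n_ctx*n_layer*n_embd sizing expression from the hunks above, but the concrete n_ctx, n_layer, and n_embd values are illustrative assumptions (roughly GPT-J 6B) and are not taken from this commit:

    // kv_size_sketch.cpp: rough KV-cache footprint for F32 vs F16 element types.
    // Shape values are illustrative assumptions, not read from any model file.
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
        const std::size_t n_ctx   = 2048; // assumed context length
        const std::size_t n_layer = 28;   // assumed layer count
        const std::size_t n_embd  = 4096; // assumed embedding width

        // memory_k and memory_v each hold n_ctx*n_layer*n_embd elements,
        // mirroring the "ctx_size += ... // memory_k / memory_v" lines above.
        const std::size_t n_elements = n_ctx * n_layer * n_embd;

        const std::size_t kv_f32 = 2 * n_elements * sizeof(float);         // 4 bytes per element
        const std::size_t kv_f16 = 2 * n_elements * sizeof(std::uint16_t); // 2 bytes per element (fp16)

        std::printf("KV cache as f32: %.0f MiB\n", kv_f32 / (1024.0 * 1024.0));
        std::printf("KV cache as f16: %.0f MiB\n", kv_f16 / (1024.0 * 1024.0));
        return 0;
    }

With these assumed shapes the cache drops from about 1792 MiB to about 896 MiB; the exact numbers depend on the model's real dimensions.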
