Use separate llama-cpp-python packages for GGML support
jllllll committed Aug 26, 2023
1 parent 6e6431e commit 4a999e3
Showing 7 changed files with 72 additions and 15 deletions.
15 changes: 13 additions & 2 deletions download-model.py
@@ -57,7 +57,8 @@ def get_download_links_from_huggingface(self, model, branch, text_only=False):
classifications = []
has_pytorch = False
has_pt = False
# has_gguf = False
has_gguf = False
has_ggml = False
has_safetensors = False
is_lora = False
while True:
@@ -79,6 +80,7 @@ def get_download_links_from_huggingface(self, model, branch, text_only=False):
is_safetensors = re.match(r".*\.safetensors", fname)
is_pt = re.match(r".*\.pt", fname)
is_gguf = re.match(r'.*\.gguf', fname)
is_ggml = re.match(r".*ggml.*\.bin", fname)
is_tokenizer = re.match(r"(tokenizer|ice|spiece).*\.model", fname)
is_text = re.match(r".*\.(txt|json|py|md)", fname) or is_tokenizer
if any((is_pytorch, is_safetensors, is_pt, is_gguf, is_tokenizer, is_text)):
@@ -102,8 +104,11 @@ def get_download_links_from_huggingface(self, model, branch, text_only=False):
has_pt = True
classifications.append('pt')
elif is_gguf:
# has_gguf = True
has_gguf = True
classifications.append('gguf')
elif is_ggml:
has_ggml = True
classifications.append('ggml')

cursor = base64.b64encode(f'{{"file_name":"{dict[-1]["path"]}"}}'.encode()) + b':50'
cursor = base64.b64encode(cursor)
@@ -115,6 +120,12 @@ def get_download_links_from_huggingface(self, model, branch, text_only=False):
if classifications[i] in ['pytorch', 'pt']:
links.pop(i)

# If both GGML and GGUF are available, download GGUF only
if has_ggml and has_gguf:
for i in range(len(classifications) - 1, -1, -1):
if classifications[i] == 'ggml':
links.pop(i)

return links, sha256, is_lora

def get_output_folder(self, model, branch, is_lora, base_folder=None):
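For context, a minimal standalone sketch of the pruning behaviour introduced above; the filenames are hypothetical and the loop is a simplified restatement of the classification code, not additional repository code:

import re

# Hypothetical repository listing containing both quantization formats.
files = ['model.Q4_K_M.gguf', 'model.ggmlv3.q4_0.bin', 'config.json']
links = list(files)
classifications = []
has_gguf = has_ggml = False

for fname in files:
    if re.match(r'.*\.gguf', fname):
        has_gguf = True
        classifications.append('gguf')
    elif re.match(r'.*ggml.*\.bin', fname):
        has_ggml = True
        classifications.append('ggml')
    else:
        classifications.append('text')

# If both GGML and GGUF are available, keep GGUF only (mirrors the new block above).
if has_ggml and has_gguf:
    for i in range(len(classifications) - 1, -1, -1):
        if classifications[i] == 'ggml':
            links.pop(i)

print(links)  # ['model.Q4_K_M.gguf', 'config.json']

Iterating backwards keeps the remaining indices of classifications and links aligned while items are popped.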
25 changes: 20 additions & 5 deletions modules/llamacpp_hf.py
@@ -9,23 +9,38 @@

from modules import RoPE, shared
from modules.logging_colors import logger
from modules.utils import is_gguf

import llama_cpp

try:
import llama_cpp_ggml
except:
llama_cpp_ggml = llama_cpp

if torch.cuda.is_available() and not torch.version.hip:
try:
import llama_cpp_cuda
except:
llama_cpp_cuda = None
try:
import llama_cpp_ggml_cuda
except:
llama_cpp_ggml_cuda = llama_cpp_cuda
else:
llama_cpp_cuda = None
llama_cpp_ggml_cuda = None


def llama_cpp_lib():
def llama_cpp_lib(model_file: Union[str, Path] = None):
if model_file is not None:
gguf_model = is_gguf(model_file)
else:
gguf_model = True
if shared.args.cpu or llama_cpp_cuda is None:
return llama_cpp
return llama_cpp if gguf_model else llama_cpp_ggml
else:
return llama_cpp_cuda
return llama_cpp_cuda if gguf_model else llama_cpp_ggml_cuda


class LlamacppHF(PreTrainedModel):
@@ -165,7 +180,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
if path.is_file():
model_file = path
else:
model_file = list(path.glob('*.gguf*'))[0]
model_file = (list(path.glob('*.gguf*')) + list(path.glob('*ggml*.bin')))[0]

logger.info(f"llama.cpp weights detected: {model_file}\n")

Expand Down Expand Up @@ -193,7 +208,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
'logits_all': True,
}

Llama = llama_cpp_lib().Llama
Llama = llama_cpp_lib(model_file).Llama
model = Llama(**params)

return LlamacppHF(model)
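To make the new dispatch easier to follow, here is an illustrative restatement of llama_cpp_lib() as a pure function; the returned strings stand in for the imported modules and are not repository code:

def pick_backend(gguf_model: bool, cpu_only: bool, cuda_available: bool) -> str:
    # GGUF models use the regular llama-cpp-python packages, GGML models use
    # the separate llama_cpp_python_ggml packages, and the *_cuda variants are
    # chosen when a CUDA build was importable and --cpu was not requested.
    if cpu_only or not cuda_available:
        return 'llama_cpp' if gguf_model else 'llama_cpp_ggml'
    return 'llama_cpp_cuda' if gguf_model else 'llama_cpp_ggml_cuda'

When no model file is passed, gguf_model defaults to True, so callers that do not specify a file still get the GGUF-capable library.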
27 changes: 22 additions & 5 deletions modules/llamacpp_model.py
@@ -1,29 +1,46 @@
import re
from functools import partial
from pathlib import Path
from typing import Union

import torch

from modules import RoPE, shared
from modules.callbacks import Iteratorize
from modules.logging_colors import logger
from modules.text_generation import get_max_prompt_length
from modules.utils import is_gguf

import llama_cpp

try:
import llama_cpp_ggml
except:
llama_cpp_ggml = llama_cpp

if torch.cuda.is_available() and not torch.version.hip:
try:
import llama_cpp_cuda
except:
llama_cpp_cuda = None
try:
import llama_cpp_ggml_cuda
except:
llama_cpp_ggml_cuda = llama_cpp_cuda
else:
llama_cpp_cuda = None
llama_cpp_ggml_cuda = None


def llama_cpp_lib():
def llama_cpp_lib(model_file: Union[str, Path] = None):
if model_file is not None:
gguf_model = is_gguf(model_file)
else:
gguf_model = True
if shared.args.cpu or llama_cpp_cuda is None:
return llama_cpp
return llama_cpp if gguf_model else llama_cpp_ggml
else:
return llama_cpp_cuda
return llama_cpp_cuda if gguf_model else llama_cpp_ggml_cuda


def ban_eos_logits_processor(eos_token, input_ids, logits):
@@ -41,8 +58,8 @@ def __del__(self):
@classmethod
def from_pretrained(self, path):

Llama = llama_cpp_lib().Llama
LlamaCache = llama_cpp_lib().LlamaCache
Llama = llama_cpp_lib(str(path)).Llama
LlamaCache = llama_cpp_lib(str(path)).LlamaCache

result = self()
cache_capacity = 0
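The repeated try/except imports above are an optional-dependency fallback: if the GGML-specific wheel is missing, the GGUF package is reused in its place. A generic sketch of that pattern, with an illustrative helper name that is not part of the commit:

import importlib

def import_optional(name, fallback=None):
    # Return the named module if it is installed, otherwise the fallback.
    try:
        return importlib.import_module(name)
    except ImportError:
        return fallback

# e.g. llama_cpp_ggml = import_optional('llama_cpp_ggml', fallback=llama_cpp)

Note that from_pretrained passes str(path); is_gguf (defined in modules/utils.py below) accepts either str or Path, so a Path object would work equally well.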
2 changes: 1 addition & 1 deletion modules/models.py
@@ -241,7 +241,7 @@ def llamacpp_loader(model_name):
if path.is_file():
model_file = path
else:
model_file = list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf*'))[0]
model_file = (list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf*')) + list(Path(f'{shared.args.model_dir}/{model_name}').glob('*ggml*.bin')))[0]

logger.info(f"llama.cpp weights detected: {model_file}")
model, tokenizer = LlamaCppModel.from_pretrained(model_file)
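Because the two globs are concatenated in a fixed order, a directory that holds both formats resolves to the GGUF file. A small sketch with a hypothetical model directory:

from pathlib import Path

model_dir = Path('models/llama-2-7b')  # hypothetical directory
candidates = list(model_dir.glob('*.gguf*')) + list(model_dir.glob('*ggml*.bin'))
# GGUF matches come first, so index 0 prefers GGUF over GGML when both exist.
model_file = candidates[0] if candidates else None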
4 changes: 2 additions & 2 deletions modules/models_settings.py
@@ -24,9 +24,9 @@ def infer_loader(model_name):
loader = None
elif Path(f'{shared.args.model_dir}/{model_name}/quantize_config.json').exists() or ('wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0):
loader = 'AutoGPTQ'
elif len(list(path_to_model.glob('*.gguf*'))) > 0:
elif len(list(path_to_model.glob('*.gguf*')) + list(path_to_model.glob('*ggml*.bin'))) > 0:
loader = 'llama.cpp'
elif re.match(r'.*\.gguf', model_name.lower()):
elif re.match(r'.*\.gguf|.*ggml.*\.bin', model_name.lower()):
loader = 'llama.cpp'
elif re.match(r'.*rwkv.*\.pth', model_name.lower()):
loader = 'RWKV'
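The widened regular expression routes both naming conventions to the llama.cpp loader; an illustrative check with hypothetical file names:

import re

pattern = r'.*\.gguf|.*ggml.*\.bin'
for name in ['Llama-2-7B.Q4_K_M.gguf', 'llama-2-7b.ggmlv3.q4_0.bin', 'llama-2-7b.safetensors']:
    print(name, bool(re.match(pattern, name.lower())))
# -> True, True, False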
9 changes: 9 additions & 0 deletions modules/utils.py
@@ -2,6 +2,7 @@
import re
from datetime import datetime
from pathlib import Path
from typing import Union

from modules import shared
from modules.logging_colors import logger
@@ -124,3 +125,11 @@ def get_datasets(path: str, ext: str):

def get_available_chat_styles():
return sorted(set(('-'.join(k.stem.split('-')[1:]) for k in Path('css').glob('chat_style*.css'))), key=natural_keys)

# Determines if a llama.cpp model is in GGUF format
# Copied from ctransformers utils.py
def is_gguf(path: Union[str, Path]) -> bool:
path = str(Path(path).resolve())
with open(path, "rb") as f:
magic = f.read(4)
return magic == "GGUF".encode()
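A quick way to exercise the helper is to write the four-byte magic to a temporary file: GGUF files begin with the ASCII magic "GGUF", which is exactly what the function checks. The example below is illustrative and not part of the commit:

import os
import tempfile

with tempfile.NamedTemporaryFile(suffix='.gguf', delete=False) as f:
    f.write(b'GGUF' + b'\x00' * 16)  # minimal stand-in for a real model header
    fake_model = f.name

print(is_gguf(fake_model))  # True
os.remove(fake_model)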
5 changes: 5 additions & 0 deletions requirements.txt
@@ -35,6 +35,11 @@ https://github.com/abetlen/llama-cpp-python/releases/download/v0.1.79/llama_cpp_
# llama-cpp-python with CUDA support
https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.79+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.79+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
# llama-cpp-python with GGML support
https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python_ggml-0.1.78+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows"
https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python_ggml-0.1.78+cpuavx2-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_ggml_cuda-0.1.78+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_ggml_cuda-0.1.78+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"

# GPTQ-for-LLaMa
https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.0/gptq_for_llama-0.1.0+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
