# forked from ggerganov/llama.cpp
# pixi.toml — 28 lines (26 loc), 967 bytes
[project]
name = "llama.cpp"
version = "0.1.0"
# NOTE(review): replaced the default `pixi init` placeholder description.
description = "Pixi environment for building and running llama.cpp"
# NOTE(review): email was redacted by the scrape ([email protected]); restore if known.
authors = ["Tim de Jager <[email protected]>"]
channels = ["conda-forge"]
platforms = ["osx-arm64", "win-64", "linux-64"]

[tasks]
# Configure and build into ./build with CMake + Ninja (Release config).
build = "mkdir -p build && cd build && cmake .. -G Ninja && cmake --build . --config Release"
# Makefile-based builds with optional acceleration backends.
build-blas = "make 'LLAMA_OPENBLAS=1'"
build-metal = "LLAMA_METAL=1 make"
download_model = { cmd = "python ./download_open_llama.py", depends_on = ["build"] }
convert_model = { cmd = "python check-convert.py", depends_on = ["download_model"] }
# Every continuation line ends with ` \` so the multi-line string folds into a
# single shell command. The original first line lacked the backslash, embedding
# a literal newline that split the command in two at the shell.
interactive = { cmd = """./build/bin/main -m ./models/open_llama/ggml-model-f16.bin \
  -c 512 -b 1024 -n 256 --keep 48 \
  --repeat_penalty 1.0 --color -i \
  -r 'User:' -f prompts/chat-with-bob.txt""", depends_on = ["convert_model"] }

[dependencies]
# Build toolchain (conda-forge pins).
cmake = "3.26.4.*"
cxx-compiler = "1.5.2.*"
make = "4.3.*"
ninja = "1.11.1.*"
# Python helpers used by the download/convert tasks.
numpy = "1.25.1.*"
requests = "2.31.0.*"
sentencepiece = "0.1.99.*"
tqdm = "4.65.0.*"