Commit
Bump llama-cpp-python to use LlamaCache
oobabooga committed Apr 16, 2023
1 parent ac18901 commit d2ea925
Showing 2 changed files with 4 additions and 3 deletions.
3 changes: 2 additions & 1 deletion modules/llamacpp_model_alternative.py
@@ -6,7 +6,7 @@
https://abetlen.github.io/llama-cpp-python/
'''

-from llama_cpp import Llama
+from llama_cpp import Llama, LlamaCache

from modules import shared
from modules.callbacks import Iteratorize
@@ -27,6 +27,7 @@ def from_pretrained(self, path):
'n_threads': shared.args.threads or None
}
self.model = Llama(**params)
+self.model.set_cache(LlamaCache)

# This is ugly, but the model and the tokenizer are the same object in this library.
return result, result
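The added set_cache call turns on llama-cpp-python's prompt cache, so a follow-up generation that shares a prefix with an earlier prompt can reuse the evaluated model state instead of reprocessing it. Below is a minimal sketch of the intended usage, assuming the 0.1.34 API (Llama, LlamaCache, Llama.set_cache); the model path is a placeholder. Note that the commit passes the LlamaCache class itself, while the set_cache signature documents a LlamaCache instance; at this version the cache appears to be checked only for truthiness, so both forms happen to enable it.

from llama_cpp import Llama, LlamaCache

# Load the model; the path and context size here are placeholders.
llm = Llama(model_path="models/ggml-model-q4_0.bin", n_ctx=2048)

# Enable the prompt cache. An instance matches the set_cache type hint;
# the commit itself passes the bare class, which also evaluates truthy.
llm.set_cache(LlamaCache())

# Repeated prompts sharing a prefix can now skip re-evaluation.
out = llm("Q: What does LlamaCache do? A:", max_tokens=32)
print(out["choices"][0]["text"])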
4 changes: 2 additions & 2 deletions requirements.txt
@@ -14,5 +14,5 @@ tqdm
git+https://github.com/huggingface/peft
transformers==4.28.0
bitsandbytes==0.38.1; platform_system != "Windows"
-llama-cpp-python==0.1.33; platform_system != "Windows"
-https://github.com/abetlen/llama-cpp-python/releases/download/v0.1.33/llama_cpp_python-0.1.33-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+llama-cpp-python==0.1.34; platform_system != "Windows"
+https://github.com/abetlen/llama-cpp-python/releases/download/v0.1.34/llama_cpp_python-0.1.34-cp310-cp310-win_amd64.whl; platform_system == "Windows"
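Both pins use PEP 508 environment markers: on non-Windows platforms pip builds llama-cpp-python 0.1.34 from PyPI, while Windows installs a prebuilt CPython 3.10 wheel from the project's release page. An illustrative way to apply the same bump to an existing environment (these commands are not part of the commit):

# Linux/macOS: install the pinned source release from PyPI.
pip install llama-cpp-python==0.1.34

# Windows, CPython 3.10: install the prebuilt wheel directly.
pip install https://github.com/abetlen/llama-cpp-python/releases/download/v0.1.34/llama_cpp_python-0.1.34-cp310-cp310-win_amd64.whl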
