Created using Colaboratory
jonbaer committed Aug 27, 2023
1 parent 93fd24b commit 5b7175a
Showing 1 changed file with 132 additions and 0 deletions.
132 changes: 132 additions & 0 deletions llama2.ipynb
@@ -0,0 +1,132 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "HLdoj4cz-xal"
},
"source": [
"# Llama2.py\n",
"\n",
"[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/tairov/llama2.py/blob/master/llama2.ipynb)\n",
"\n",
"More details can be found in the [README.md](README.md) ."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Une3Ozlnu1B7"
},
"outputs": [],
"source": [
"#@title Clone Project\n",
"\n",
"!git clone https://github.com/tairov/llama2.py.git\n",
"%cd llama2.py"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "thm0ZBrtSgoC"
},
"outputs": [],
"source": [
"#@title Pick Your Model\n",
"\n",
"#@markdown Choose model\n",
"model = \"stories15M\" #@param [\"stories15M\", \"stories42M\", \"stories110M\"]\n",
"\n",
"download_url = \"\"\n",
"\n",
"if(model == \"stories15M\"):\n",
" download_url = \"https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin\"\n",
"if(model == \"stories42M\"):\n",
" download_url = \"https://huggingface.co/karpathy/tinyllamas/resolve/main/stories42M.bin\"\n",
"if(model == \"stories110M\"):\n",
" download_url = \"https://huggingface.co/karpathy/tinyllamas/resolve/main/stories110M.bin\"\n",
"\n",
"print(f\"download_url: {download_url}\")\n",
"\n",
"!wget $download_url\n",
"\n",
"model_file = model + \".bin\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "OgAc3KjuT-NM"
},
"outputs": [],
"source": [
"#@title Generate Stories\n",
"\n",
"# Generate args\n",
"max_token = 256 #@param {type:\"slider\", min:32, max:1024, step:32}\n",
"temperature = 0.8 #@param {type:\"slider\", min:0.0, max:1, step:0.05}\n",
"# top_p = 0.9 #@param {type:\"slider\", min:0.0, max:1.0, step:0.05}\n",
"prompt = \"One day, Lily met a Shoggoth\" #@param {type:\"string\"}\n",
"\n",
"print(f\"model: {model_file}, max_token: {max_token}, temperature: {temperature}, top_p: {top_p}, prompt: {prompt}\")\n",
"print(f\"----------------------------\\n\")\n",
"\n",
"cmd = f'python3 llama2.py {model_file} {temperature} {max_token} \"{prompt}\"'\n",
"!{cmd}"
]
},
{
"cell_type": "code",
"source": [
"!pip install huggingface_hub"
],
"metadata": {
"id": "3NDq_u_7YYjg"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "bH8Xo_HJWDsC"
},
"outputs": [],
"source": [
"#@title Run Meta's Llama 2 models\n",
"\n",
"#@markdown input your huggingface [access token](https://huggingface.co/settings/tokens) to download Meta's Llama 2 models.\n",
"\n",
"from huggingface_hub import snapshot_download\n",
"\n",
"token = \"replace to HF token\" #@param {type:\"string\"}\n",
"path = snapshot_download(repo_id=\"meta-llama/Llama-2-7b\",cache_dir=\"Llama-2-7b\", use_auth_token=token)\n",
"\n",
"!python export_meta_llama_bin.py $path llama2_7b.bin\n",
"\n",
"print(\"python3 llama2.py llama2_7b.bin\\n\")\n",
"!python3 llama2.py llama2_7b.bin"
]
}
],
"metadata": {
"colab": {
"private_outputs": true,
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
