Skip to content

Commit 52c0c77

Browse files
authored
Pin llama-cpp-python version to 0.1.78 to support GGMLv3
1 parent bca9997 commit 52c0c77

32 files changed

+50
-50
lines changed

13B_BlueMethod.ipynb

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,8 @@
33
{
44
"cell_type": "markdown",
55
"metadata": {
6-
"id": "view-in-github",
7-
"colab_type": "text"
6+
"colab_type": "text",
7+
"id": "view-in-github"
88
},
99
"source": [
1010
"<a href=\"https://colab.research.google.com/github/Troyanovsky/Local-LLM-comparison/blob/main/13B_BlueMethod.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
@@ -44,8 +44,8 @@
4444
},
4545
"outputs": [
4646
{
47-
"output_type": "stream",
4847
"name": "stdout",
48+
"output_type": "stream",
4949
"text": [
5050
"/content\n",
5151
"Selecting previously unselected package libc-ares2:amd64.\n",
@@ -367,7 +367,6 @@
367367
]
368368
},
369369
{
370-
"output_type": "display_data",
371370
"data": {
372371
"application/vnd.colab-display-data+json": {
373372
"pip_warning": {
@@ -377,11 +376,12 @@
377376
}
378377
}
379378
},
380-
"metadata": {}
379+
"metadata": {},
380+
"output_type": "display_data"
381381
},
382382
{
383-
"output_type": "stream",
384383
"name": "stdout",
384+
"output_type": "stream",
385385
"text": [
386386
"Collecting gradio==3.32.0\n",
387387
" Downloading gradio-3.32.0-py3-none-any.whl (19.9 MB)\n",
@@ -720,7 +720,7 @@
720720
"!pip install -U gradio==3.32.0\n",
721721
"\n",
722722
"!pip uninstall -y llama-cpp-python\n",
723-
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir\n",
723+
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python==0.1.78 --no-cache-dir\n",
724724
"\n",
725725
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/TheBloke/13B-BlueMethod-GGML/resolve/main/13b-bluemethod.ggmlv3.q5_K_M.bin -d /content/text-generation-webui/models/ -o 13b-bluemethod.ggmlv3.q5_K_M.bin\n",
726726
"\n",
@@ -732,9 +732,9 @@
732732
"metadata": {
733733
"accelerator": "GPU",
734734
"colab": {
735-
"provenance": [],
736735
"authorship_tag": "ABX9TyOJl75GURDW2ava57XHeTem",
737-
"include_colab_link": true
736+
"include_colab_link": true,
737+
"provenance": []
738738
},
739739
"gpuClass": "standard",
740740
"kernelspec": {
@@ -747,4 +747,4 @@
747747
},
748748
"nbformat": 4,
749749
"nbformat_minor": 0
750-
}
750+
}

13B_Ouroboros.ipynb

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,8 @@
33
{
44
"cell_type": "markdown",
55
"metadata": {
6-
"id": "view-in-github",
7-
"colab_type": "text"
6+
"colab_type": "text",
7+
"id": "view-in-github"
88
},
99
"source": [
1010
"<a href=\"https://colab.research.google.com/github/Troyanovsky/Local-LLM-Comparison-Colab-UI/blob/main/13B_Ouroboros.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
@@ -44,8 +44,8 @@
4444
},
4545
"outputs": [
4646
{
47-
"output_type": "stream",
4847
"name": "stdout",
48+
"output_type": "stream",
4949
"text": [
5050
"/content\n",
5151
"Selecting previously unselected package libc-ares2:amd64.\n",
@@ -365,7 +365,6 @@
365365
]
366366
},
367367
{
368-
"output_type": "display_data",
369368
"data": {
370369
"application/vnd.colab-display-data+json": {
371370
"pip_warning": {
@@ -375,11 +374,12 @@
375374
}
376375
}
377376
},
378-
"metadata": {}
377+
"metadata": {},
378+
"output_type": "display_data"
379379
},
380380
{
381-
"output_type": "stream",
382381
"name": "stdout",
382+
"output_type": "stream",
383383
"text": [
384384
"Collecting gradio==3.32.0\n",
385385
" Downloading gradio-3.32.0-py3-none-any.whl (19.9 MB)\n",
@@ -673,7 +673,7 @@
673673
"!pip install -U gradio==3.32.0\n",
674674
"\n",
675675
"!pip uninstall -y llama-cpp-python\n",
676-
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir\n",
676+
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python==0.1.78 --no-cache-dir\n",
677677
"\n",
678678
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/TheBloke/13B-Ouroboros-GGML/resolve/main/13b-ouroboros.ggmlv3.q5_K_M.bin -d /content/text-generation-webui/models/ -o 13b-ouroboros.ggmlv3.q5_K_M.bin\n",
679679
"\n",
@@ -685,9 +685,9 @@
685685
"metadata": {
686686
"accelerator": "GPU",
687687
"colab": {
688-
"provenance": [],
689688
"authorship_tag": "ABX9TyNOR/ngj5ScqyMK31UQsSAm",
690-
"include_colab_link": true
689+
"include_colab_link": true,
690+
"provenance": []
691691
},
692692
"gpuClass": "standard",
693693
"kernelspec": {
@@ -700,4 +700,4 @@
700700
},
701701
"nbformat": 4,
702702
"nbformat_minor": 0
703-
}
703+
}

AlpacaCielo_13B.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -664,7 +664,7 @@
664664
"!pip install -U gradio==3.32.0\n",
665665
"\n",
666666
"!pip uninstall -y llama-cpp-python\n",
667-
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir\n",
667+
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python==0.1.78 --no-cache-dir\n",
668668
"\n",
669669
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/TheBloke/AlpacaCielo-13B-GGML/resolve/main/alpacacielo-13b.ggmlv3.q5_K_M.bin -d /content/text-generation-webui/models/ -o alpacacielo-13b.ggmlv3.q5_K_M.bin\n",
670670
"\n",

Camel_Platypus2_13B.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -683,7 +683,7 @@
683683
"!pip install -U gradio==3.32.0\n",
684684
"\n",
685685
"!pip uninstall -y llama-cpp-python\n",
686-
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir\n",
686+
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python==0.1.78 --no-cache-dir\n",
687687
"\n",
688688
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/TheBloke/Camel-Platypus2-13B-GGML/resolve/main/camel-platypus2-13b.ggmlv3.q5_K_M.bin -d /content/text-generation-webui/models/ -o camel-platypus2-13b.ggmlv3.q5_K_M.bin\n",
689689
"\n",

Chronos_13B_v2.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -661,7 +661,7 @@
661661
"!pip install -U gradio==3.32.0\n",
662662
"\n",
663663
"!pip uninstall -y llama-cpp-python\n",
664-
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir\n",
664+
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python==0.1.78 --no-cache-dir\n",
665665
"\n",
666666
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/TheBloke/Chronos-13B-v2-GGML/resolve/main/chronos-13b-v2.ggmlv3.q5_K_M.bin -d /content/text-generation-webui/models/ -o chronos-13b-v2.ggmlv3.q5_K_M.bin\n",
667667
"\n",

Chronos_Beluga_v2_13B.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -661,7 +661,7 @@
661661
"!pip install -U gradio==3.32.0\n",
662662
"\n",
663663
"!pip uninstall -y llama-cpp-python\n",
664-
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir\n",
664+
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python==0.1.78 --no-cache-dir\n",
665665
"\n",
666666
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/TheBloke/Chronos-Beluga-v2-13B-GGML/resolve/main/chronos-beluga-v2-13b.ggmlv3.q5_K_M.bin -d /content/text-generation-webui/models/ -o chronos-beluga-v2-13b.ggmlv3.q5_K_M.bin\n",
667667
"\n",

Chronos_Hermes_13B_v2.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -661,7 +661,7 @@
661661
"!pip install -U gradio==3.32.0\n",
662662
"\n",
663663
"!pip uninstall -y llama-cpp-python\n",
664-
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir\n",
664+
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python==0.1.78 --no-cache-dir\n",
665665
"\n",
666666
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/TheBloke/Chronos-Hermes-13B-v2-GGML/resolve/main/chronos-hermes-13b-v2.ggmlv3.q5_K_M.bin -d /content/text-generation-webui/models/ -o chronos-hermes-13b-v2.ggmlv3.q5_K_M.bin\n",
667667
"\n",

CodeUp_Alpha_13B_HF.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -597,7 +597,7 @@
597597
"!pip install -U gradio==3.32.0\n",
598598
"\n",
599599
"!pip uninstall -y llama-cpp-python\n",
600-
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir\n",
600+
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python==0.1.78 --no-cache-dir\n",
601601
"\n",
602602
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/TheBloke/CodeUp-Alpha-13B-HF-GGML/resolve/main/codeup-alpha-13b-hf.ggmlv3.q5_K_M.bin -d /content/text-generation-webui/models/ -o codeup-alpha-13b-hf.ggmlv3.q5_K_M.bin\n",
603603
"\n",

CodeUp_Llama_2_13B_Chat_HF.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -589,7 +589,7 @@
589589
"!pip install -U gradio==3.32.0\n",
590590
"\n",
591591
"!pip uninstall -y llama-cpp-python\n",
592-
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir\n",
592+
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python==0.1.78 --no-cache-dir\n",
593593
"\n",
594594
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/TheBloke/CodeUp-Llama-2-13B-Chat-HF-GGML/resolve/main/codeup-llama-2-13b-chat-hf.ggmlv3.q5_K_M.bin -d /content/text-generation-webui/models/ -o codeup-llama-2-13b-chat-hf.ggmlv3.q5_K_M.bin\n",
595595
"\n",

Dolphin_Llama_13B.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -683,7 +683,7 @@
683683
"!pip install -U gradio==3.32.0\n",
684684
"\n",
685685
"!pip uninstall -y llama-cpp-python\n",
686-
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir\n",
686+
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python==0.1.78 --no-cache-dir\n",
687687
"\n",
688688
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/TheBloke/Dolphin-Llama-13B-GGML/resolve/main/dolphin-llama-13b.ggmlv3.q5_K_M.bin -d /content/text-generation-webui/models/ -o dolphin-llama-13b.ggmlv3.q5_K_M.bin\n",
689689
"\n",

0 commit comments

Comments (0)