# Clone and build llama.cpp (BLAS disabled, 2 parallel jobs).
# NOTE: the clone must happen BEFORE cd'ing into the directory it creates,
# and each command must be on its own line (or ';'-separated).
git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp || exit 1
mkdir -p build          # -p: no error if the directory already exists
cd build || exit 1
cmake .. -DLLAMA_BLAS=OFF
make -j2
cd ..                   # back to the repo root — later steps use root-relative paths (src/, include/, build/bin)
# Fetch the benchmark wrapper sources.
# The original line was a bare SSH remote with no 'git clone' verb, so bash
# tried to execute "git@github.com:…" as a command.
git clone git@github.com:ramaseshanms/llama_cpp_benchmarks.git
# Stage the benchmark sources into src/ for the build below.
cp llama_cpp_benchmarks/inference_wrapper.* src/
# Original had source/destination garbled ('cp llama_cpp_benchmarks/ src/main.cpp'
# copies a directory onto a file path and fails); copy the file, not the dir.
cp llama_cpp_benchmarks/main.cpp src/
# Compile the benchmark driver against the freshly built libllama.
# The original spread one g++ invocation over nine lines with NO backslash
# continuations, so each line ran as a separate (failing) command.
mkdir -p bin            # -o bin/custom_infer fails if bin/ doesn't exist
g++ -O3 -std=c++17 \
  src/main.cpp \
  src/inference_wrapper.cpp \
  -Iinclude \
  -Iggml/include \
  -Lbuild/bin \
  -lllama \
  -pthread \
  -o bin/custom_infer
# Run the benchmark binary, pointing the dynamic loader at the locally built
# libllama; append all output to inference.log while echoing to the terminal.
env LD_LIBRARY_PATH="$PWD/build/bin:${LD_LIBRARY_PATH}" ./bin/custom_infer | tee -a inference.log