diff --git a/misc/llama-cpp/Makefile b/misc/llama-cpp/Makefile
index d366f823d788..265cb9bc7fb3 100644
--- a/misc/llama-cpp/Makefile
+++ b/misc/llama-cpp/Makefile
@@ -1,49 +1,45 @@
 PORTNAME=	llama-cpp
 DISTVERSIONPREFIX=	b
-DISTVERSION=	3542
+DISTVERSION=	3560
 CATEGORIES=	misc # machine-learning
 
 MAINTAINER=	yuri@FreeBSD.org
 COMMENT=	Facebook's LLaMA model in C/C++ # '
 WWW=		https://github.com/ggerganov/llama.cpp
 
 LICENSE=	MIT
 LICENSE_FILE=	${WRKSRC}/LICENSE
 
 BROKEN_armv7=	clang crashes, see https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=278810
 
-LIB_DEPENDS=	libvulkan.so:graphics/vulkan-loader
-
 USES=		cmake:testing compiler:c++11-lang python:run shebangfix
 USE_LDCONFIG=	yes
 
 USE_GITHUB=	yes
 GH_ACCOUNT=	ggerganov
 GH_PROJECT=	llama.cpp
 GH_TUPLE=	nomic-ai:kompute:4565194:kompute/kompute
 
 SHEBANG_GLOB=	*.py
 
 CMAKE_ON=	BUILD_SHARED_LIBS
 CMAKE_OFF=	LLAMA_BUILD_TESTS
 CMAKE_TESTING_ON=	LLAMA_BUILD_TESTS
 
-LDFLAGS+=	-pthread
-
 OPTIONS_DEFINE=	EXAMPLES VULKAN
 OPTIONS_DEFAULT=	VULKAN
 OPTIONS_SUB=	yes
 
 EXAMPLES_CMAKE_BOOL=	LLAMA_BUILD_EXAMPLES
 
 VULKAN_DESC=	Vulkan GPU offload support
 VULKAN_CMAKE_BOOL=	GGML_VULKAN
 VULKAN_BUILD_DEPENDS=	glslc:graphics/shaderc \
 			vulkan-headers>0:graphics/vulkan-headers
 VULKAN_LIB_DEPENDS=	libvulkan.so:graphics/vulkan-loader
 
 BINARY_ALIAS=	git=false
 
 # 2 tests fail: https://github.com/ggerganov/llama.cpp/issues/8906
 
 .include <bsd.port.mk>
diff --git a/misc/llama-cpp/distinfo b/misc/llama-cpp/distinfo
index 57bc61877f32..13c27e8f340b 100644
--- a/misc/llama-cpp/distinfo
+++ b/misc/llama-cpp/distinfo
@@ -1,5 +1,5 @@
-TIMESTAMP = 1723097492
-SHA256 (ggerganov-llama.cpp-b3542_GH0.tar.gz) = 6f8b23d930400fce5708d2c85022ef33f1083af8f6ac395abefadacee0942e78
-SIZE (ggerganov-llama.cpp-b3542_GH0.tar.gz) = 19014348
+TIMESTAMP = 1723230415
+SHA256 (ggerganov-llama.cpp-b3560_GH0.tar.gz) = c61f0fd2ed3f83c7a9775343c5a72cb3c7ca61630553fdb495220ad80ca063ed
+SIZE (ggerganov-llama.cpp-b3560_GH0.tar.gz) = 19029390
 SHA256 (nomic-ai-kompute-4565194_GH0.tar.gz) = 95b52d2f0514c5201c7838348a9c3c9e60902ea3c6c9aa862193a212150b2bfc
 SIZE (nomic-ai-kompute-4565194_GH0.tar.gz) = 13540496
diff --git a/misc/llama-cpp/pkg-plist b/misc/llama-cpp/pkg-plist
index 7d7cc039fb51..22ca598402bf 100644
--- a/misc/llama-cpp/pkg-plist
+++ b/misc/llama-cpp/pkg-plist
@@ -1,55 +1,56 @@
 bin/convert_hf_to_gguf.py
 bin/llama-baby-llama
 bin/llama-batched
 bin/llama-batched-bench
 bin/llama-bench
 bin/llama-bench-matmult
 bin/llama-cli
 bin/llama-convert-llama2c-to-ggml
 bin/llama-cvector-generator
 bin/llama-embedding
 %%EXAMPLES%%bin/llama-eval-callback
 %%EXAMPLES%%bin/llama-export-lora
 %%EXAMPLES%%bin/llama-gbnf-validator
 %%EXAMPLES%%bin/llama-gguf
 %%EXAMPLES%%bin/llama-gguf-hash
 %%EXAMPLES%%bin/llama-gguf-split
 %%EXAMPLES%%bin/llama-gritlm
 %%EXAMPLES%%bin/llama-imatrix
 %%EXAMPLES%%bin/llama-infill
 %%EXAMPLES%%bin/llama-llava-cli
 %%EXAMPLES%%bin/llama-lookahead
 %%EXAMPLES%%bin/llama-lookup
 %%EXAMPLES%%bin/llama-lookup-create
 %%EXAMPLES%%bin/llama-lookup-merge
 %%EXAMPLES%%bin/llama-lookup-stats
+%%EXAMPLES%%bin/llama-minicpmv-cli
 %%EXAMPLES%%bin/llama-parallel
 %%EXAMPLES%%bin/llama-passkey
 %%EXAMPLES%%bin/llama-perplexity
 %%EXAMPLES%%bin/llama-quantize
 %%EXAMPLES%%bin/llama-quantize-stats
 %%EXAMPLES%%bin/llama-retrieval
 %%EXAMPLES%%bin/llama-save-load-state
 %%EXAMPLES%%bin/llama-server
 %%EXAMPLES%%bin/llama-simple
 %%EXAMPLES%%bin/llama-speculative
 %%EXAMPLES%%bin/llama-tokenize
 %%VULKAN%%bin/vulkan-shaders-gen
 include/ggml-alloc.h
 include/ggml-backend.h
 include/ggml-blas.h
 include/ggml-cann.h
 include/ggml-cuda.h
 include/ggml-kompute.h
 include/ggml-metal.h
 include/ggml-rpc.h
 include/ggml-sycl.h
 include/ggml-vulkan.h
 include/ggml.h
 include/llama.h
 lib/cmake/llama/llama-config.cmake
 lib/cmake/llama/llama-version.cmake
 lib/libggml.so
 lib/libllama.so
 %%EXAMPLES%%lib/libllava_shared.so
 libdata/pkgconfig/llama.pc