diff --git a/misc/llama-cpp/Makefile b/misc/llama-cpp/Makefile
index 6edb9aebe139..56b4ea7aa439 100644
--- a/misc/llama-cpp/Makefile
+++ b/misc/llama-cpp/Makefile
@@ -1,40 +1,40 @@
 PORTNAME=	llama-cpp
 DISTVERSIONPREFIX=	b
-DISTVERSION=	2961
+DISTVERSION=	2972
 CATEGORIES=	misc # machine-learning
 
 MAINTAINER=	yuri@FreeBSD.org
 COMMENT=	Facebook's LLaMA model in C/C++ # '
 WWW=		https://github.com/ggerganov/llama.cpp
 
 LICENSE=	MIT
 LICENSE_FILE=	${WRKSRC}/LICENSE
 
 BROKEN_armv7=	clang crashes, see https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=278810
 
 USES=		cmake:testing compiler:c++11-lang python:run shebangfix
 USE_LDCONFIG=	yes
 
 USE_GITHUB=	yes
 GH_ACCOUNT=	ggerganov
 GH_PROJECT=	llama.cpp
 GH_TUPLE=	nomic-ai:kompute:4565194:kompute/kompute
 
 SHEBANG_GLOB=	*.py
 
 CMAKE_ON=	BUILD_SHARED_LIBS
 CMAKE_OFF=	LLAMA_BUILD_TESTS
 CMAKE_TESTING_ON=	LLAMA_BUILD_TESTS
 
 LDFLAGS+=	-pthread
 
 OPTIONS_DEFINE=	EXAMPLES
 OPTIONS_SUB=	yes
 
 EXAMPLES_CMAKE_BOOL=	LLAMA_BUILD_EXAMPLES
 
 BINARY_ALIAS=	git=false
 
 # 1 test fails due to a missing model file (stories260K.gguf)
 
 .include <bsd.port.mk>
diff --git a/misc/llama-cpp/distinfo b/misc/llama-cpp/distinfo
index e0fcd6b26928..8f59ba82f4e3 100644
--- a/misc/llama-cpp/distinfo
+++ b/misc/llama-cpp/distinfo
@@ -1,5 +1,5 @@
-TIMESTAMP = 1716351835
-SHA256 (ggerganov-llama.cpp-b2961_GH0.tar.gz) = 913df9e9de929520d5b837e9c5ef31f1d70c3b51d065bc50a15642954d1128fb
-SIZE (ggerganov-llama.cpp-b2961_GH0.tar.gz) = 20159850
+TIMESTAMP = 1716438065
+SHA256 (ggerganov-llama.cpp-b2972_GH0.tar.gz) = a0bf49bed07c0275e3950cd654aa57c53d3de39bfed5beb9e923cc00a93bb1bc
+SIZE (ggerganov-llama.cpp-b2972_GH0.tar.gz) = 20164656
 SHA256 (nomic-ai-kompute-4565194_GH0.tar.gz) = 95b52d2f0514c5201c7838348a9c3c9e60902ea3c6c9aa862193a212150b2bfc
 SIZE (nomic-ai-kompute-4565194_GH0.tar.gz) = 13540496