diff --git a/misc/llama-cpp/Makefile b/misc/llama-cpp/Makefile
index 443a981dff45..c9dba2273d8a 100644
--- a/misc/llama-cpp/Makefile
+++ b/misc/llama-cpp/Makefile
@@ -1,45 +1,45 @@
 PORTNAME=	llama-cpp
 DISTVERSIONPREFIX=	b
-DISTVERSION=	3565
+DISTVERSION=	3567
 CATEGORIES=	misc # machine-learning
 
 MAINTAINER=	yuri@FreeBSD.org
 COMMENT=	Facebook's LLaMA model in C/C++ # '
 WWW=		https://github.com/ggerganov/llama.cpp
 
 LICENSE=	MIT
 LICENSE_FILE=	${WRKSRC}/LICENSE
 
 BROKEN_armv7=	clang crashes, see https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=278810
 
 USES=		cmake:testing compiler:c++11-lang python:run shebangfix
 USE_LDCONFIG=	yes
 
 USE_GITHUB=	yes
 GH_ACCOUNT=	ggerganov
 GH_PROJECT=	llama.cpp
 GH_TUPLE=	nomic-ai:kompute:4565194:kompute/kompute
 
 SHEBANG_GLOB=	*.py
 
 CMAKE_ON=	BUILD_SHARED_LIBS
 CMAKE_OFF=	LLAMA_BUILD_TESTS
 CMAKE_TESTING_ON=	LLAMA_BUILD_TESTS
 
 OPTIONS_DEFINE=	EXAMPLES VULKAN
 OPTIONS_DEFAULT=	VULKAN
 OPTIONS_SUB=	yes
 
 EXAMPLES_CMAKE_BOOL=	LLAMA_BUILD_EXAMPLES
 
 VULKAN_DESC=	Vulkan GPU offload support
 VULKAN_CMAKE_BOOL=	GGML_VULKAN
 VULKAN_BUILD_DEPENDS=	glslc:graphics/shaderc \
 			vulkan-headers>0:graphics/vulkan-headers
 VULKAN_LIB_DEPENDS=	libvulkan.so:graphics/vulkan-loader
 
 BINARY_ALIAS=	git=false
 
 # 2 tests fail: https://github.com/ggerganov/llama.cpp/issues/8906
 
 .include <bsd.port.mk>
diff --git a/misc/llama-cpp/distinfo b/misc/llama-cpp/distinfo
index ddb09bc6b55b..43c10bf8ee2e 100644
--- a/misc/llama-cpp/distinfo
+++ b/misc/llama-cpp/distinfo
@@ -1,5 +1,5 @@
-TIMESTAMP = 1723346195
-SHA256 (ggerganov-llama.cpp-b3565_GH0.tar.gz) = 8ae020429a91ae4eab947d8cc14424188bc867b3d15f85da5e768e156f8490c7
-SIZE (ggerganov-llama.cpp-b3565_GH0.tar.gz) = 19034159
+TIMESTAMP = 1723409059
+SHA256 (ggerganov-llama.cpp-b3567_GH0.tar.gz) = 22fec4f058e8edaa02f927cccf5c4ca1e038c8565ed9e261b89b02adb528e5d8
+SIZE (ggerganov-llama.cpp-b3567_GH0.tar.gz) = 19034183
 SHA256 (nomic-ai-kompute-4565194_GH0.tar.gz) = 95b52d2f0514c5201c7838348a9c3c9e60902ea3c6c9aa862193a212150b2bfc
 SIZE (nomic-ai-kompute-4565194_GH0.tar.gz) = 13540496