diff --git a/misc/llama-cpp/Makefile b/misc/llama-cpp/Makefile
index 1558a4a1b591..67ec4f972e1a 100644
--- a/misc/llama-cpp/Makefile
+++ b/misc/llama-cpp/Makefile
@@ -1,49 +1,49 @@
 PORTNAME=	llama-cpp
 DISTVERSIONPREFIX=	b
-DISTVERSION=	4061
+DISTVERSION=	4081
 CATEGORIES=	misc # machine-learning
 
 PATCH_SITES=	https://github.com/${GH_ACCOUNT}/${GH_PROJECT}/commit/
 PATCHFILES=	121f915a09c1117d34aff6e8faf6d252aaf11027.patch:-p1 # Add missing pthread includes: https://github.com/ggerganov/llama.cpp/pull/9258
 
 MAINTAINER=	yuri@FreeBSD.org
 COMMENT=	Facebook's LLaMA model in C/C++ # '
 WWW=		https://github.com/ggerganov/llama.cpp
 
 LICENSE=	MIT
 LICENSE_FILE=	${WRKSRC}/LICENSE
 
 BROKEN_armv7=	clang crashes, see https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=278810
 BROKEN_i386=	compilation fails, see https://github.com/ggerganov/llama.cpp/issues/9545
 
 USES=		cmake:testing compiler:c++11-lang python:run shebangfix
 USE_LDCONFIG=	yes
 
 USE_GITHUB=	yes
 GH_ACCOUNT=	ggerganov
 GH_PROJECT=	llama.cpp
 GH_TUPLE=	nomic-ai:kompute:4565194:kompute/kompute
 
 SHEBANG_GLOB=	*.py
 
 CMAKE_ON=	BUILD_SHARED_LIBS
 CMAKE_OFF=	LLAMA_BUILD_TESTS
 CMAKE_TESTING_ON=	LLAMA_BUILD_TESTS
 
 OPTIONS_DEFINE=	EXAMPLES VULKAN
 OPTIONS_DEFAULT=	VULKAN
 OPTIONS_SUB=	yes
 
 EXAMPLES_CMAKE_BOOL=	LLAMA_BUILD_EXAMPLES
 
 VULKAN_DESC=	Vulkan GPU offload support
 VULKAN_CMAKE_BOOL=	GGML_VULKAN
 VULKAN_BUILD_DEPENDS=	glslc:graphics/shaderc \
 			vulkan-headers>0:graphics/vulkan-headers
 VULKAN_LIB_DEPENDS=	libvulkan.so:graphics/vulkan-loader
 
 BINARY_ALIAS=	git=false # 2 tests fail: https://github.com/ggerganov/llama.cpp/issues/8906
 
 .include <bsd.port.mk>
diff --git a/misc/llama-cpp/distinfo b/misc/llama-cpp/distinfo
index 2befa7e83f71..104b191e5e4c 100644
--- a/misc/llama-cpp/distinfo
+++ b/misc/llama-cpp/distinfo
@@ -1,7 +1,7 @@
-TIMESTAMP = 1731220083
-SHA256 (ggerganov-llama.cpp-b4061_GH0.tar.gz) = a759ea4d5fb0089ab2dd0c6fd43685b4d152673b70c822fc3e45ad1f089ca537
-SIZE (ggerganov-llama.cpp-b4061_GH0.tar.gz) = 19548637
+TIMESTAMP = 1731652256
+SHA256 (ggerganov-llama.cpp-b4081_GH0.tar.gz) = 29b0897bf0a54e94e969dca5081599fd1f22a818b8988fd3490201c5d60c4a07
+SIZE (ggerganov-llama.cpp-b4081_GH0.tar.gz) = 19563850
 SHA256 (nomic-ai-kompute-4565194_GH0.tar.gz) = 95b52d2f0514c5201c7838348a9c3c9e60902ea3c6c9aa862193a212150b2bfc
 SIZE (nomic-ai-kompute-4565194_GH0.tar.gz) = 13540496
 SHA256 (121f915a09c1117d34aff6e8faf6d252aaf11027.patch) = 9a0c47ae3cb7dd51b6ce19187dafd48578210f69558f7c8044ee480471f1fd33
 SIZE (121f915a09c1117d34aff6e8faf6d252aaf11027.patch) = 591
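[Note, not part of the patch: the distinfo hunk above is mechanical output, not hand-edited. A sketch of how an update like this is typically produced, assuming a standard ports tree checkout with network access; `makesum` is the stock ports target that refetches the distfile and rewrites the TIMESTAMP/SHA256/SIZE lines:

  cd /usr/ports/misc/llama-cpp
  # edit Makefile: DISTVERSION 4061 -> 4081, then
  make makesum

This keeps the checksums in distinfo authoritative without ever editing them by hand.]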
diff --git a/misc/llama-cpp/pkg-plist b/misc/llama-cpp/pkg-plist
index 4bbe10e07004..7903cea3974f 100644
--- a/misc/llama-cpp/pkg-plist
+++ b/misc/llama-cpp/pkg-plist
@@ -1,56 +1,59 @@
 bin/convert_hf_to_gguf.py
 bin/llama-batched
 bin/llama-batched-bench
 bin/llama-bench
 bin/llama-cli
 bin/llama-convert-llama2c-to-ggml
 bin/llama-cvector-generator
 bin/llama-embedding
 bin/llama-simple-chat
 %%EXAMPLES%%bin/llama-eval-callback
 %%EXAMPLES%%bin/llama-export-lora
 %%EXAMPLES%%bin/llama-gbnf-validator
 %%EXAMPLES%%bin/llama-gguf
 %%EXAMPLES%%bin/llama-gguf-hash
 %%EXAMPLES%%bin/llama-gguf-split
 %%EXAMPLES%%bin/llama-gritlm
 %%EXAMPLES%%bin/llama-imatrix
 %%EXAMPLES%%bin/llama-infill
 %%EXAMPLES%%bin/llama-llava-cli
 %%EXAMPLES%%bin/llama-lookahead
 %%EXAMPLES%%bin/llama-lookup
 %%EXAMPLES%%bin/llama-lookup-create
 %%EXAMPLES%%bin/llama-lookup-merge
 %%EXAMPLES%%bin/llama-lookup-stats
 %%EXAMPLES%%bin/llama-minicpmv-cli
 %%EXAMPLES%%bin/llama-parallel
 %%EXAMPLES%%bin/llama-passkey
 %%EXAMPLES%%bin/llama-perplexity
 %%EXAMPLES%%bin/llama-quantize
 %%EXAMPLES%%bin/llama-quantize-stats
 %%EXAMPLES%%bin/llama-retrieval
 %%EXAMPLES%%bin/llama-save-load-state
 %%EXAMPLES%%bin/llama-server
 %%EXAMPLES%%bin/llama-simple
 %%EXAMPLES%%bin/llama-speculative
 %%EXAMPLES%%bin/llama-tokenize
 %%VULKAN%%bin/vulkan-shaders-gen
 include/ggml-alloc.h
 include/ggml-backend.h
 include/ggml-blas.h
 include/ggml-cann.h
 include/ggml-cpu.h
 include/ggml-cuda.h
 include/ggml-kompute.h
 include/ggml-metal.h
 include/ggml-rpc.h
 include/ggml-sycl.h
 include/ggml-vulkan.h
 include/ggml.h
 include/llama.h
 lib/cmake/llama/llama-config.cmake
 lib/cmake/llama/llama-version.cmake
 lib/libggml.so
+lib/libggml-base.so
+lib/libggml-cpu.so
+lib/libggml-vulkan.so
 lib/libllama.so
 %%EXAMPLES%%lib/libllava_shared.so
 libdata/pkgconfig/llama.pc
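[Note, not part of the patch: the three new lib/libggml-*.so entries reflect upstream splitting libggml into a base library plus per-backend libraries in recent llama.cpp releases. A sketch of how a pkg-plist change like this is usually verified, assuming the same ports checkout; `stage`, `check-plist`, and `stage-qa` are stock ports targets:

  cd /usr/ports/misc/llama-cpp
  make stage          # build and install into ${STAGEDIR}
  make check-plist    # flag files missing from, or orphaned in, pkg-plist
  make stage-qa       # additional QA checks (shared libraries, stripping, etc.)

One possible follow-up: lib/libggml-vulkan.so is listed unconditionally, but it is presumably only built when the VULKAN option is enabled; since the Makefile sets OPTIONS_SUB=yes, it could take a %%VULKAN%% prefix like bin/vulkan-shaders-gen above, so that check-plist also passes with VULKAN off.]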