diff --git a/misc/llama-cpp/Makefile b/misc/llama-cpp/Makefile
index e880ababbdc1..0129909c8343 100644
--- a/misc/llama-cpp/Makefile
+++ b/misc/llama-cpp/Makefile
@@ -1,81 +1,81 @@
 PORTNAME=	llama-cpp
 DISTVERSIONPREFIX=	b
-DISTVERSION=	5371
+DISTVERSION=	5689
 CATEGORIES=	misc # machine-learning
 
 MAINTAINER=	yuri@FreeBSD.org
 COMMENT=	Facebook's LLaMA model in C/C++ # '
 WWW=		https://github.com/ggerganov/llama.cpp
 
 LICENSE=	MIT
 LICENSE_FILE=	${WRKSRC}/LICENSE
 
 BROKEN_armv7=	clang crashes, see https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=278810
 BROKEN_i386=	compilation fails, see https://github.com/ggerganov/llama.cpp/issues/9545
 
 USES=		cmake:testing compiler:c++11-lang python:run shebangfix
 USE_LDCONFIG=	yes
 
 USE_GITHUB=	yes
 GH_ACCOUNT=	ggerganov
 GH_PROJECT=	llama.cpp
 GH_TUPLE=	nomic-ai:kompute:4565194:kompute/kompute
 
 SHEBANG_GLOB=	*.py
 
 CMAKE_ON=	BUILD_SHARED_LIBS
 CMAKE_OFF=	GGML_NATIVE \
 		FREEBSD_ALLOW_ADVANCED_CPU_FEATURES \
 		LLAMA_BUILD_TESTS
 CMAKE_TESTING_ON=	LLAMA_BUILD_TESTS
 
 # user for llama-server, only used when EXAMPLES=ON
 USER=		nobody
 SUB_LIST=	USER=${USER}
 
 OPTIONS_DEFINE=		CURL EXAMPLES VULKAN
 OPTIONS_DEFAULT=	CURL VULKAN
 OPTIONS_SUB=		yes
 
 CURL_DESC=		Use libcurl to download model from an URL
 CURL_CMAKE_BOOL=	LLAMA_CURL
 CURL_USES=		localbase
 CURL_LIB_DEPENDS=	libcurl.so:ftp/curl
 
 EXAMPLES_CMAKE_BOOL=	LLAMA_BUILD_EXAMPLES
 
 VULKAN_DESC=		Vulkan GPU offload support
 VULKAN_CMAKE_BOOL=	GGML_VULKAN
 VULKAN_BUILD_DEPENDS=	glslc:graphics/shaderc \
 			vulkan-headers>0:graphics/vulkan-headers
 VULKAN_LIB_DEPENDS=	libvulkan.so:graphics/vulkan-loader
 
 BINARY_ALIAS=	git=false \
 		python=${PYTHON_CMD} # for tests
 
 post-patch:
 	# set version in the code
 	@${REINPLACE_CMD} \
 		-e "s|set(BUILD_NUMBER 0)|set(BUILD_NUMBER ${DISTVERSION})|" \
 		${WRKSRC}/cmake/build-info.cmake
 
 do-test-ci: # build of tests fails, see https://github.com/ggerganov/llama.cpp/issues/10955
 	@cd ${WRKSRC} && \
 		${SETENV} ${MAKE_ENV} bash ci/run.sh ./tmp/results ./tmp/mnt
 
 .include <bsd.port.options.mk>
 
 .if ${PORT_OPTIONS:MEXAMPLES}
 USE_RC_SUBR=	llama-server
 .endif
 
 # tests as of 4458: 97% tests passed, 1 tests failed out of 31, see https://github.com/ggerganov/llama.cpp/issues/11036
 # tests as of 4649:
 # 88% tests passed, 4 tests failed out of 32
 # The following tests FAILED:
 #	18 - test-chat (Subprocess aborted) main # see https://github.com/ggerganov/llama.cpp/issues/11705
 #	24 - test-gguf (SEGFAULT) main
 #	25 - test-backend-ops (SEGFAULT) main
 #	32 - test-eval-callback (SEGFAULT) curl eval-callback
 
 .include <bsd.port.mk>
diff --git a/misc/llama-cpp/distinfo b/misc/llama-cpp/distinfo
index 065e1989894b..c810b3383e6a 100644
--- a/misc/llama-cpp/distinfo
+++ b/misc/llama-cpp/distinfo
@@ -1,5 +1,5 @@
-TIMESTAMP = 1747201270
-SHA256 (ggerganov-llama.cpp-b5371_GH0.tar.gz) = cce50220507565b78423fc45a1c534dc088289ab898517a379fdbf733ffd72bf
-SIZE (ggerganov-llama.cpp-b5371_GH0.tar.gz) = 21147325
+TIMESTAMP = 1750213757
+SHA256 (ggerganov-llama.cpp-b5689_GH0.tar.gz) = 1ab18411c1f9885770b331bcc1f70ce9b0ad29f8591d7cf516ab6cdc6a6c1a63
+SIZE (ggerganov-llama.cpp-b5689_GH0.tar.gz) = 24994019
 SHA256 (nomic-ai-kompute-4565194_GH0.tar.gz) = 95b52d2f0514c5201c7838348a9c3c9e60902ea3c6c9aa862193a212150b2bfc
 SIZE (nomic-ai-kompute-4565194_GH0.tar.gz) = 13540496
diff --git a/misc/llama-cpp/pkg-plist b/misc/llama-cpp/pkg-plist
index 5b36c5809aaa..0a36dc3db062 100644
--- a/misc/llama-cpp/pkg-plist
+++ b/misc/llama-cpp/pkg-plist
@@ -1,66 +1,67 @@
 bin/convert_hf_to_gguf.py
 %%EXAMPLES%%bin/llama-batched
 %%EXAMPLES%%bin/llama-batched-bench
 %%EXAMPLES%%bin/llama-bench
 %%EXAMPLES%%bin/llama-cli
 %%EXAMPLES%%bin/llama-convert-llama2c-to-ggml
 %%EXAMPLES%%bin/llama-cvector-generator
 %%EXAMPLES%%bin/llama-embedding
 %%EXAMPLES%%bin/llama-eval-callback
 %%EXAMPLES%%bin/llama-export-lora
 %%EXAMPLES%%bin/llama-finetune
 %%EXAMPLES%%bin/llama-gen-docs
 %%EXAMPLES%%bin/llama-gguf
 %%EXAMPLES%%bin/llama-gguf-hash
 %%EXAMPLES%%bin/llama-gguf-split
 %%EXAMPLES%%bin/llama-gritlm
 %%EXAMPLES%%bin/llama-imatrix
 %%EXAMPLES%%bin/llama-lookahead
 %%EXAMPLES%%bin/llama-lookup
 %%EXAMPLES%%bin/llama-lookup-create
 %%EXAMPLES%%bin/llama-lookup-merge
 %%EXAMPLES%%bin/llama-lookup-stats
 %%EXAMPLES%%bin/llama-mtmd-cli
 %%EXAMPLES%%bin/llama-parallel
 %%EXAMPLES%%bin/llama-passkey
 %%EXAMPLES%%bin/llama-perplexity
 %%EXAMPLES%%bin/llama-quantize
 %%EXAMPLES%%bin/llama-retrieval
 %%EXAMPLES%%bin/llama-run
 %%EXAMPLES%%bin/llama-save-load-state
 %%EXAMPLES%%bin/llama-server
 %%EXAMPLES%%bin/llama-simple
 %%EXAMPLES%%bin/llama-simple-chat
 %%EXAMPLES%%bin/llama-speculative
 %%EXAMPLES%%bin/llama-speculative-simple
 %%EXAMPLES%%bin/llama-tokenize
 %%EXAMPLES%%bin/llama-tts
-%%VULKAN%%bin/vulkan-shaders-gen
 include/ggml-alloc.h
 include/ggml-backend.h
 include/ggml-blas.h
 include/ggml-cann.h
 include/ggml-cpp.h
 include/ggml-cpu.h
 include/ggml-cuda.h
 include/ggml-kompute.h
 include/ggml-metal.h
 include/ggml-opt.h
 include/ggml-rpc.h
 include/ggml-sycl.h
 include/ggml-vulkan.h
 include/ggml.h
 include/gguf.h
 include/llama-cpp.h
 include/llama.h
+include/mtmd-helper.h
+include/mtmd.h
 lib/cmake/ggml/ggml-config.cmake
 lib/cmake/ggml/ggml-version.cmake
 lib/cmake/llama/llama-config.cmake
 lib/cmake/llama/llama-version.cmake
 lib/libggml-base.so
 lib/libggml-cpu.so
 %%VULKAN%%lib/libggml-vulkan.so
 lib/libggml.so
 lib/libllama.so
-lib/libmtmd_shared.so
+lib/libmtmd.so
 libdata/pkgconfig/llama.pc
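For context, a minimal sketch of how a version bump like this is typically sanity-checked before commit, assuming a checked-out ports tree; the poudriere jail name 140amd64 is illustrative, not part of this change:

    cd /usr/ports/misc/llama-cpp
    make makesum        # re-fetch the distfile, regenerate distinfo (TIMESTAMP/SHA256/SIZE)
    make stage          # fetch, extract, patch, build, install into the staging area
    make check-plist    # compare staged files against pkg-plist
    make stage-qa       # basic QA checks on the staged files
    # clean-room build; repeat with the CURL/EXAMPLES/VULKAN option matrix as desired
    poudriere testport -j 140amd64 misc/llama-cpp

make check-plist is the step that catches plist churn like the one above: the libmtmd_shared.so to libmtmd.so rename, the dropped vulkan-shaders-gen binary, and the new mtmd.h and mtmd-helper.h headers.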