diff --git a/misc/llama-cpp/Makefile b/misc/llama-cpp/Makefile
index f59d16d148d3..3230207092e5 100644
--- a/misc/llama-cpp/Makefile
+++ b/misc/llama-cpp/Makefile
@@ -1,76 +1,77 @@
 PORTNAME=	llama-cpp
 DISTVERSIONPREFIX=	b
 DISTVERSION=	5097
-PORTREVISION=	1
+PORTREVISION=	2
 CATEGORIES=	misc # machine-learning

 MAINTAINER=	yuri@FreeBSD.org
 COMMENT=	Facebook's LLaMA model in C/C++ # '
 WWW=		https://github.com/ggerganov/llama.cpp

 LICENSE=	MIT
 LICENSE_FILE=	${WRKSRC}/LICENSE

 BROKEN_armv7=	clang crashes, see https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=278810
 BROKEN_i386=	compilation fails, see https://github.com/ggerganov/llama.cpp/issues/9545

 USES=		cmake:testing compiler:c++11-lang python:run shebangfix
 USE_LDCONFIG=	yes

 USE_GITHUB=	yes
 GH_ACCOUNT=	ggerganov
 GH_PROJECT=	llama.cpp
 GH_TUPLE=	nomic-ai:kompute:4565194:kompute/kompute

 SHEBANG_GLOB=	*.py

 CMAKE_ON=	BUILD_SHARED_LIBS
 CMAKE_OFF=	GGML_NATIVE \
+		FREEBSD_ALLOW_ADVANCED_CPU_FEATURES \
 		LLAMA_BUILD_TESTS
 CMAKE_TESTING_ON=	LLAMA_BUILD_TESTS

 # user for llama-server, only used when EXAMPLES=ON
 USER=		nobody
 SUB_LIST=	USER=${USER}

 OPTIONS_DEFINE=	CURL EXAMPLES VULKAN
 OPTIONS_DEFAULT=	CURL VULKAN
 OPTIONS_SUB=	yes

 CURL_DESCR=	Use libcurl to download model from an URL
 CURL_CMAKE_BOOL=	LLAMA_CURL
 CURL_USES=	localbase
 CURL_LIB_DEPENDS=	libcurl.so:ftp/curl

 EXAMPLES_CMAKE_BOOL=	LLAMA_BUILD_EXAMPLES

 VULKAN_DESC=	Vulkan GPU offload support
 VULKAN_CMAKE_BOOL=	GGML_VULKAN
 VULKAN_BUILD_DEPENDS=	glslc:graphics/shaderc \
 			vulkan-headers>0:graphics/vulkan-headers
 VULKAN_LIB_DEPENDS=	libvulkan.so:graphics/vulkan-loader

 BINARY_ALIAS=	git=false \
 		python=${PYTHON_CMD} # for tests

 do-test-ci: # build of tests fails, see https://github.com/ggerganov/llama.cpp/issues/10955
 	@cd ${WRKSRC} && \
 		${SETENV} ${MAKE_ENV} bash ci/run.sh ./tmp/results ./tmp/mnt

 .include <bsd.port.options.mk>

 .if ${PORT_OPTIONS:MEXAMPLES}
 USE_RC_SUBR=	llama-server
 .endif

 # tests as of 4458: 97% tests passed, 1 tests failed out of 31, see https://github.com/ggerganov/llama.cpp/issues/11036
 # tests as of 4649:
 # 88% tests passed, 4 tests failed out of 32
 # The following tests FAILED:
 #	18 - test-chat (Subprocess aborted)		main
 #	     see https://github.com/ggerganov/llama.cpp/issues/11705
 #	24 - test-gguf (SEGFAULT)			main
 #	25 - test-backend-ops (SEGFAULT)		main
 #	32 - test-eval-callback (SEGFAULT)		curl eval-callback

 .include <bsd.port.mk>
diff --git a/misc/llama-cpp/files/patch-ggml_src_ggml-cpu_CMakeLists.txt b/misc/llama-cpp/files/patch-ggml_src_ggml-cpu_CMakeLists.txt
new file mode 100644
index 000000000000..5e6ff248184a
--- /dev/null
+++ b/misc/llama-cpp/files/patch-ggml_src_ggml-cpu_CMakeLists.txt
@@ -0,0 +1,11 @@
+--- ggml/src/ggml-cpu/CMakeLists.txt.orig	2025-04-10 23:26:06 UTC
++++ ggml/src/ggml-cpu/CMakeLists.txt
+@@ -236,7 +236,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
+     else ()
+         if (GGML_NATIVE)
+             list(APPEND ARCH_FLAGS -march=native)
+-        else ()
++        elseif (FREEBSD_ALLOW_ADVANCED_CPU_FEATURES)
+             list(APPEND ARCH_FLAGS -msse4.2)
+             list(APPEND ARCH_DEFINITIONS GGML_SSE42)
+             if (GGML_F16C)