diff --git a/sci-misc/llama-cpp/Manifest b/sci-misc/llama-cpp/Manifest
index 3085f9c757..aa174fbbd6 100644
--- a/sci-misc/llama-cpp/Manifest
+++ b/sci-misc/llama-cpp/Manifest
@@ -1,2 +1,3 @@
 DIST llama-cpp-0_pre4576.tar.gz 20506059 BLAKE2B 8f011811e4df1f8d0c26b19f96a709980e078dc7e769b33cbbb03a852a29b489f80c8a1e298fecea53997068f6b7897e4536ba5db289aa445a1a6f16f98adce3 SHA512 21150721524283454ab53e370fdaf4e766f89fbb8d4b43072b10657d8c8b686630616cddbae7954147a2ba0360ad20c4643761f3774481e13a7b180812935c4e
 DIST llama-cpp-0_pre4763.tar.gz 20737582 BLAKE2B f6cb6885465e144c19698ac65410f59a6cc2b78d511968bc26c521ba90be87d102eb413e3ef903da30dae3336780e80a4c20cbbea30cc67375f790567e0e6e7a SHA512 c48923286e717d734a3414ae12182c869dd0a99fde722b46d48822a9cbcc5fc16ec5ade4108bd463990a3c9880ea58b559ba0a6975d04c348b474893df566bc9
+DIST llama-cpp-0_pre4848.tar.gz 20799416 BLAKE2B 6731dd8ab01f66fca24ad385c2611bc4a11df8beda692e88d79e1d6ab931c908d5a5d304d9c423d43c09d89e80c0b8dd1d57be23cb1a3bb522dbeac112ded604 SHA512 4e08cd24a96ad7c96abdc834d4b5d2d74ce01dd8774d81b693c25b890a0982ca9135cfb743b02f886277d2d0fc92c4e4b330e9acf0977fe00b2f1d4df70243d3
diff --git a/sci-misc/llama-cpp/llama-cpp-0_pre4848.ebuild b/sci-misc/llama-cpp/llama-cpp-0_pre4848.ebuild
new file mode 100644
index 0000000000..b4db64b49d
--- /dev/null
+++ b/sci-misc/llama-cpp/llama-cpp-0_pre4848.ebuild
@@ -0,0 +1,93 @@
+# Copyright 2025 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+ROCM_VERSION="6.3"
+
+inherit cmake rocm
+
+if [[ "${PV}" != "9999" ]]; then
+	KEYWORDS="~amd64"
+	MY_PV="b${PV#0_pre}"
+	S="${WORKDIR}/llama.cpp-${MY_PV}"
+	SRC_URI="https://github.com/ggerganov/llama.cpp/archive/refs/tags/${MY_PV}.tar.gz -> ${P}.tar.gz"
+else
+	inherit git-r3
+	EGIT_REPO_URI="https://github.com/ggerganov/llama.cpp.git"
+fi
+
+DESCRIPTION="Port of Facebook's LLaMA model in C/C++"
+HOMEPAGE="https://github.com/ggerganov/llama.cpp"
+
+LICENSE="MIT"
+SLOT="0"
+CPU_FLAGS_X86=( avx avx2 f16c )
+IUSE="curl openblas blis hip"
+REQUIRED_USE="?? ( openblas blis )"
+
+AMDGPU_TARGETS_COMPAT=(
+	gfx900
+	gfx90c
+	gfx902
+	gfx1010
+	gfx1011
+	gfx1012
+	gfx1030
+	gfx1031
+	gfx1032
+	gfx1034
+	gfx1035
+	gfx1036
+	gfx1100
+	gfx1101
+	gfx1102
+	gfx1103
+	gfx1150
+	gfx1151
+)
+
+# curl is needed for pulling models from huggingface
+# numpy is used by convert_hf_to_gguf.py
+DEPEND="
+	curl? ( net-misc/curl:= )
+	openblas? ( sci-libs/openblas:= )
+	blis? ( sci-libs/blis:= )
+	hip? ( >=dev-util/hip-6.3:= )
+"
+RDEPEND="${DEPEND}
+	dev-python/numpy
+"
+PATCHES=( "${FILESDIR}/blas-ld.diff" )
+
+src_configure() {
+	local mycmakeargs=(
+		-DLLAMA_BUILD_TESTS=OFF
+		-DLLAMA_BUILD_SERVER=ON
+		-DCMAKE_SKIP_BUILD_RPATH=ON
+		-DGGML_NATIVE=0 # don't set march
+		-DLLAMA_CURL=$(usex curl ON OFF)
+		-DBUILD_NUMBER="1"
+	)
+
+	if use openblas ; then
+		mycmakeargs+=(
+			-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
+		)
+	fi
+
+	if use blis ; then
+		mycmakeargs+=(
+			-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FLAME
+		)
+	fi
+
+	if use hip; then
+		rocm_use_hipcc
+		mycmakeargs+=(
+			-DGGML_HIP=ON -DAMDGPU_TARGETS=$(get_amdgpu_flags)
+		)
+	fi
+
+	cmake_src_configure
+}
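
A minimal sketch of how this bump might be smoke-tested from a local tree; the repository path and USE flags below are illustrative assumptions, not part of the commit:

    # Hypothetical local test, assuming the tree is checked out at /var/db/repos/gentoo.
    cd /var/db/repos/gentoo/sci-misc/llama-cpp
    # Regenerate the Manifest digests for the new 0_pre4848 tarball.
    ebuild llama-cpp-0_pre4848.ebuild manifest
    # Test-build with curl and OpenBLAS; note openblas and blis are mutually
    # exclusive per REQUIRED_USE="?? ( openblas blis )".
    USE="curl openblas" emerge --ask =sci-misc/llama-cpp-0_pre4848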