mirror of
https://github.com/gentoo-mirror/guru.git
synced 2025-04-04 01:18:33 -04:00
sci-ml/ollama: drop 0.5.7
Closes: https://bugs.gentoo.org/950634
Closes: https://bugs.gentoo.org/950483
Signed-off-by: Paul Zander <negril.nx+gentoo@gmail.com>
This commit is contained in:
parent
6ffd638c2e
commit
362f685874
@ -1,4 +1,2 @@
|
||||
DIST ollama-0.5.7-deps.tar.xz 115361652 BLAKE2B 8694b35ef2545481c2e1f6a9d7c462177f25e78aab79288197ec4b33190a905c7e92e54734725cde7f122e1161cbdaf9c72ae40cbeb0b8ee0af9075e3dbb6691 SHA512 d7abe6266120cb9e731e53f6e14e16d8991e76fbab06348de945f53dc407788324b850308e7200616d92dd17a417ad0a2491eddd543bbe2cfee6a66c8ab81840
|
||||
DIST ollama-0.5.7.gh.tar.gz 2345089 BLAKE2B 7d5063e9f665ab2a957d449b38017e6a9bb435c938749161c711cfc35d8a0361e7f4db214e0782f3b51c70c909fc5be8b76ca342cda6163b5aca5fdd733c55d9 SHA512 ea8adcec4f8f932c422a400b8cafb4b983bfa0721cd14383ceb8e0a4f588ecd1289d2e1de46a916c1b34d13e5dab2825ef11a37fc3e797345348dea3bd9144fe
|
||||
DIST ollama-0.6.3-vendor.tar.xz 4417068 BLAKE2B 33aabb08f8582c7f211ebfc51c95af96d7ce1402c6350b656fb0114fe849901ea65f22bf833175a87aa318a685f35c0941b236dfcbf84c81d7766a91d66f6db3 SHA512 79044b751daba3d463d7890792d5a6581e96317951de763191b128237e90e81dac578b674654d527e70d48fa059cb62f40831554c432a2a424d659790dbd7a10
|
||||
DIST ollama-0.6.3.gh.tar.gz 7950300 BLAKE2B 4a14bab714b11104753caafe58a1301490d6a24d7af90a374741b6977fafb47792a50fa570e0883b13d5275f621ae1516f2d4055f6c32583e42821725d65e59f SHA512 28a0688f1aa35a161e09af19b643b2a53115fa29ce0b522ed2bd76bcaedc710553a266af2370886bc23408025935f786d13f07297ffb3e783b13132237f10712
|
||||
|
@ -1,24 +0,0 @@
|
||||
From d711567ba482e80520b5cc36026c80f55f721319 Mon Sep 17 00:00:00 2001
|
||||
From: Paul Zander <negril.nx@gmail.com>
|
||||
Date: Sat, 25 Jan 2025 19:00:31 +0100
|
||||
Subject: [PATCH] include cstdint
|
||||
|
||||
---
|
||||
llama/llama-mmap.h | 1 +
|
||||
1 file changed, 1 insertion(+)
|
||||
|
||||
diff --git a/llama/llama-mmap.h b/llama/llama-mmap.h
|
||||
index ebd7dc16..4c8e3929 100644
|
||||
--- a/llama/llama-mmap.h
|
||||
+++ b/llama/llama-mmap.h
|
||||
@@ -26,6 +26,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
+#include <cstdint>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
--
|
||||
2.48.0
|
||||
|
@ -1,184 +0,0 @@
|
||||
# Copyright 2024-2025 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2

EAPI=8

ROCM_VERSION=6.1
inherit cuda rocm
inherit go-module systemd

DESCRIPTION="Get up and running with Llama 3, Mistral, Gemma, and other language models."
HOMEPAGE="https://ollama.com"

if [[ ${PV} == *9999* ]]; then
	inherit git-r3
	EGIT_REPO_URI="https://github.com/ollama/ollama.git"
else
	SRC_URI="
		https://github.com/ollama/${PN}/archive/refs/tags/v${PV}.tar.gz -> ${P}.gh.tar.gz
		https://github.com/Tapchicoma/ebuild-deps/raw/refs/heads/main/go-deps/${PN}-${PV}-deps.tar.xz
	"
	KEYWORDS="~amd64"
fi

LICENSE="MIT"
SLOT="0"

# SIMD feature toggles exposed as cpu_flags_x86_* USE flags.
X86_CPU_FLAGS=(
	avx
	avx2
	avx512f
	avx512vbmi
	avx512_vnni
	avx512_bf16
)
CPU_FLAGS=("${X86_CPU_FLAGS[@]/#/cpu_flags_x86_}")
IUSE="${CPU_FLAGS[*]} cuda rocm"

# Each AVX tier implies the tier below it.
REQUIRED_USE="
	cpu_flags_x86_avx2? ( cpu_flags_x86_avx )
	cpu_flags_x86_avx512f? ( cpu_flags_x86_avx2 )
	cpu_flags_x86_avx512vbmi? ( cpu_flags_x86_avx512f )
	cpu_flags_x86_avx512_vnni? ( cpu_flags_x86_avx512f )
	cpu_flags_x86_avx512_bf16? ( cpu_flags_x86_avx512f )
"

DEPEND="
	>=dev-lang/go-1.23.4
	cuda? (
		dev-util/nvidia-cuda-toolkit:=
	)
	rocm? (
		>=sci-libs/hipBLAS-${ROCM_VERSION}:=[${ROCM_USEDEP}]
	)
"

RDEPEND="
	acct-group/${PN}
	acct-user/${PN}[cuda?]
"

PATCHES=(
	"${FILESDIR}/${PN}-0.5.7-include-cstdint.patch"
)
|
||||
|
||||
pkg_pretend() {
	# ROCm support has seen limited testing; point users at upstream's
	# container-based workflow if the native build gives them trouble.
	if use rocm; then
		ewarn "WARNING: AMD support in this ebuild are experimental"
		einfo "If you run into issues, especially compiling dev-libs/rocm-opencl-runtime"
		einfo "you may try the docker image here https://github.com/ROCm/ROCm-docker"
		einfo "and follow instructions here"
		einfo "https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html"
	fi
}
|
||||
|
||||
src_prepare() {
	default

	# Upstream's cuda.make forwards host CFLAGS/CXXFLAGS to nvcc, which
	# rejects many GCC-only flags; route them through NVCCFLAGS instead.
	sed \
		-e "s/(CFLAGS)/(NVCCFLAGS)/g" \
		-e "s/(CXXFLAGS)/(NVCCFLAGS)/g" \
		-i make/cuda.make || die

	if use rocm; then
		# --hip-version gets appended to the compile flags which isn't a known flag.
		# This causes rocm builds to fail because -Wunused-command-line-argument is turned on.
		# Use nuclear option to fix this.
		# Disable -Werror's from go modules.
		# Fix: the previous pattern '.go' only matched files literally named
		# ".go"; '*.go' matches all Go sources as intended.
		find "${S}" -name "*.go" -exec sed -i "s/ -Werror / /g" {} + || die
	fi
}
|
||||
|
||||
src_configure() {
	# Map enabled USE flags onto the names upstream's makefiles expect.
	local CUSTOM_CPU_FLAGS=()
	use cpu_flags_x86_avx && CUSTOM_CPU_FLAGS+=("avx")
	use cpu_flags_x86_avx2 && CUSTOM_CPU_FLAGS+=("avx2")
	use cpu_flags_x86_avx512f && CUSTOM_CPU_FLAGS+=("avx512")
	use cpu_flags_x86_avx512vbmi && CUSTOM_CPU_FLAGS+=("avx512vbmi")
	use cpu_flags_x86_avx512_vnni && CUSTOM_CPU_FLAGS+=("avx512vnni")
	use cpu_flags_x86_avx512_bf16 && CUSTOM_CPU_FLAGS+=("avx512bf16")

	# Build basic ollama executable with cpu features built in.
	local cpu_flag_list
	cpu_flag_list=$(IFS=','; echo "${CUSTOM_CPU_FLAGS[*]}")
	# Deliberately not local: src_compile reuses this array.
	emakeargs=(
		# CCACHE=""
		"CUSTOM_CPU_FLAGS=${cpu_flag_list}"
	)

	if use cuda; then
		export NVCC_CCBIN
		NVCC_CCBIN="$(cuda_gccdir)"

		# Honour a user-provided architecture list, if any.
		if [[ -n ${CUDAARCHS} ]]; then
			emakeargs+=( CUDA_ARCHITECTURES="${CUDAARCHS}" )
		fi

		if has_version "=dev-util/nvidia-cuda-toolkit-12*"; then
			emakeargs+=(
				CUDA_12_COMPILER="${CUDA_PATH:=${EPREFIX}/opt/cuda}/bin/nvcc"
				CUDA_12_PATH="${CUDA_PATH:=${EPREFIX}/opt/cuda}"
			)
		fi

		if has_version "=dev-util/nvidia-cuda-toolkit-11*"; then
			emakeargs+=(
				CUDA_11_COMPILER="${CUDA_PATH:=${EPREFIX}/opt/cuda}/bin/nvcc"
				CUDA_11_PATH="${CUDA_PATH:=${EPREFIX}/opt/cuda}"
			)
		fi

		cuda_add_sandbox -w
	else
		emakeargs+=(OLLAMA_SKIP_CUDA_GENERATE="1")
	fi

	if use rocm; then
		emakeargs+=(
			HIP_ARCHS="$(get_amdgpu_flags)"
			HIP_PATH="${EPREFIX}/usr"
		)
		check_amdgpu
	else
		emakeargs+=(OLLAMA_SKIP_ROCM_GENERATE="1")
	fi

	emake "${emakeargs[@]}" help-runners
	# Persist the argument list for the compile phase.
	export emakeargs
}
|
||||
|
||||
src_compile() {
	# emakeargs is assembled (and exported) by src_configure.
	emake "${emakeargs[@]}" dist
}
|
||||
|
||||
src_install() {
	dobin "dist/linux-${ARCH}/bin/ollama"

	# Runner libraries are only produced for accelerated builds.
	if [[ -d "dist/linux-${ARCH}/lib/ollama" ]]; then
		insinto /usr/lib
		doins -r "dist/linux-${ARCH}/lib/ollama"
	fi

	if use rocm; then
		# doins drops the executable bit; restore it on the rocm runner.
		fperms +x /usr/lib/ollama/runners/rocm/ollama_llama_server
	fi

	doinitd "${FILESDIR}"/ollama.init
	systemd_dounit "${FILESDIR}"/ollama.service
}
|
||||
|
||||
pkg_preinst() {
	# Log directory owned by the ollama service user/group.
	keepdir /var/log/ollama
	fowners ollama:ollama /var/log/ollama
}
|
||||
|
||||
pkg_postinst() {
	# Brief post-install usage pointers.
	einfo "Quick guide:"
	einfo "ollama serve"
	einfo "ollama run llama3:70b"
	einfo "See available models at https://ollama.com/library"
}
|
Loading…
x
Reference in New Issue
Block a user