How to Run LLMs in an Offline Environment: A One-Click Ollama Deployment Guide (Ubuntu)
1. Background: Why Deploy Offline
- Limited network connectivity: some servers live in environments with no reliable or continuous network access. Offline deployment keeps them running without depending on a connection.
- Security requirements: some organizations have strict network-security policies and do not want servers downloading or updating software over external networks. Offline deployment avoids the associated risks.
- Lower network costs: in regions where connectivity is expensive or unstable, offline deployment avoids repeated downloads and data transfers, saving bandwidth costs.
- Faster rollout: with every artifact staged locally, installation does not wait on downloads, and the server can be brought up immediately.
2. Ollama Offline Installation (v0.3.12)
Step 1: Check the server's CPU architecture
lscpu
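Architecture is what decides which package to grab; if the lscpu output feels verbose, `uname -m` prints it directly:
# print just the machine architecture
uname -m
# x86_64  -> ollama-linux-amd64
# aarch64 -> ollama-linux-arm64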
Step 2: Download the Ollama package matching the CPU
# x86_64 CPUs: download ollama-linux-amd64
# aarch64/arm64 CPUs: download ollama-linux-arm64
# Release page: https://github.com/ollama/ollama/releases/
https://github.com/ollama/ollama/releases/download/v0.3.12/ollama-linux-amd64.tgz
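On a machine that does have internet access, the package can be fetched with curl and, if a checksum is published with the release, verified before being carried over to the offline server:
curl -L -o ollama-linux-amd64.tgz \
  https://github.com/ollama/ollama/releases/download/v0.3.12/ollama-linux-amd64.tgz
sha256sum ollama-linux-amd64.tgz  # compare against the published checksum, if any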
Step 3: Download the Ollama install script
## Source 1: open the following URL in a browser
https://ollama.com/install.sh
## Source 2
https://github.com/ollama/ollama/blob/main/scripts/install.sh
## Mirror for faster access from mainland China: https://gitee.com/ai-big-model/ollama/blob/main/scripts/install.sh
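On the connected machine, either source can also be fetched from the command line, e.g.:
curl -fsSL https://ollama.com/install.sh -o install.sh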
The install.sh script is shown below:
## install.sh
#!/bin/sh
# This script installs Ollama on Linux.
# It detects the current operating system architecture and installs the appropriate version of Ollama.
set -eu
status() { echo ">>> $*" >&2; }
error() { echo "ERROR $*"; exit 1; }
warning() { echo "WARNING: $*"; }
TEMP_DIR=$(mktemp -d)
cleanup() { rm -rf $TEMP_DIR; }
trap cleanup EXIT
available() { command -v $1 >/dev/null; }
require() {
local MISSING=''
for TOOL in $*; do
if ! available $TOOL; then
MISSING="$MISSING $TOOL"
fi
done
echo $MISSING
}
[ "$(uname -s)" = "Linux" ] || error 'This script is intended to run on Linux only.'
ARCH=$(uname -m)
case "$ARCH" in
x86_64) ARCH="amd64" ;;
aarch64|arm64) ARCH="arm64" ;;
*) error "Unsupported architecture: $ARCH" ;;
esac
IS_WSL2=false
KERN=$(uname -r)
case "$KERN" in
*icrosoft*WSL2 | *icrosoft*wsl2) IS_WSL2=true;;
*icrosoft) error "Microsoft WSL1 is not currently supported. Please use WSL2 with 'wsl --set-version <distro> 2'" ;;
*) ;;
esac
VER_PARAM="${OLLAMA_VERSION:+?version=$OLLAMA_VERSION}"
SUDO=
if [ "$(id -u)" -ne 0 ]; then
# Not running as root, so sudo is required
if ! available sudo; then
error "This script requires superuser permissions. Please re-run as root."
fi
SUDO="sudo"
fi
NEEDS=$(require curl awk grep sed tee xargs)
if [ -n "$NEEDS" ]; then
status "ERROR: The following tools are required but missing:"
for NEED in $NEEDS; do
echo " - $NEED"
done
exit 1
fi
for BINDIR in /usr/local/bin /usr/bin /bin; do
echo $PATH | grep -q $BINDIR && break || continue
done
OLLAMA_INSTALL_DIR=$(dirname ${BINDIR})
status "Installing ollama to $OLLAMA_INSTALL_DIR"
$SUDO install -o0 -g0 -m755 -d $BINDIR
$SUDO install -o0 -g0 -m755 -d "$OLLAMA_INSTALL_DIR"
if curl -I --silent --fail --location "https://ollama.com/download/ollama-linux-${ARCH}.tgz${VER_PARAM}" >/dev/null ; then
status "Downloading Linux ${ARCH} bundle"
curl --fail --show-error --location --progress-bar \
"https://ollama.com/download/ollama-linux-${ARCH}.tgz${VER_PARAM}" | \
$SUDO tar -xzf - -C "$OLLAMA_INSTALL_DIR"
BUNDLE=1
if [ "$OLLAMA_INSTALL_DIR/bin/ollama" != "$BINDIR/ollama" ] ; then
status "Making ollama accessible in the PATH in $BINDIR"
$SUDO ln -sf "$OLLAMA_INSTALL_DIR/ollama" "$BINDIR/ollama"
fi
else
status "Downloading Linux ${ARCH} CLI"
curl --fail --show-error --location --progress-bar -o "$TEMP_DIR/ollama"\
"https://ollama.com/download/ollama-linux-${ARCH}${VER_PARAM}"
$SUDO install -o0 -g0 -m755 $TEMP_DIR/ollama $OLLAMA_INSTALL_DIR/ollama
BUNDLE=0
if [ "$OLLAMA_INSTALL_DIR/ollama" != "$BINDIR/ollama" ] ; then
status "Making ollama accessible in the PATH in $BINDIR"
$SUDO ln -sf "$OLLAMA_INSTALL_DIR/ollama" "$BINDIR/ollama"
fi
fi
install_success() {
status 'The Ollama API is now available at 127.0.0.1:11434.'
status 'Install complete. Run "ollama" from the command line.'
}
trap install_success EXIT
# Everything from this point onwards is optional.
configure_systemd() {
if ! id ollama >/dev/null 2>&1; then
status "Creating ollama user..."
$SUDO useradd -r -s /bin/false -U -m -d /usr/share/ollama ollama
fi
if getent group render >/dev/null 2>&1; then
status "Adding ollama user to render group..."
$SUDO usermod -a -G render ollama
fi
if getent group video >/dev/null 2>&1; then
status "Adding ollama user to video group..."
$SUDO usermod -a -G video ollama
fi
status "Adding current user to ollama group..."
$SUDO usermod -a -G ollama $(whoami)
status "Creating ollama systemd service..."
cat <<EOF | $SUDO tee /etc/systemd/system/ollama.service >/dev/null
[Unit]
Description=Ollama Service
After=network-online.target
[Service]
ExecStart=$BINDIR/ollama serve
User=ollama
Group=ollama
Restart=always
RestartSec=3
Environment="PATH=$PATH"
[Install]
WantedBy=default.target
EOF
SYSTEMCTL_RUNNING="$(systemctl is-system-running || true)"
case $SYSTEMCTL_RUNNING in
running|degraded)
status "Enabling and starting ollama service..."
$SUDO systemctl daemon-reload
$SUDO systemctl enable ollama
start_service() { $SUDO systemctl restart ollama; }
trap start_service EXIT
;;
esac
}
if available systemctl; then
configure_systemd
fi
# WSL2 only supports GPUs via nvidia passthrough
# so check for nvidia-smi to determine if GPU is available
if [ "$IS_WSL2" = true ]; then
if available nvidia-smi && [ -n "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")" ]; then
status "Nvidia GPU detected."
fi
install_success
exit 0
fi
# Install GPU dependencies on Linux
if ! available lspci && ! available lshw; then
warning "Unable to detect NVIDIA/AMD GPU. Install lspci or lshw to automatically detect and install GPU dependencies."
exit 0
fi
check_gpu() {
# Look for devices based on vendor ID for NVIDIA and AMD
case $1 in
lspci)
case $2 in
nvidia) available lspci && lspci -d '10de:' | grep -q 'NVIDIA' || return 1 ;;
amdgpu) available lspci && lspci -d '1002:' | grep -q 'AMD' || return 1 ;;
esac ;;
lshw)
case $2 in
nvidia) available lshw && $SUDO lshw -c display -numeric -disable network | grep -q 'vendor: .* \[10DE\]' || return 1 ;;
amdgpu) available lshw && $SUDO lshw -c display -numeric -disable network | grep -q 'vendor: .* \[1002\]' || return 1 ;;
esac ;;
nvidia-smi) available nvidia-smi || return 1 ;;
esac
}
if check_gpu nvidia-smi; then
status "NVIDIA GPU installed."
exit 0
fi
if ! check_gpu lspci nvidia && ! check_gpu lshw nvidia && ! check_gpu lspci amdgpu && ! check_gpu lshw amdgpu; then
install_success
warning "No NVIDIA/AMD GPU detected. Ollama will run in CPU-only mode."
exit 0
fi
if check_gpu lspci amdgpu || check_gpu lshw amdgpu; then
if [ $BUNDLE -ne 0 ]; then
status "Downloading Linux ROCm ${ARCH} bundle"
curl --fail --show-error --location --progress-bar \
"https://ollama.com/download/ollama-linux-${ARCH}-rocm.tgz${VER_PARAM}" | \
$SUDO tar -xzf - -C "$OLLAMA_INSTALL_DIR"
install_success
status "AMD GPU ready."
exit 0
fi
# Look for pre-existing ROCm v6 before downloading the dependencies
for search in "${HIP_PATH:-''}" "${ROCM_PATH:-''}" "/opt/rocm" "/usr/lib64"; do
if [ -n "${search}" ] && [ -e "${search}/libhipblas.so.2" -o -e "${search}/lib/libhipblas.so.2" ]; then
status "Compatible AMD GPU ROCm library detected at ${search}"
install_success
exit 0
fi
done
status "Downloading AMD GPU dependencies..."
$SUDO rm -rf /usr/share/ollama/lib
$SUDO chmod o+x /usr/share/ollama
$SUDO install -o ollama -g ollama -m 755 -d /usr/share/ollama/lib/rocm
curl --fail --show-error --location --progress-bar "https://ollama.com/download/ollama-linux-amd64-rocm.tgz${VER_PARAM}" \
| $SUDO tar zx --owner ollama --group ollama -C /usr/share/ollama/lib/rocm .
install_success
status "AMD GPU ready."
exit 0
fi
CUDA_REPO_ERR_MSG="NVIDIA GPU detected, but your OS and Architecture are not supported by NVIDIA. Please install the CUDA driver manually https://docs.nvidia.com/cuda/cuda-installation-guide-linux/"
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-7-centos-7
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-8-rocky-8
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-9-rocky-9
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#fedora
install_cuda_driver_yum() {
status 'Installing NVIDIA repository...'
case $PACKAGE_MANAGER in
yum)
$SUDO $PACKAGE_MANAGER -y install yum-utils
if curl -I --silent --fail --location "https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-$1$2.repo" >/dev/null ; then
$SUDO $PACKAGE_MANAGER-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-$1$2.repo
else
error $CUDA_REPO_ERR_MSG
fi
;;
dnf)
if curl -I --silent --fail --location "https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-$1$2.repo" >/dev/null ; then
$SUDO $PACKAGE_MANAGER config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-$1$2.repo
else
error $CUDA_REPO_ERR_MSG
fi
;;
esac
case $1 in
rhel)
status 'Installing EPEL repository...'
# EPEL is required for third-party dependencies such as dkms and libvdpau
$SUDO $PACKAGE_MANAGER -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-$2.noarch.rpm || true
;;
esac
status 'Installing CUDA driver...'
if [ "$1" = 'centos' ] || [ "$1$2" = 'rhel7' ]; then
$SUDO $PACKAGE_MANAGER -y install nvidia-driver-latest-dkms
fi
$SUDO $PACKAGE_MANAGER -y install cuda-drivers
}
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#ubuntu
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#debian
install_cuda_driver_apt() {
status 'Installing NVIDIA repository...'
if curl -I --silent --fail --location "https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-keyring_1.1-1_all.deb" >/dev/null ; then
curl -fsSL -o $TEMP_DIR/cuda-keyring.deb https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-keyring_1.1-1_all.deb
else
error $CUDA_REPO_ERR_MSG
fi
case $1 in
debian)
status 'Enabling contrib sources...'
$SUDO sed 's/main/contrib/' < /etc/apt/sources.list | $SUDO tee /etc/apt/sources.list.d/contrib.list > /dev/null
if [ -f "/etc/apt/sources.list.d/debian.sources" ]; then
$SUDO sed 's/main/contrib/' < /etc/apt/sources.list.d/debian.sources | $SUDO tee /etc/apt/sources.list.d/contrib.sources > /dev/null
fi
;;
esac
status 'Installing CUDA driver...'
$SUDO dpkg -i $TEMP_DIR/cuda-keyring.deb
$SUDO apt-get update
[ -n "$SUDO" ] && SUDO_E="$SUDO -E" || SUDO_E=
DEBIAN_FRONTEND=noninteractive $SUDO_E apt-get -y install cuda-drivers -q
}
if [ ! -f "/etc/os-release" ]; then
error "Unknown distribution. Skipping CUDA installation."
fi
. /etc/os-release
OS_NAME=$ID
OS_VERSION=$VERSION_ID
PACKAGE_MANAGER=
for PACKAGE_MANAGER in dnf yum apt-get; do
if available $PACKAGE_MANAGER; then
break
fi
done
if [ -z "$PACKAGE_MANAGER" ]; then
error "Unknown package manager. Skipping CUDA installation."
fi
if ! check_gpu nvidia-smi || [ -z "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")" ]; then
case $OS_NAME in
centos|rhel) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -d '.' -f 1) ;;
rocky) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -c1) ;;
fedora) [ $OS_VERSION -lt '39' ] && install_cuda_driver_yum $OS_NAME $OS_VERSION || install_cuda_driver_yum $OS_NAME '39';;
amzn) install_cuda_driver_yum 'fedora' '37' ;;
debian) install_cuda_driver_apt $OS_NAME $OS_VERSION ;;
ubuntu) install_cuda_driver_apt $OS_NAME $(echo $OS_VERSION | sed 's/\.//') ;;
*) exit ;;
esac
fi
if ! lsmod | grep -q nvidia || ! lsmod | grep -q nvidia_uvm; then
KERNEL_RELEASE="$(uname -r)"
case $OS_NAME in
rocky) $SUDO $PACKAGE_MANAGER -y install kernel-devel kernel-headers ;;
centos|rhel|amzn) $SUDO $PACKAGE_MANAGER -y install kernel-devel-$KERNEL_RELEASE kernel-headers-$KERNEL_RELEASE ;;
fedora) $SUDO $PACKAGE_MANAGER -y install kernel-devel-$KERNEL_RELEASE ;;
debian|ubuntu) $SUDO apt-get -y install linux-headers-$KERNEL_RELEASE ;;
*) exit ;;
esac
NVIDIA_CUDA_VERSION=$($SUDO dkms status | awk -F: '/added/ { print $1 }')
if [ -n "$NVIDIA_CUDA_VERSION" ]; then
$SUDO dkms install $NVIDIA_CUDA_VERSION
fi
if lsmod | grep -q nouveau; then
status 'Reboot to complete NVIDIA CUDA driver install.'
exit 0
fi
$SUDO modprobe nvidia
$SUDO modprobe nvidia_uvm
fi
# make sure the NVIDIA modules are loaded on boot with nvidia-persistenced
if available nvidia-persistenced; then
$SUDO touch /etc/modules-load.d/nvidia.conf
MODULES="nvidia nvidia-uvm"
for MODULE in $MODULES; do
if ! grep -qxF "$MODULE" /etc/modules-load.d/nvidia.conf; then
echo "$MODULE" | $SUDO tee -a /etc/modules-load.d/nvidia.conf > /dev/null
fi
done
fi
status "NVIDIA GPU ready."
install_success
Step 4: Modify the install.sh script
Place the Ollama package and install.sh in the same directory. Two things need to change:
- the Ollama download URL
- the directory the installer takes the Ollama package from
First change: comment out the download command (shown here against the older, single-binary variant of install.sh; the full excerpt for the v0.3.12 .tgz bundle follows below).
status "Downloading ollama..."
## around line 65 of install.sh
#curl --fail --show-error --location --progress-bar -o $TEMP_DIR/ollama "https://ollama.com/download/ollama-linux-${ARCH}${VER_PARAM}"
Second change: install the local package instead of the downloaded one.
status "Installing ollama to $BINDIR..."
$SUDO install -o0 -g0 -m755 -d $BINDIR
## around line 73 of install.sh
#$SUDO install -o0 -g0 -m755 $TEMP_DIR/ollama $BINDIR/ollama
$SUDO install -o0 -g0 -m755 ./ollama-linux-amd64 $BINDIR/ollama
status "Installing ollama to $OLLAMA_INSTALL_DIR"
$SUDO install -o0 -g0 -m755 -d $BINDIR
$SUDO install -o0 -g0 -m755 -d "$OLLAMA_INSTALL_DIR"
LOCAL_OLLAMA_TGZ="/path/to/your/local/ollama-linux-${ARCH}.tgz" # edit this line to point at your local package
if [ -f "$LOCAL_OLLAMA_TGZ" ]; then
status "Using local Linux ${ARCH} bundle"
$SUDO tar -xzf "$LOCAL_OLLAMA_TGZ" -C "$OLLAMA_INSTALL_DIR"
BUNDLE=1
if [ "$OLLAMA_INSTALL_DIR/bin/ollama" != "$BINDIR/ollama" ] ; then
status "Making ollama accessible in the PATH in $BINDIR"
$SUDO ln -sf "$OLLAMA_INSTALL_DIR/ollama" "$BINDIR/ollama"
fi
else
status "Local file $LOCAL_OLLAMA_TGZ not found"
exit 1
fi
#if curl -I --silent --fail --location "https://ollama.com/download/ollama-linux-${ARCH}.tgz${VER_PARAM}" >/dev/null ; then
# status "Downloading Linux ${ARCH} bundle"
# curl --fail --show-error --location --progress-bar \
# "https://ollama.com/download/ollama-linux-${ARCH}.tgz${VER_PARAM}" | \
# $SUDO tar -xzf - -C "$OLLAMA_INSTALL_DIR"
# BUNDLE=1
# if [ "$OLLAMA_INSTALL_DIR/bin/ollama" != "$BINDIR/ollama" ] ; then
# status "Making ollama accessible in the PATH in $BINDIR"
# $SUDO ln -sf "$OLLAMA_INSTALL_DIR/ollama" "$BINDIR/ollama"
# fi
#else
# status "Downloading Linux ${ARCH} CLI"
# curl --fail --show-error --location --progress-bar -o "$TEMP_DIR/ollama"\
# "https://ollama.com/download/ollama-linux-${ARCH}${VER_PARAM}"
# $SUDO install -o0 -g0 -m755 $TEMP_DIR/ollama $OLLAMA_INSTALL_DIR/ollama
# BUNDLE=0
# if [ "$OLLAMA_INSTALL_DIR/ollama" != "$BINDIR/ollama" ] ; then
# status "Making ollama accessible in the PATH in $BINDIR"
# $SUDO ln -sf "$OLLAMA_INSTALL_DIR/ollama" "$BINDIR/ollama"
# fi
#fi
Step 5: Run the install.sh script
Run install.sh to start the Ollama installation:
# Run the install.sh script (requires sudo privileges)
./install.sh
# If you get a permission-denied error, make the script executable first:
chmod +x install.sh
# If you get: bash: ./install.sh: /bin/sh^M: bad interpreter: No such file or directory (Windows CRLF line endings), run:
sed -i 's/\r$//' install.sh
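Once the script finishes, a quick sanity check (assuming the binary landed on your PATH and the service started):
# print the installed version
ollama -v
# the API root should answer "Ollama is running"
curl http://127.0.0.1:11434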
Step 6: Configure the model download directory
# Edit the shell profile
vim ~/.bashrc
# Point the OLLAMA_MODELS environment variable at a custom path
### ollama model dir; change to your own path (default: /usr/share/ollama/.ollama/models)
export OLLAMA_MODELS=/data/ollama_cache
# Create the target directory and copy the contents (the blobs and manifests folders) of /usr/share/ollama/.ollama/models into it
sudo mkdir -p /data/ollama_cache
sudo cp -r /usr/share/ollama/.ollama/models/. /data/ollama_cache/
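Reload the shell configuration, and make sure the account the service runs as can use the new directory (ownership comes up again in the troubleshooting steps of Section 3):
source ~/.bashrc
# the systemd service runs as the ollama user
sudo chown -R ollama:ollama /data/ollama_cache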
Step 7: Run a model
For example, Qwen:
# The model must already be present in the OLLAMA_MODELS directory
# ollama run <model-name>
ollama run qwen
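Besides the interactive CLI, the REST API that install.sh reported at 127.0.0.1:11434 can be called directly; a minimal non-streaming request (the model name must match an entry in ollama list):
curl http://127.0.0.1:11434/api/generate -d '{
  "model": "qwen",
  "prompt": "Why is the sky blue?",
  "stream": false
}'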
Step 8: Stop the Ollama service
service ollama stop
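Since install.sh registered a systemd unit, the systemctl equivalents also work:
sudo systemctl stop ollama      # stop the service
sudo systemctl disable ollama   # optionally keep it from starting at boot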
3. Offline Deployment of Llama 3.1 70B
Step 1: Download the model
Model source downloads (mainland China): https://modelscope.cn/models
Model source downloads (international): https://huggingface.co/models
Download the Meta-Llama-3.1-70B-Instruct model to your local machine.
Step 2: Upload the model
Use FTP to transfer the model files from your local machine to the server.
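Any transfer tool works; a sketch using scp instead (host and paths are illustrative):
# recursively copy the model directory into the server's working directory
scp -r ./Meta-Llama-3.1-70B-Instruct user@server:/data/modelfiles/llama-3.1-70b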
Step 3: Create a Modelfile
# Create a file named llama-3.1-70b.Modelfile with the following content:
FROM ./llama-3.1-70b
TEMPLATE """{{- if or .System .Tools }}<|start_header_id|>system<|end_header_id|>
{{- if .System }}
{{ .System }}
{{- end }}
{{- if .Tools }}
Cutting Knowledge Date: December 2023
When you receive a tool call response, use the output to format an answer to the original user question.
You are a helpful assistant with tool calling capabilities.
{{- end }}<|eot_id|>
{{- end }}
{{- range $i, $_ := .Messages }}
{{- $last := eq (len (slice $.Messages $i)) 1 }}
{{- if eq .Role "user" }}<|start_header_id|>user<|end_header_id|>
{{- if and $.Tools $last }}
Given the following functions, please respond with a JSON for a function call with its proper arguments that best answers the given prompt.
Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. Do not use variables.
{{ range $.Tools }}
{{- . }}
{{ end }}
Question: {{ .Content }}<|eot_id|>
{{- else }}
{{ .Content }}<|eot_id|>
{{- end }}{{ if $last }}<|start_header_id|>assistant<|end_header_id|>
{{ end }}
{{- else if eq .Role "assistant" }}<|start_header_id|>assistant<|end_header_id|>
{{- if .ToolCalls }}
{{ range .ToolCalls }}
{"name": "{{ .Function.Name }}", "parameters": {{ .Function.Arguments }}}{{ end }}
{{- else }}
{{ .Content }}
{{- end }}{{ if not $last }}<|eot_id|>{{ end }}
{{- else if eq .Role "tool" }}<|start_header_id|>ipython<|end_header_id|>
{{ .Content }}<|eot_id|>{{ if $last }}<|start_header_id|>assistant<|end_header_id|>
{{ end }}
{{- end }}
{{- end }}
"""
PARAMETER stop "<|start_header_id|>"
PARAMETER stop "<|end_header_id|>"
PARAMETER stop "<|eot_id|>"
The main Modelfile directives:
- FROM (required): the model or model source files to build from
- PARAMETER: runtime parameters for the model
- TEMPLATE: the prompt template applied to requests sent to the model
- SYSTEM: the model's default system message
- ADAPTER: a (Q)LoRA adapter to apply to the model
- LICENSE: the license to attach
- MESSAGE: seed message history
Modelfile contents differ from model to model; see the parameter reference in the Ollama docs:
https://github.com/ollama/ollama/blob/main/docs/modelfile.md
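For contrast with the full chat template above, a minimal illustrative Modelfile (the parameter values here are placeholders, not tuned recommendations):
# minimal Modelfile sketch
FROM ./llama-3.1-70b
PARAMETER temperature 0.7
PARAMETER num_ctx 4096
SYSTEM """You are a concise, helpful assistant."""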
Step 4: Create the model
ollama create <your-model-name> -f <./Modelfile>
# Create a directory for the model source files and the Modelfile
sudo mkdir /data/modelfiles
# Upload the model files to /data/modelfiles/llama-3.1-70b/
# Upload llama-3.1-70b.Modelfile to /data/modelfiles/
# Enter the working directory
cd /data/modelfiles
# Run the create command
ollama create llama3.1:70b -f ./llama-3.1-70b.Modelfile
# Check the result
ollama list
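To confirm the template and parameters were baked in as intended, the created model can also be inspected:
# print the Modelfile that Ollama reconstructs for the new model
ollama show llama3.1:70b --modelfile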
During this step both the Docker storage path and /tmp ran out of space. First, move the Docker data root:
sudo systemctl stop docker
sudo mkdir -p /etc/docker
sudo vim /etc/docker/daemon.json
{
"data-root": "/data/docker"
}
sudo mv /var/lib/docker /data/docker
sudo systemctl start docker
docker info | grep "Docker Root Dir"
Then relocate the temporary directory. References:
https://github.com/ollama/ollama/issues/5026
https://ollama.fan/resources/troubleshooting/#llm-libraries
https://ollama.qianniu.city/doc/Ollama%20%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98%E8%A7%A3%E7%AD%94.html
-- Method 1
# Edit the shell profile
vim ~/.bashrc
# Point the OLLAMA_TMPDIR environment variable at a custom path
### ollama tmpdir; change to your own path (default: /tmp)
export OLLAMA_TMPDIR=/data/tmp
# Key point: also set this variable
export TMPDIR=/data/tmp
# Reload the configuration
source ~/.bashrc
sudo systemctl daemon-reload
sudo systemctl restart ollama.service
sudo systemctl status ollama
Note that variables exported in ~/.bashrc only reach processes started from your shell (e.g. a manually launched ollama serve); the systemd service does not read ~/.bashrc, which is what Method 2 addresses.
-- Method 2
# Alternatively: sudo systemctl edit ollama.service
sudo vim /etc/systemd/system/ollama.service
sudo vim /etc/systemd/system/ollama.service
[Service]
Environment="OLLAMA_TMPDIR=/data/tmp"
Environment="OLLAMA_MODELS=/data/ollama_cache"
Environment="TMPDIR=/data/tmp"
Environment="TEMP=/data/tmp"
cd /data
sudo chown -R ollama:ollama tmp
sudo chown -R ollama:ollama ollama_cache
sudo systemctl daemon-reload
sudo systemctl restart ollama
The service failed to start; the logs showed a permissions problem:
# Check the logs
# journalctl -u ollama
sudo chmod -R 777 /data/ollama_cache/blobs
sudo chmod -R 777 /data/tmp
(chmod -R 777 is a blunt fix; giving ownership to ollama:ollama as above is normally sufficient.) Re-running the model creation now succeeds.
Step 5: Run the model
ollama run <your-model-name>
# Run the llama3.1:70b model created in Step 4
sudo ollama run llama3.1:70b
# Exit the session with
/bye
Step 6: Enabling GPUs
If the system has multiple NVIDIA GPUs and you want to limit Ollama to a subset of them, set CUDA_VISIBLE_DEVICES to a comma-separated list of GPUs. Numeric IDs work, but their order can change, so UUIDs are more reliable; list them with nvidia-smi -L. To ignore the GPUs entirely and force CPU-only mode, use an invalid GPU ID (e.g. "-1").
# List the GPUs
nvidia-smi -L
GPU 0: NVIDIA GeForce RTX 4090 (UUID: GPU-d52b63db-f09a-042f-57b7-37821b0ef749)
GPU 1: NVIDIA GeForce RTX 4090 (UUID: GPU-1b105eed-99be-b347-62bb-1d71b4e1daa3)
GPU 2: NVIDIA GeForce RTX 4090 (UUID: GPU-3ef73b84-a087-d6ea-b607-55797c2058ea)
GPU 3: NVIDIA GeForce RTX 4090 (UUID: GPU-4f8c7d53-5352-f22a-a477-00022499a1da)
GPU 4: NVIDIA GeForce RTX 4090 (UUID: GPU-0d24d3af-14ba-1663-911f-7be082ec3991)
GPU 5: NVIDIA GeForce RTX 4090 (UUID: GPU-81e7f2e2-3461-b33a-5c38-a5ba0cd6f2c4)
GPU 6: NVIDIA GeForce RTX 4090 (UUID: GPU-38529f01-1ebd-222e-8d5b-dd2d0ccd4aac)
GPU 7: NVIDIA GeForce RTX 4090 (UUID: GPU-c25feb89-f89d-afe8-263c-d7887b3b7c31)
# Check the logs with: journalctl -u ollama
# Lower OLLAMA_KEEP_ALIVE from the default 5m0s to 2m0s so idle models release GPU memory sooner
sudo vim /etc/systemd/system/ollama.service
[Service]
...
Environment="CUDA_VISIBLE_DEVICES=GPU-d52b63db-f09a-042f-57b7-37821b0ef749,GPU-1b105eed-99be-b347-62bb-1d71b4e1daa3,GPU-3ef73b84-a087-d6ea-b607-55797c2058ea,GPU-4f8c7d53-5352-f22a-a477-00022499a1da,GPU-0d24d3af-14ba-1663-911f-7be082ec3991,GPU-81e7f2e2-3461-b33a-5c38-a5ba0cd6f2c4,GPU-38529f01-1ebd-222e-8d5b-dd2d0ccd4aac,GPU-c25feb89-f89d-afe8-263c-d7887b3b7c31"
Environment="OLLAMA_KEEP_ALIVE=2m0s"
sudo systemctl daemon-reload
sudo systemctl restart ollama
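After the restart, a quick way to confirm the settings took effect (log wording varies by version):
# confirm the unit picked up the new environment
systemctl show ollama | grep -E 'CUDA_VISIBLE_DEVICES|OLLAMA_KEEP_ALIVE'
# watch which GPUs Ollama uses while a model loads
journalctl -u ollama -f
nvidia-smi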