update configuration of our ollama service

♥ Minnie ♥ 2025-09-26 23:28:15 +08:00
parent b0be0f9042
commit 2f545a818f
Signed by: jasmine
GPG key ID: 8563E358D4E8040E


@@ -3,6 +3,10 @@
  services.ollama = {
    enable = true;
    # User and group under which to run ollama
    user = "ollama";
    group = "ollama";
    # AMD GPU support
    acceleration = "rocm";
    # Radeon RX 5700 XT support
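The option this comment annotates falls outside the hunk. As a hedged sketch, on NixOS the RX 5700 XT (gfx1010) is usually handled with the ollama module's ROCm GFX version override; the option name and value below are an assumption based on the nixpkgs module, not something visible in this diff:

    # Assumption: gfx1010 is not detected as supported, so override the version
    services.ollama.rocmOverrideGfx = "10.1.0"; # sets HSA_OVERRIDE_GFX_VERSION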
@@ -11,9 +15,29 @@
    # Language models to pull when the service starts
    loadModels = [
      "deepseek-r1:8b"
      "gemma3:12b"
      "gemma3:4b"
      "qwen3:8b"
      "llama3:8b"
      # Coding models
      "qwen2.5-coder:7b"
    ];
    # Location to store downloaded models
    models = "/srv/ollama/models";
  };
  # Enable the Open WebUI server
  services.open-webui = {
    enable = true;
  };
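A hedged sketch of how this pairs with the Ollama service above, assuming the nixpkgs open-webui module's port and environment options; the values are illustrative defaults (Open WebUI on 8080, Ollama's API on 11434), not part of this commit:

    services.open-webui = {
      enable = true;
      port = 8080; # Open WebUI's default listen port
      # Point the UI at the local Ollama API; OLLAMA_BASE_URL is an upstream variable
      environment.OLLAMA_BASE_URL = "http://127.0.0.1:11434";
    };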
  # Mount our btrfs subvolume for model storage
  fileSystems = {
    "/srv/ollama" = {
      device = "/dev/disk/by-label/data";
      fsType = "btrfs";
      options = [ "subvol=srv-ollama" "compress=zstd" ];
    };
  };
}
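Mounting only attaches an existing subvolume, so a hedged sketch of the remaining setup, assuming the srv-ollama subvolume was created beforehand (btrfs subvolume create against the mounted data disk) and that the model directory should belong to the ollama user declared above; systemd.tmpfiles.rules is a standard NixOS option:

    systemd.tmpfiles.rules = [
      # Assumption: create /srv/ollama/models if missing and hand it to ollama
      "d /srv/ollama/models 0755 ollama ollama -"
    ];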