Adrian Gunnar Lauterer 2025-01-29 12:51:19 +01:00
parent 8ca7bb1890
commit e4cbc21843
3 changed files with 61 additions and 17 deletions

View File

@@ -6,6 +6,11 @@
enable32Bit = true;
};
nixpkgs.config = {
allowUnfree = true;
cudaSupport = true;
};
# Load nvidia driver for Xorg and Wayland
services.xserver.videoDrivers = ["nvidia"];
boot.initrd.kernelModules = [ "nvidia" ];
@@ -18,7 +23,7 @@
# Use the NVidia open source kernel module (not to be confused with the independent third-party "nouveau" open source driver).
# Currently alpha-quality/buggy, so false is the recommended setting.
open = false;
open = false; # Need the proprietary kernel module for CUDA.
# Enable the Nvidia settings menu, accessible via `nvidia-settings`.
#nvidiaSettings = true;
@@ -29,11 +34,24 @@
# Enable the CUDA toolkit
# Install packages
environment.systemPackages = with pkgs; [
cudaPackages.cudnn
environment.systemPackages = with pkgs; [
cudaPackages.cudatoolkit
cudaPackages.cudnn
nvtopPackages.nvidia
gcc
cudaPackages.nccl
cmake
#llama-cpp
#python3Packages.pip
#cudaPackages.cuda_cudart
#xgboostWithCuda
#libxcrypt-legacy
#cudaPackages.setupCudaHook
#cudaPackages.markForCudatoolkitRootHook
#cudaPackages.cuda_cudart.static
cudaPackages.libcublas
#cudaPackages.tensorrt_8_6_0 # has to be added to the store manually; a pain because of the license agreement and garbage collection
];
nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [
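The hunk is truncated here, so the package names inside the allowUnfreePredicate list are not visible in this commit. As a rough sketch (the entries below are illustrative assumptions, not taken from this diff), such a whitelist usually names the unfree NVIDIA/CUDA packages explicitly:

  nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [
    # Hypothetical entries for illustration only.
    "nvidia-x11"
    "nvidia-settings"
    "cuda_cudart"
    "cudnn"
    "libcublas"
  ];

Note that with allowUnfree = true already set above, the predicate is redundant; it only matters if allowUnfree is later dropped.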

View File

@@ -4,14 +4,37 @@
pkgs.unstable.ollama
];
services.ollama.enable = true;
services.ollama.package = pkgs.unstable.ollama;
services.ollama.host = "0.0.0.0";
services.ollama.port = 11434;
services.ollama.models = "/var/lib/ollama/models";
services.ollama.home = "/var/lib/ollama";
services.ollama = {
enable = true;
package = pkgs.unstable.ollama;
host = "0.0.0.0";
openFirewall = true;
port = 11434;
home = "/var/lib/ollama";
# Possibly a flawed idea: every model listed here is pulled as soon as the service starts.
loadModels = [
"llama3.2"
"gemma2:2b"
"qwen2.5:3b"
"llama3.2-vision"
"llava-phi3"
"llava-llama3"
"moondream"
"minicpm-v"
"llama3.1"
"mistral-nemo"
"phi4"
"zylonai/multilingual-e5-large"
"nomic-embed-text"
"snowflake-arctic-embed"
];
};
# Possibly a flawed idea; it may be simpler to just set nixpkgs.config.cudaSupport / rocmSupport per host.
services.ollama.acceleration = lib.mkDefault ( let
hostname = config.networking.hostName;
in
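The diff context stops before the body of this let expression, so the hostname-to-backend mapping is not shown. A minimal sketch of what such a per-host default typically looks like, with hostnames that are purely illustrative assumptions:

  services.ollama.acceleration = lib.mkDefault (
    let
      hostname = config.networking.hostName;
    in
      # Hypothetical hostnames for illustration; the real mapping is not visible in this hunk.
      if hostname == "gpu-nvidia-box" then "cuda"
      else if hostname == "gpu-amd-box" then "rocm"
      else false
  );

As the comment above suggests, setting nixpkgs.config.cudaSupport / rocmSupport (or picking the matching ollama package) per host may be the simpler approach.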

View File

@@ -3,13 +3,16 @@
environment.systemPackages = [
pkgs.unstable.open-webui
pkgs.gvisor
pkgs.bash
];
services.tika.enable=true;
services.tika.openFirewall=true;
services.tika.listenAddress = "localhost";
services.tika = {
enable = true;
openFirewall = true;
listenAddress = "localhost";
enableOcr = true;
};
services.open-webui = {
enable = true;
@@ -19,7 +22,7 @@
host = "0.0.0.0";
openFirewall = true;
enviroment = {
environment = {
ANONYMIZED_TELEMETRY = "False";
DO_NOT_TRACK = "True";
SCARF_NO_ANALYTICS = "True";
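The hunk ends mid-block, so the rest of the environment attribute set is not visible here. A sketch of how such a block is commonly completed, where OLLAMA_BASE_URL is an assumption for illustration and not something this commit is shown to set:

  services.open-webui.environment = {
    ANONYMIZED_TELEMETRY = "False";
    DO_NOT_TRACK = "True";
    SCARF_NO_ANALYTICS = "True";
    # Assumption: point Open WebUI at the local Ollama instance configured in the previous file.
    OLLAMA_BASE_URL = "http://127.0.0.1:11434";
  };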