mirror of https://github.com/adrlau/nix-dotfiles.git
synced 2025-02-08 16:00:50 +01:00
commit e4cbc21843 (parent 8ca7bb1890)

ai

@@ -5,6 +5,11 @@
    enable = true;
    enable32Bit = true;
  };

  nixpkgs.config = {
    allowUnfree = true;
    cudaSupport = true;
  };

  # Load nvidia driver for Xorg and Wayland
  services.xserver.videoDrivers = ["nvidia"];
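
For context, the options in this hunk normally sit inside a larger graphics/NVIDIA module whose surrounding attribute names are not shown here. A minimal sketch of how such a module usually looks, assuming the standard NixOS options (hardware.graphics and hardware.nvidia); the exact nesting is an assumption, not part of this commit:

  { config, lib, pkgs, ... }:
  {
    # The two `enable` lines above most likely belong to this block.
    hardware.graphics = {
      enable = true;
      enable32Bit = true;
    };

    # Build CUDA-aware packages (ollama, llama-cpp, torch, ...) with CUDA support.
    nixpkgs.config = {
      allowUnfree = true;
      cudaSupport = true;
    };

    # Load the nvidia driver for Xorg and Wayland.
    services.xserver.videoDrivers = [ "nvidia" ];

    hardware.nvidia = {
      modesetting.enable = true;  # needed for Wayland compositors
      open = false;               # proprietary kernel module; see the next hunk
      package = config.boot.kernelPackages.nvidiaPackages.stable;
    };
  }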

@@ -18,7 +23,7 @@

  # Use the NVidia open source kernel module (not to be confused with the independent third-party "nouveau" open source driver).
  # Currently alpha-quality/buggy, so false is currently the recommended setting.
  open = false;
  open = false; #need proprietary for cuda.

  # Enable the Nvidia settings menu, accessible via `nvidia-settings`.
  #nvidiaSettings = true;

@@ -29,11 +34,24 @@

  # Enable the CUDA toolkit
  #install packages
  environment.systemPackages = with pkgs; [
    cudaPackages.cudnn
  environment.systemPackages = with pkgs; [
    cudaPackages.cudatoolkit
    cudaPackages.cudnn
    nvtopPackages.nvidia
    gcc
    cudaPackages.nccl
    cmake
    #llama-cpp
    #python3Packages.pip
    #cudaPackages.cuda_cudart
    #xgboostWithCuda
    #libxcrypt-legacy
    #cudaPackages.setupCudaHook
    #cudaPackages.markForCudatoolkitRootHook
    #cudaPackages.cuda_cudart.static
    pkgs.cudaPackages.libcublas
    #cudaPackages.tensorrt_8_6_0 #needs to be added manually, to the store and is a pain because of the license agreement and garbage collection

  ];

  nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [
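
The list behind this predicate is cut off by the hunk, so the actual entries are not visible. Purely as an illustration of how this pattern is normally completed (the names below are hypothetical examples, not taken from this commit), it would look something like:

  nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [
    # hypothetical unfree CUDA packages typically whitelisted this way
    "cudatoolkit"
    "cudnn"
    "libcublas"
  ];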

@@ -4,21 +4,44 @@
    pkgs.unstable.ollama
  ];

  services.ollama.enable = true;
  services.ollama.package = pkgs.unstable.ollama;
  services.ollama.host = "0.0.0.0";
  services.ollama.port = 11434;
  services.ollama.models = "/var/lib/ollama/models";
  services.ollama.home = "/var/lib/ollama";

  #possibly a flawed idea.
  services.ollama = {
    enable = true;
    package = pkgs.unstable.ollama;
    host = "0.0.0.0";
    openFirewall = true;
    port = 11434;
    home = "/var/lib/ollama";

    loadModels = [
      "llama3.2"
      "gemma2:2b"
      "qwen2.5:3b"

      "llama3.2-vision"
      "llava-phi3"
      "llava-llama3"
      "moondream"
      "minicpm-v"

      "llama3.1"
      "mistral-nemo"
      "phi4"

      "zylonai/multilingual-e5-large"
      "nomic-embed-text"
      "snowflake-arctic-embed"

    ];
  };
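
A hedged sketch of how this list can be extended from another module (for example a host-specific one); NixOS merges list options by concatenation, and the model name below is a hypothetical placeholder, not part of this commit:

  # in a host-specific module, merged with the loadModels list above
  services.ollama.loadModels = [ "llama3.3:70b" ];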

  #possibly a flawed idea, should just set cudaSupport and rocm support.
  services.ollama.acceleration = lib.mkDefault ( let
    hostname = config.networking.hostName;
  in
    if hostname == "galadriel" then "cuda"
    else if hostname == "aragorn" then "rocm"
    else null);
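
The comment above already names the alternative: set the nixpkgs flags per host and let services.ollama.acceleration fall back to its package default. A sketch of that alternative, assuming one module per host (only the hostnames come from this commit):

  # galadriel (NVIDIA host)
  nixpkgs.config.cudaSupport = true;

  # aragorn (AMD host)
  nixpkgs.config.rocmSupport = true;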

  services.nginx.virtualHosts."ollama.${config.networking.hostName}.${config.networking.domain}" = {
    forceSSL = true;
    #useACMEHost = config.networking.domain; #not sure if this will work, unless
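
The hunk ends before the proxied location, so the rest of this virtual host is not visible here. A hedged sketch of how such a reverse proxy onto the ollama service above typically continues (assuming ollama keeps listening on 127.0.0.1:11434 on the same machine):

  services.nginx.virtualHosts."ollama.${config.networking.hostName}.${config.networking.domain}" = {
    forceSSL = true;
    locations."/" = {
      proxyPass = "http://127.0.0.1:11434";
      # streaming responses work better without proxy buffering
      extraConfig = ''
        proxy_buffering off;
      '';
    };
  };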

@@ -3,13 +3,16 @@
  environment.systemPackages = [
    pkgs.unstable.open-webui
    pkgs.gvisor
    pkgs.bash

  ];

  services.tika.enable=true;
  services.tika.openFirewall=true;
  services.tika.listenAddress = "localhost";

  services.tika = {
    enable=true;
    openFirewall=true;
    listenAddress = "localhost";
    enableOcr = true;
  };

  services.open-webui = {
    enable = true;

@@ -19,7 +22,7 @@
    host = "0.0.0.0";
    openFirewall = true;

    enviroment = {
    environment = {
      ANONYMIZED_TELEMETRY = "False";
      DO_NOT_TRACK = "True";
      SCARF_NO_ANALYTICS = "True";
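
Since Tika is enabled above, the usual way to make Open WebUI actually use it for document extraction is two extra variables in this same environment block. A hedged sketch of the extended block (CONTENT_EXTRACTION_ENGINE and TIKA_SERVER_URL follow Open WebUI's documented settings as far as I know; they are not part of this commit, and the URL assumes Tika's default port 9998 on the listenAddress configured earlier):

    environment = {
      ANONYMIZED_TELEMETRY = "False";
      DO_NOT_TRACK = "True";
      SCARF_NO_ANALYTICS = "True";

      # route Open WebUI document/RAG extraction through the local Tika server
      CONTENT_EXTRACTION_ENGINE = "tika";
      TIKA_SERVER_URL = "http://localhost:9998";
    };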