mirror of https://github.com/adrlau/nix-dotfiles.git (synced 2026-04-22 23:10:44 +02:00)
@@ -6,6 +6,8 @@ nix --extra-experimental-features "nix-command flakes" build ".#nixosConfigurati

```nixos-rebuild switch --update-input nixpkgs --update-input unstable --no-write-lock-file --refresh --flake git+https://github.com/adrlau/nix-dotfiles.git --upgrade```

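For a local checkout, a shorter variant should also work, assuming the target host name matches a flake output (e.g. galadriel): ```sudo nixos-rebuild switch --flake .#galadriel```
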
show flake attrs:

```nix flake show .#```

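To list just the host names instead of the full output tree, ```nix eval .#nixosConfigurations --apply builtins.attrNames``` should print the configuration attribute names.
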
@@ -14,3 +16,5 @@ why depends:

```nix why-depends /run/current-system /nix/store/...```
```nix why-depends .#```
```nix why-depends .#nixosConfigurations.galadriel nixpkgs#python312Packages.botorch```
```nix why-depends .\#nixosConfigurations.eowyn.config.system.build.toplevel pkgs.python3.12-libarcus-4.12.0 --impure```

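Note that why-depends takes two installables (the dependent and the suspected dependency), so the bare ```nix why-depends .#``` form above still needs both arguments filled in. A typical round trip, reusing the galadriel example: first ```nix build .#nixosConfigurations.galadriel.config.system.build.toplevel```, then ```nix why-depends ./result nixpkgs#python312Packages.botorch```.
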
```diff
@@ -5,6 +5,11 @@
     enable = true;
     enable32Bit = true;
   };
+
+  nixpkgs.config = {
+    allowUnfree = true;
+    cudaSupport = true;
+  };
 
   # Load nvidia driver for Xorg and Wayland
   services.xserver.videoDrivers = ["nvidia"];
```
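The enable/enable32Bit lines in this hunk presumably sit inside the graphics block; a minimal sketch of the full unit, assuming a recent nixpkgs where hardware.opengl was renamed to hardware.graphics:

```nix
{
  # Userspace GPU drivers; enable32Bit adds the 32-bit libraries
  # (needed by e.g. Steam and other 32-bit applications).
  hardware.graphics = {
    enable = true;
    enable32Bit = true;
  };
}
```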
```diff
@@ -18,7 +23,7 @@
 
   # Use the NVidia open source kernel module (not to be confused with the independent third-party "nouveau" open source driver).
   # Currently alpha-quality/buggy, so false is currently the recommended setting.
-  open = false;
+  open = false; #need proprietary for cuda.
 
   # Enable the Nvidia settings menu, accessible via `nvidia-settings`.
   #nvidiaSettings = true;
```
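The open = false line belongs to the hardware.nvidia block; for orientation, a minimal sketch of the usual surrounding options (values here are standard-setup assumptions, not necessarily this repo's exact choices):

```nix
{ config, ... }:
{
  hardware.nvidia = {
    # Kernel modesetting; required by most Wayland compositors.
    modesetting.enable = true;
    # Proprietary kernel module; the open module is still maturing,
    # and this config needs the proprietary one for CUDA anyway.
    open = false;
    # Expose the `nvidia-settings` GUI.
    nvidiaSettings = true;
    package = config.boot.kernelPackages.nvidiaPackages.stable;
  };
}
```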
```diff
@@ -29,11 +34,24 @@
   # Enable the CUDA toolkit
   #install packages
-  environment.systemPackages = with pkgs; [
-    cudaPackages.cudnn
+  environment.systemPackages = with pkgs; [
+    cudaPackages.cudatoolkit
+    cudaPackages.cudnn
+    nvtopPackages.nvidia
+    gcc
+    cudaPackages.nccl
+    cmake
+    #llama-cpp
+    #python3Packages.pip
+    #cudaPackages.cuda_cudart
+    #xgboostWithCuda
+    #libxcrypt-legacy
+    #cudaPackages.setupCudaHook
+    #cudaPackages.markForCudatoolkitRootHook
+    #cudaPackages.cuda_cudart.static
+    pkgs.cudaPackages.libcublas
+    #cudaPackages.tensorrt_8_6_0 #needs to be added manually, to the store and is a pain because of the license agreement and garbage collection
 
   ];
 
   nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [
```
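The allowUnfreePredicate list is cut off by the hunk boundary. Since allowUnfree = true is already set in the first hunk, a per-package predicate only matters if that blanket switch goes away; the shape would be roughly the following (entries are illustrative, the real list is elided above):

```nix
{ lib, ... }:
{
  # Allow only named unfree packages instead of allowUnfree = true.
  nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [
    # hypothetical entries; the actual list is not shown in this hunk
    "nvidia-x11"
    "nvidia-settings"
    "cudnn"
  ];
}
```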
```diff
@@ -1,27 +0,0 @@
-{ config, pkgs, lib, ... }:
-let
-  openWebuiImage = "ghcr.io/open-webui/open-webui:main";
-in
-{
-  virtualisation.oci-containers = {
-    backend = {
-      image = openWebuiImage;
-      cmd = [ "-d" "--network=host" "-v" "open-webui:/app/backend/data" "--name" "open-webui" "--restart" "always" ];
-      volumes = [ "open-webui:/app/backend/data" ];
-      environment = {
-        OLLAMA_BASE_URL = "http://127.0.0.1:11434";
-      };
-      restart = "always";
-    };
-  };
-
-  services.nginx.virtualHosts."chat.${config.networking.hostName}.${config.networking.domain}" = {
-    forceSSL = true;
-    #useACMEHost = config.networking.domain; #not sure if this will work, unless
-    locations."/" = {
-      proxyWebsockets = true;
-      proxyPass = "http://${config.services.ollama.listenAddress}";
-    };
-    basicAuthFile = config.sops.secrets."nginx/defaultpass".path;
-  };
-}
```
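This deleted file is superseded by the native services.open-webui module in the hunks below. For reference, the removed snippet also misused the oci-containers module: backend is a string option ("docker" or "podman"), and containers are declared under containers.<name>, so a working container-based shape would have been roughly:

```nix
{
  virtualisation.oci-containers = {
    backend = "podman"; # a string, not an attrset of container options
    containers.open-webui = {
      image = "ghcr.io/open-webui/open-webui:main";
      volumes = [ "open-webui:/app/backend/data" ];
      environment.OLLAMA_BASE_URL = "http://127.0.0.1:11434";
      # host networking, as the old cmd list intended
      extraOptions = [ "--network=host" ];
    };
  };
}
```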
```diff
@@ -4,21 +4,44 @@
     pkgs.unstable.ollama
   ];
 
-  services.ollama.enable = true;
-  services.ollama.package = pkgs.unstable.ollama;
-  services.ollama.host = "0.0.0.0";
-  services.ollama.port = 11434;
-  services.ollama.models = "/var/lib/ollama/models";
-  services.ollama.home = "/var/lib/ollama";
-
+  #possibly a flawed idea.
+  services.ollama = {
+    enable = true;
+    package = pkgs.unstable.ollama;
+    host = "0.0.0.0";
+    openFirewall = true;
+    port = 11434;
+    home = "/var/lib/ollama";
+
+    loadModels = [
+      "llama3.2"
+      "gemma2:2b"
+      "qwen2.5:3b"
+
+      "llama3.2-vision"
+      "llava-phi3"
+      "llava-llama3"
+      "moondream"
+      "minicpm-v"
+
+      "llama3.1"
+      "mistral-nemo"
+      "phi4"
+
+      "zylonai/multilingual-e5-large"
+      "nomic-embed-text"
+      "snowflake-arctic-embed"
+    ];
+  };
+
   #possibly a flawed idea, should just set cudaSupport and rocm support.
   services.ollama.acceleration = lib.mkDefault (let
     hostname = config.networking.hostName;
   in
     if hostname == "galadriel" then "cuda"
     else if hostname == "aragorn" then "rocm"
     else null);
 
 
   services.nginx.virtualHosts."ollama.${config.networking.hostName}.${config.networking.domain}" = {
     forceSSL = true;
     #useACMEHost = config.networking.domain; #not sure if this will work, unless
```
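As the "possibly a flawed idea" comment suggests, an alternative to branching on hostname is to set the flags in each host's own configuration; a sketch under that assumption (file paths hypothetical):

```nix
# hosts/galadriel/configuration.nix (hypothetical path)
{
  nixpkgs.config.cudaSupport = true;
  services.ollama.acceleration = "cuda";
}
```

```nix
# hosts/aragorn/configuration.nix (hypothetical path)
{
  nixpkgs.config.rocmSupport = true;
  services.ollama.acceleration = "rocm";
}
```

After a rebuild, ```curl http://127.0.0.1:11434/api/tags``` should list the models pulled via loadModels.
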
```diff
@@ -2,9 +2,18 @@
 {
   environment.systemPackages = [
     pkgs.unstable.open-webui
+    pkgs.gvisor
+    pkgs.bash
+
   ];
 
 
+  services.tika = {
+    enable = true;
+    openFirewall = true;
+    listenAddress = "localhost";
+    enableOcr = true;
+  };
 
   services.open-webui = {
     enable = true;
```
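Tika gives open-webui a document-extraction backend (with OCR) for RAG uploads. Pointing open-webui at it is presumably done through its environment; the variable names below follow the open-webui docs, and the port assumes Tika's default 9998:

```nix
{
  services.open-webui.environment = {
    # assumed open-webui settings; verify against the open-webui docs
    CONTENT_EXTRACTION_ENGINE = "tika";
    TIKA_SERVER_URL = "http://localhost:9998";
  };
}
```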
```diff
@@ -13,5 +22,12 @@
     host = "0.0.0.0";
     openFirewall = true;
 
+    environment = {
+      ANONYMIZED_TELEMETRY = "False";
+      DO_NOT_TRACK = "True";
+      SCARF_NO_ANALYTICS = "True";
+      PDF_EXTRACT_IMAGES = "False";
+    };
+
   };
 }
```
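With host = "0.0.0.0" and openFirewall = true the UI is reachable from the LAN; a quick smoke test from another machine is ```curl -I http://<host>:8080```, assuming the port set just above this hunk is the NixOS module's default 8080.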