diff --git a/projects/invokeai/default.nix b/projects/invokeai/default.nix
index 77e22490..8cb64e23 100644
--- a/projects/invokeai/default.nix
+++ b/projects/invokeai/default.nix
@@ -1,4 +1,4 @@
-{ inputs, lib, ... }:
+{ config, inputs, lib, withSystem, ... }:
 
 {
   perSystem = { config, pkgs, ... }: let
@@ -17,4 +17,27 @@
       };
     };
   };
+
+  flake.nixosModules = let
+    packageModule = pkgAttrName: { pkgs, ... }: {
+      services.invokeai.package = withSystem pkgs.system (
+        { config, ... }: lib.mkOptionDefault config.packages.${pkgAttrName}
+      );
+    };
+  in {
+    invokeai = ./nixos;
+    invokeai-amd = {
+      imports = [
+        config.flake.nixosModules.invokeai
+        ./nixos/amd.nix
+        (packageModule "invokeai-amd")
+      ];
+    };
+    invokeai-nvidia = {
+      imports = [
+        config.flake.nixosModules.invokeai
+        (packageModule "invokeai-nvidia")
+      ];
+    };
+  };
 }
diff --git a/projects/invokeai/nixos/amd.nix b/projects/invokeai/nixos/amd.nix
new file mode 100644
index 00000000..d49aa27c
--- /dev/null
+++ b/projects/invokeai/nixos/amd.nix
@@ -0,0 +1,12 @@
+{ pkgs, ... }:
+
+{
+  systemd = {
+    # Allow "unsupported" AMD GPUs
+    services.invokeai.environment.HSA_OVERRIDE_GFX_VERSION = "10.3.0";
+    # HACK: The PyTorch build we use on ROCm wants this to exist
+    tmpfiles.rules = [
+      "L+ /opt/amdgpu - - - - ${pkgs.libdrm}"
+    ];
+  };
+}
diff --git a/projects/invokeai/nixos/default.nix b/projects/invokeai/nixos/default.nix
new file mode 100644
index 00000000..70833adf
--- /dev/null
+++ b/projects/invokeai/nixos/default.nix
@@ -0,0 +1,116 @@
+{ config, lib, ... }:
+
+let
+  inherit (lib)
+    mkIf mkOption mkEnableOption types
+    escapeShellArgs getExe optionalString
+    ;
+
+  cfg = config.services.invokeai;
+in
+
+{
+  options.services.invokeai = {
+    enable = mkEnableOption "InvokeAI Web UI for Stable Diffusion";
+
+    package = mkOption {
+      description = "Which InvokeAI package to use.";
+      type = types.package;
+    };
+
+    user = mkOption {
+      description = "Which user to run InvokeAI as.";
+      default = "invokeai";
+      type = types.str;
+    };
+
+    group = mkOption {
+      description = "Which group to run InvokeAI as.";
+      default = "invokeai";
+      type = types.str;
+    };
+
+    host = mkOption {
+      description = "Which IP address to listen on.";
+      default = "127.0.0.1";
+      type = types.str;
+    };
+
+    port = mkOption {
+      description = "Which port to listen on.";
+      default = 9090;
+      type = types.port;
+    };
+
+    dataDir = mkOption {
+      description = "Where to store InvokeAI's state.";
+      default = "/var/lib/invokeai";
+      type = types.path;
+    };
+
+    maxLoadedModels = mkOption {
+      description = "Maximum number of models to keep in VRAM at once.";
+      default = 1;
+      type = types.ints.positive;
+    };
+
+    nsfwChecker = mkEnableOption "the NSFW Checker";
+
+    precision = mkOption {
+      description = "Set model precision.";
+      default = "auto";
+      type = types.enum [ "auto" "float32" "autocast" "float16" ];
+    };
+
+    extraArgs = mkOption {
+      description = "Extra command line arguments.";
+      default = [];
+      type = with types; listOf str;
+    };
+  };
+
+  config = let
+
+    yesno = enable: text: "--${optionalString (!enable) "no-"}${text}";
+
+    cliArgs = [
+      "--web"
+      "--host" cfg.host
+      "--port" cfg.port
+      "--root_dir" cfg.dataDir
+      "--max_loaded_models" cfg.maxLoadedModels
+      (yesno cfg.nsfwChecker "nsfw_checker")
+      "--precision" cfg.precision
+    ] ++ cfg.extraArgs;
+    initialModelsPath = "${cfg.package}/${cfg.package.pythonModule.sitePackages}/invokeai/configs/INITIAL_MODELS.yaml";
+  in mkIf cfg.enable {
+    users.users = mkIf (cfg.user == "invokeai") {
+      invokeai = {
+        isSystemUser = true;
+        inherit (cfg) group;
+      };
+    };
+    users.groups = mkIf (cfg.group == "invokeai") {
+      invokeai = {};
+    };
+    systemd.services.invokeai = {
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      preStart = ''
+        ln -sf ${initialModelsPath} '${cfg.dataDir}/configs/INITIAL_MODELS.yaml'
+        cp -L --no-clobber --no-preserve=mode ${initialModelsPath} '${cfg.dataDir}/configs/models.yaml'
+      '';
+      environment.HOME = "${cfg.dataDir}/.home";
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${getExe cfg.package} ${escapeShellArgs cliArgs}";
+      };
+    };
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' 0755 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.dataDir}/configs' 0755 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.dataDir}/.home' 0750 ${cfg.user} ${cfg.group} - -"
+    ];
+  };
+}
diff --git a/projects/koboldai/default.nix b/projects/koboldai/default.nix
index 97a683c8..eb3f7f66 100644
--- a/projects/koboldai/default.nix
+++ b/projects/koboldai/default.nix
@@ -1,4 +1,4 @@
-{ inputs, lib, ... }:
+{ config, inputs, lib, withSystem, ... }:
 
 {
   perSystem = { config, pkgs, ... }: let
@@ -17,4 +17,27 @@
       };
     };
   };
+
+  flake.nixosModules = let
+    packageModule = pkgAttrName: { pkgs, ... }: {
+      services.koboldai.package = withSystem pkgs.system (
+        { config, ... }: lib.mkOptionDefault config.packages.${pkgAttrName}
+      );
+    };
+  in {
+    koboldai = ./nixos;
+    koboldai-amd = {
+      imports = [
+        config.flake.nixosModules.koboldai
+        ./nixos/amd.nix
+        (packageModule "koboldai-amd")
+      ];
+    };
+    koboldai-nvidia = {
+      imports = [
+        config.flake.nixosModules.koboldai
+        (packageModule "koboldai-nvidia")
+      ];
+    };
+  };
 }
diff --git a/projects/koboldai/nixos/amd.nix b/projects/koboldai/nixos/amd.nix
new file mode 100644
index 00000000..0d3ce7cf
--- /dev/null
+++ b/projects/koboldai/nixos/amd.nix
@@ -0,0 +1,12 @@
+{ pkgs, ... }:
+
+{
+  systemd = {
+    # Allow "unsupported" AMD GPUs
+    services.koboldai.environment.HSA_OVERRIDE_GFX_VERSION = "10.3.0";
+    # HACK: The PyTorch build we use on ROCm wants this to exist
+    tmpfiles.rules = [
+      "L+ /opt/amdgpu - - - - ${pkgs.libdrm}"
+    ];
+  };
+}
diff --git a/projects/koboldai/nixos/default.nix b/projects/koboldai/nixos/default.nix
new file mode 100644
index 00000000..a3956d21
--- /dev/null
+++ b/projects/koboldai/nixos/default.nix
@@ -0,0 +1,87 @@
+{ config, lib, ... }:
+
+let
+  inherit (lib)
+    mkIf mkOption mkEnableOption types
+    escapeShellArgs getExe optional
+    ;
+
+  cfg = config.services.koboldai;
+in
+
+{
+  options.services.koboldai = {
+    enable = mkEnableOption "KoboldAI Web UI";
+
+    package = mkOption {
+      description = "Which KoboldAI package to use.";
+      type = types.package;
+    };
+
+    user = mkOption {
+      description = "Which user to run KoboldAI as.";
+      default = "koboldai";
+      type = types.str;
+    };
+
+    group = mkOption {
+      description = "Which group to run KoboldAI as.";
+      default = "koboldai";
+      type = types.str;
+    };
+
+    host = mkOption {
+      description = "Whether to make KoboldAI remotely accessible.";
+      default = false;
+      type = types.bool;
+    };
+
+    port = mkOption {
+      description = "Which port to listen on.";
+      default = 5000;
+      type = types.port;
+    };
+
+    dataDir = mkOption {
+      description = "Where to store KoboldAI's state.";
+      default = "/var/lib/koboldai";
+      type = types.path;
+    };
+
+    extraArgs = mkOption {
+      description = "Extra command line arguments.";
+      default = [];
+      type = with types; listOf str;
+    };
+  };
+
+  config = let
+    cliArgs = (optional cfg.host "--host") ++ [
+      "--port" cfg.port
+    ] ++ cfg.extraArgs;
+  in mkIf cfg.enable {
+    users.users = mkIf (cfg.user == "koboldai") {
+      koboldai = {
+        isSystemUser = true;
+        inherit (cfg) group;
+      };
+    };
+    users.groups = mkIf (cfg.group == "koboldai") {
+      koboldai = {};
+    };
+    systemd.services.koboldai = {
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      environment.HOME = cfg.dataDir;
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${getExe cfg.package} ${escapeShellArgs cliArgs}";
+        PrivateTmp = true;
+      };
+    };
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' 0755 ${cfg.user} ${cfg.group} - -"
+    ];
+  };
+}
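
Note for reviewers: a consumer flake would wire these modules up roughly as follows. This is a minimal sketch, not part of the patch; the nixified-ai input name/URL, the my-host hostname, and the nixpkgs branch are illustrative placeholders. It only relies on what the diff above defines: the flake.nixosModules.invokeai-nvidia module, which imports ./nixos and defaults services.invokeai.package via mkOptionDefault.

    {
      inputs = {
        nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
        nixified-ai.url = "github:nixified-ai/flake";
      };

      outputs = { nixpkgs, nixified-ai, ... }: {
        nixosConfigurations.my-host = nixpkgs.lib.nixosSystem {
          system = "x86_64-linux";
          modules = [
            # Imports ./nixos and sets services.invokeai.package
            # to the invokeai-nvidia package for this system.
            nixified-ai.nixosModules.invokeai-nvidia
            {
              services.invokeai = {
                enable = true;
                host = "0.0.0.0"; # listen on all interfaces, not just 127.0.0.1
              };
            }
          ];
        };
      };
    }

The invokeai-amd and koboldai-amd variants are consumed the same way; they additionally import nixos/amd.nix for the ROCm workarounds, while the bare invokeai/koboldai modules leave services.<name>.package unset for the user to supply.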