NixOS modules #7

Merged · 5 commits · Apr 1, 2023

25 changes: 24 additions & 1 deletion projects/invokeai/default.nix
@@ -1,4 +1,4 @@
-{ inputs, lib, ... }:
+{ config, inputs, lib, withSystem, ... }:

{
perSystem = { config, pkgs, ... }: let
@@ -17,4 +17,27 @@
};
};
};

flake.nixosModules = let
packageModule = pkgAttrName: { pkgs, ... }: {
services.invokeai.package = withSystem pkgs.system (
{ config, ... }: lib.mkOptionDefault config.packages.${pkgAttrName}
);
};
in {
invokeai = ./nixos;
invokeai-amd = {
imports = [
config.flake.nixosModules.invokeai
./nixos/amd.nix
(packageModule "invokeai-amd")
];
};
invokeai-nvidia = {
imports = [
config.flake.nixosModules.invokeai
(packageModule "invokeai-nvidia")
];
};
};
}
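
A minimal sketch of how a downstream flake might consume these modules (the `nixified-ai` input name is an assumption for illustration). Because `packageModule` wires `services.invokeai.package` to the matching per-system package via `withSystem`, a consumer only needs to pick a module and enable the service:

{
  inputs.nixified-ai.url = "github:nixified-ai/flake"; # assumed input name, for illustration
  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";

  outputs = { nixpkgs, nixified-ai, ... }: {
    nixosConfigurations.myhost = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        # Imports ./nixos, ./nixos/amd.nix, and the per-system package default
        nixified-ai.nixosModules.invokeai-amd
        { services.invokeai.enable = true; }
      ];
    };
  };
}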
12 changes: 12 additions & 0 deletions projects/invokeai/nixos/amd.nix
@@ -0,0 +1,12 @@
{ pkgs, ... }:

{
systemd = {
# Allow "unsupported" AMD GPUs
services.invokeai.environment.HSA_OVERRIDE_GFX_VERSION = "10.3.0";
# HACK: The PyTorch build we use on ROCm wants this to exist
tmpfiles.rules = [
"L+ /opt/amdgpu - - - - ${pkgs.libdrm}"
];
};
}
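
The `HSA_OVERRIDE_GFX_VERSION = "10.3.0"` setting targets RDNA2-class GPUs. A sketch of overriding it from a consumer configuration for other hardware (the "11.0.0" value is an assumption about RDNA3 cards, not something this PR sets), using `mkForce` since the module assigns the value directly:

{ lib, ... }:

{
  # Assumed value for an RDNA3 card; mkForce overrides the module's setting.
  systemd.services.invokeai.environment.HSA_OVERRIDE_GFX_VERSION = lib.mkForce "11.0.0";
}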
116 changes: 116 additions & 0 deletions projects/invokeai/nixos/default.nix
@@ -0,0 +1,116 @@
{ config, lib, ... }:

let
inherit (lib)
mkIf mkOption mkEnableOption types
escapeShellArgs getExe optionalString
;

cfg = config.services.invokeai;
in

{
options.services.invokeai = {
enable = mkEnableOption "InvokeAI Web UI for Stable Diffusion";

package = mkOption {
description = "Which InvokeAI package to use.";
type = types.package;
};

user = mkOption {
description = "Which user to run InvokeAI as.";
default = "invokeai";
type = types.str;
};

group = mkOption {
description = "Which group to run InvokeAI as.";
default = "invokeai";
type = types.str;
};

host = mkOption {
description = "Which IP address to listen on.";
default = "127.0.0.1";
type = types.str;
};

port = mkOption {
description = "Which port to listen on.";
default = 9090;
type = types.port;
};

dataDir = mkOption {
description = "Where to store InvokeAI's state.";
default = "/var/lib/invokeai";
type = types.path;
};

maxLoadedModels = mkOption {
description = "Maximum amount of models to keep in VRAM at once.";
default = 1;
type = types.ints.positive;
};

nsfwChecker = mkEnableOption "the NSFW Checker";

precision = mkOption {
description = "Set model precision.";
default = "auto";
type = types.enum [ "auto" "float32" "autocast" "float16" ];
};

extraArgs = mkOption {
description = "Extra command line arguments.";
default = [];
type = with types; listOf str;
};
};

config = let

yesno = enable: text: "--${optionalString (!enable) "no-"}${text}";

cliArgs = [
"--web"
"--host" cfg.host
"--port" cfg.port
"--root_dir" cfg.dataDir
"--max_loaded_models" cfg.maxLoadedModels
(yesno cfg.nsfwChecker "nsfw_checker")
"--precision" cfg.precision
] ++ cfg.extraArgs;
initialModelsPath = "${cfg.package}/${cfg.package.pythonModule.sitePackages}/invokeai/configs/INITIAL_MODELS.yaml";
in mkIf cfg.enable {
users.users = mkIf (cfg.user == "invokeai") {
invokeai = {
isSystemUser = true;
inherit (cfg) group;
};
};
users.groups = mkIf (cfg.group == "invokeai") {
invokeai = {};
};
systemd.services.invokeai = {
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
preStart = ''
ln -sf ${initialModelsPath} '${cfg.dataDir}/configs/INITIAL_MODELS.yaml'
cp -L --no-clobber --no-preserve=mode ${initialModelsPath} '${cfg.dataDir}/configs/models.yaml'
'';
environment.HOME = "${cfg.dataDir}/.home";
serviceConfig = {
User = cfg.user;
Group = cfg.group;
ExecStart = "${getExe cfg.package} ${escapeShellArgs cliArgs}";
};
};
systemd.tmpfiles.rules = [
"d '${cfg.dataDir}' 0755 ${cfg.user} ${cfg.group} - -"
"d '${cfg.dataDir}/configs' 0755 ${cfg.user} ${cfg.group} - -"
"d '${cfg.dataDir}/.home' 0750 ${cfg.user} ${cfg.group} - -"
];
};
}
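
A minimal configuration sketch for the module above; the values shown are illustrative rather than defaults from this PR, and `package` is filled in automatically when importing the `invokeai-amd`/`invokeai-nvidia` wrappers:

{
  services.invokeai = {
    enable = true;
    # package comes from packageModule when using the -amd/-nvidia modules;
    # otherwise it must be set explicitly.
    host = "0.0.0.0";      # listen on all interfaces instead of 127.0.0.1
    precision = "float16";
    nsfwChecker = true;
    maxLoadedModels = 2;
  };
}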
25 changes: 24 additions & 1 deletion projects/koboldai/default.nix
@@ -1,4 +1,4 @@
-{ inputs, lib, ... }:
+{ config, inputs, lib, withSystem, ... }:

{
perSystem = { config, pkgs, ... }: let
@@ -17,4 +17,27 @@
};
};
};

flake.nixosModules = let
packageModule = pkgAttrName: { pkgs, ... }: {
services.koboldai.package = withSystem pkgs.system (
{ config, ... }: lib.mkOptionDefault config.packages.${pkgAttrName}
);
};
in {
koboldai = ./nixos;
koboldai-amd = {
imports = [
config.flake.nixosModules.koboldai
./nixos/amd.nix
(packageModule "koboldai-amd")
];
};
koboldai-nvidia = {
imports = [
config.flake.nixosModules.koboldai
(packageModule "koboldai-nvidia")
];
};
};
}
12 changes: 12 additions & 0 deletions projects/koboldai/nixos/amd.nix
@@ -0,0 +1,12 @@
{ pkgs, ... }:

{
systemd = {
# Allow "unsupported" AMD GPUs
services.koboldai.environment.HSA_OVERRIDE_GFX_VERSION = "10.3.0";
# HACK: The PyTorch build we use on ROCm wants this to exist
tmpfiles.rules = [
"L+ /opt/amdgpu - - - - ${pkgs.libdrm}"
];
};
}
87 changes: 87 additions & 0 deletions projects/koboldai/nixos/default.nix
@@ -0,0 +1,87 @@
{ config, lib, ... }:

let
inherit (lib)
mkIf mkOption mkEnableOption types
escapeShellArgs getExe optional
;

cfg = config.services.koboldai;
in

{
options.services.koboldai = {
enable = mkEnableOption "KoboldAI Web UI";

package = mkOption {
description = "Which KoboldAI package to use.";
type = types.package;
};

user = mkOption {
description = "Which user to run KoboldAI as.";
default = "koboldai";
type = types.str;
};

group = mkOption {
description = "Which group to run KoboldAI as.";
default = "koboldai";
type = types.str;
};

host = mkOption {
description = "Whether to make KoboldAI remotely accessible.";
default = false;
type = types.bool;
};

port = mkOption {
description = "Which port to listen on.";
default = 5000;
type = types.port;
};

dataDir = mkOption {
description = "Where to store KoboldAI's state.";
default = "/var/lib/koboldai";
type = types.path;
};

extraArgs = mkOption {
description = "Extra command line arguments.";
default = [];
type = with types; listOf str;
};
};

config = let
cliArgs = (optional cfg.host "--host") ++ [
"--port" cfg.port
] ++ cfg.extraArgs;
in mkIf cfg.enable {
users.users = mkIf (cfg.user == "koboldai") {
koboldai = {
isSystemUser = true;
inherit (cfg) group;
};
};
users.groups = mkIf (cfg.group == "koboldai") {
koboldai = {};
};
systemd.services.koboldai = {
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
environment.HOME = cfg.dataDir;
serviceConfig = {
User = cfg.user;
Group = cfg.group;
ExecStart = "${getExe cfg.package} ${escapeShellArgs cliArgs}";
PrivateTmp = true;
};
};
systemd.tmpfiles.rules = [
"d '${cfg.dataDir}' 0755 ${cfg.user} ${cfg.group} - -"
];
};
}
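
And the KoboldAI counterpart; note that `host` here is a boolean that toggles the upstream `--host` flag rather than a listen address:

{
  services.koboldai = {
    enable = true;
    host = true;  # pass --host so the UI is reachable from other machines
    port = 5001;  # illustrative; the module defaults to 5000
  };
}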