"Release" 25.09 Giving
Some checks failed
CI / Check, build and cache nixfiles (push) Failing after 31m46s
@@ -14,7 +14,7 @@ in
       owner = "pdns";
       group = "pdns";
     };
-    "estuary/pdns/recursor.conf" = {
+    "estuary/pdns/recursor.yml" = {
       owner = "pdns-recursor";
       group = "pdns-recursor";
     };
@@ -31,7 +31,7 @@ in
 
   pdns.recursor = {
     enable = true;
-    extraSettingsFile = config.age.secrets."estuary/pdns/recursor.conf".path;
+    extraSettingsFile = config.age.secrets."estuary/pdns/recursor.yml".path;
   };
 };
 
@@ -44,45 +44,55 @@ in
     };
 
     pdns-recursor = {
-      dns = {
-        address = [
-          "127.0.0.1" "::1"
-          assignments.base.ipv4.address assignments.base.ipv6.address
-        ];
-        allowFrom = [
-          "127.0.0.0/8" "::1/128"
-          prefixes.all.v4 prefixes.all.v6
-        ] ++ (with lib.my.c.tailscale.prefix; [ v4 v6 ]);
-      };
-
-      settings = {
-        query-local-address = [
-          assignments.internal.ipv4.address
-          assignments.internal.ipv6.address
-          assignments.base.ipv6.address
-        ];
-        forward-zones = map (z: "${z}=127.0.0.1:5353") authZones;
-
-        # DNS NOTIFY messages override TTL
-        allow-notify-for = authZones;
-        allow-notify-from = [ "127.0.0.0/8" "::1/128" ];
-
-        webserver = true;
-        webserver-address = "::";
-        webserver-allow-from = [ "127.0.0.1" "::1" ];
-
-        lua-dns-script = pkgs.writeText "pdns-script.lua" ''
-          function preresolve(dq)
-            if dq.qname:equal("nix-cache.nul.ie") then
-              dq:addAnswer(pdns.CNAME, "http.${config.networking.domain}.")
-              dq.rcode = 0
-              dq.followupFunction = "followCNAMERecords"
-              return true
-            end
-
-            return false
-          end
-        '';
-      };
+      yaml-settings = {
+        incoming = {
+          listen = [
+            "127.0.0.1" "::1"
+            assignments.base.ipv4.address assignments.base.ipv6.address
+          ];
+          allow_from = [
+            "127.0.0.0/8" "::1/128"
+            prefixes.all.v4 prefixes.all.v6
+          ] ++ (with lib.my.c.tailscale.prefix; [ v4 v6 ]);
+
+          # DNS NOTIFY messages override TTL
+          allow_notify_for = authZones;
+          allow_notify_from = [ "127.0.0.0/8" "::1/128" ];
+        };
+
+        outgoing = {
+          source_address = [
+            assignments.internal.ipv4.address
+            assignments.internal.ipv6.address
+            assignments.base.ipv6.address
+          ];
+        };
+
+        recursor = {
+          forward_zones = map (z: {
+            zone = z;
+            forwarders = [ "127.0.0.1:5353" ];
+          }) authZones;
+
+          lua_dns_script = pkgs.writeText "pdns-script.lua" ''
+            function preresolve(dq)
+              if dq.qname:equal("nix-cache.nul.ie") then
+                dq:addAnswer(pdns.CNAME, "http.${config.networking.domain}.")
+                dq.rcode = 0
+                dq.followupFunction = "followCNAMERecords"
+                return true
+              end
+
+              return false
+            end
+          '';
+        };
+
+        webservice = {
+          webserver = true;
+          address = "::";
+          allow_from = [ "127.0.0.1" "::1" ];
+        };
+      };
     };
   };
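For reference, PowerDNS Recursor 5 consumes this as new-style YAML rather than the flat key=value recursor.conf. A minimal sketch of the recursor.yml that (pkgs.formats.yaml { }).generate (used by the module change later in this commit) would emit from an attrset shaped like the one above, with placeholder values rather than the real addresses:

    incoming:
      listen: ["127.0.0.1", "::1"]
      allow_from: ["127.0.0.0/8", "::1/128"]
      allow_notify_for: ["example.org"]
      allow_notify_from: ["127.0.0.0/8", "::1/128"]
    outgoing:
      source_address: ["192.0.2.1"]
    recursor:
      forward_zones:
        - zone: example.org
          forwarders: ["127.0.0.1:5353"]
    webservice:
      webserver: true
      address: "::"
      allow_from: ["127.0.0.1", "::1"]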
@@ -87,7 +87,7 @@ in
     netdata.enable = true;
     mastodon = mkMerge [
       rec {
-        enable = true;
+        enable = false;
         localDomain = extraConfig.WEB_DOMAIN; # for nginx config
         extraConfig = {
           LOCAL_DOMAIN = "nul.ie";
@@ -95,7 +95,9 @@ in
         };
 
         secretKeyBaseFile = config.age.secrets."toot/secret-key.txt".path;
-        otpSecretFile = config.age.secrets."toot/otp-secret.txt".path;
+        # TODO: This was removed at some point.
+        # If we want to bring Mastodon back, this will probably need to be addressed.
+        # otpSecretFile = config.age.secrets."toot/otp-secret.txt".path;
         vapidPrivateKeyFile = config.age.secrets."toot/vapid-key.txt".path;
         vapidPublicKeyFile = toString (pkgs.writeText
           "vapid-pubkey.txt"
@@ -164,7 +166,7 @@ in
       };
     };
 
-    pds = {
+    bluesky-pds = {
       enable = true;
      environmentFiles = [ config.age.secrets."toot/pds.env".path ];
      settings = {
@@ -178,6 +178,9 @@ in
     dependencies = with ps; [
       requests
     ];
+
+    pyproject = true;
+    build-system = [ ps.setuptools ];
   };
 in
 {
@@ -55,8 +55,8 @@ in
     unifi = {
       enable = true;
       openFirewall = true;
-      unifiPackage = pkgs.unifi8;
-      mongodbPackage = pkgs.mongodb-6_0;
+      unifiPackage = pkgs.unifi;
+      mongodbPackage = pkgs.mongodb-7_0;
     };
   };
 };
@@ -141,8 +141,8 @@ in
       onState = [ "configured" ];
       script = ''
         #!${pkgs.runtimeShell}
-        if [ $IFACE = "wan-ifb" ]; then
-          ${pkgs.iproute2}/bin/tc filter add dev wan parent ffff: matchall action mirred egress redirect dev $IFACE
+        if [ "$IFACE" = "wan-ifb" ]; then
+          ${pkgs.iproute2}/bin/tc filter add dev wan parent ffff: matchall action mirred egress redirect dev "$IFACE"
         fi
       '';
     };
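Why this quoting fix matters: with an unquoted variable, the [ test can collapse or word-split when $IFACE is empty or contains spaces. A quick sketch:

    IFACE=
    [ $IFACE = "wan-ifb" ]     # expands to [ = wan-ifb ]: "unary operator expected"
    [ "$IFACE" = "wan-ifb" ]   # expands to [ "" = "wan-ifb" ]: simply false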
@@ -19,7 +19,7 @@ in
       owner = "pdns";
       group = "pdns";
     };
-    "home/pdns/recursor.conf" = {
+    "home/pdns/recursor.yml" = {
       owner = "pdns-recursor";
       group = "pdns-recursor";
     };
@@ -28,71 +28,78 @@ in
 
   pdns.recursor = {
     enable = true;
-    extraSettingsFile = config.age.secrets."home/pdns/recursor.conf".path;
+    extraSettingsFile = config.age.secrets."home/pdns/recursor.yml".path;
   };
 };
 
 services = {
   pdns-recursor = {
-    dns = {
-      address = [
-        "127.0.0.1" "::1"
-        assignments.hi.ipv4.address assignments.hi.ipv6.address
-        assignments.lo.ipv4.address assignments.lo.ipv6.address
-      ];
-      allowFrom = [
-        "127.0.0.0/8" "::1/128"
-        prefixes.hi.v4 prefixes.hi.v6
-        prefixes.lo.v4 prefixes.lo.v6
-      ] ++ (with lib.my.c.tailscale.prefix; [ v4 v6 ]);
-    };
-
-    settings = {
-      query-local-address = [
-        "0.0.0.0"
-        "::"
-      ];
-      forward-zones = map (z: "${z}=127.0.0.1:5353") authZones;
-
-      # DNS NOTIFY messages override TTL
-      allow-notify-for = authZones;
-      allow-notify-from = [ "127.0.0.0/8" "::1/128" ];
-
-      webserver = true;
-      webserver-address = "::";
-      webserver-allow-from = [ "127.0.0.1" "::1" ];
-
-      lua-dns-script = pkgs.writeText "pdns-script.lua" ''
-        blocklist = newDS()
-
-        function preresolve(dq)
-          local name = dq.qname:toString()
-
-          -- Disney+ doesn't like our IP space...
-          if dq.qtype == pdns.AAAA and (string.find(name, "disneyplus") or string.find(name, "disney-plus") or string.find(name, "disney.api")) then
-            dq.rcode = 0
-            return true
-          end
-
-          if blocklist:check(dq.qname) then
-            if dq.qtype == pdns.A then
-              dq:addAnswer(dq.qtype, "127.0.0.1")
-            elseif dq.qtype == pdns.AAAA then
-              dq:addAnswer(dq.qtype, "::1")
-            end
-            return true
-          end
-
-          return false
-        end
-
-        for line in io.lines("${./dns-blocklist.txt}") do
-          entry = line:gsub("%s+", "")
-          if entry ~= "" and string.sub(entry, 1, 1) ~= "#" then
-            blocklist:add(entry)
-          end
-        end
-      '';
-    };
+    yaml-settings = {
+      incoming = {
+        listen = [
+          "127.0.0.1" "::1"
+          assignments.hi.ipv4.address assignments.hi.ipv6.address
+          assignments.lo.ipv4.address assignments.lo.ipv6.address
+        ];
+        allow_from = [
+          "127.0.0.0/8" "::1/128"
+          prefixes.hi.v4 prefixes.hi.v6
+          prefixes.lo.v4 prefixes.lo.v6
+        ] ++ (with lib.my.c.tailscale.prefix; [ v4 v6 ]);
+
+        # DNS NOTIFY messages override TTL
+        allow_notify_for = authZones;
+        allow_notify_from = [ "127.0.0.0/8" "::1/128" ];
+      };
+
+      outgoing = {
+        source_address = [ "0.0.0.0" "::" ];
+      };
+
+      recursor = {
+        forward_zones = map (z: {
+          zone = z;
+          forwarders = [ "127.0.0.1:5353" ];
+        }) authZones;
+
+        lua_dns_script = pkgs.writeText "pdns-script.lua" ''
+          blocklist = newDS()
+
+          function preresolve(dq)
+            local name = dq.qname:toString()
+
+            -- Disney+ doesn't like our IP space...
+            if dq.qtype == pdns.AAAA and (string.find(name, "disneyplus") or string.find(name, "disney-plus") or string.find(name, "disney.api")) then
+              dq.rcode = 0
+              return true
+            end
+
+            if blocklist:check(dq.qname) then
+              if dq.qtype == pdns.A then
+                dq:addAnswer(dq.qtype, "127.0.0.1")
+              elseif dq.qtype == pdns.AAAA then
+                dq:addAnswer(dq.qtype, "::1")
+              end
+              return true
+            end
+
+            return false
+          end
+
+          for line in io.lines("${./dns-blocklist.txt}") do
+            entry = line:gsub("%s+", "")
+            if entry ~= "" and string.sub(entry, 1, 1) ~= "#" then
+              blocklist:add(entry)
+            end
+          end
+        '';
+      };
+
+      webservice = {
+        webserver = true;
+        address = "::";
+        allow_from = [ "127.0.0.1" "::1" ];
+      };
+    };
   };
 };
 };
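The blocklist loader at the end of the script strips whitespace, skips blank lines and lines starting with #, and adds everything else to the blocklist set; the dns-blocklist.txt it reads is therefore plain text with one name per line, along these lines (entries hypothetical):

    # hypothetical entries, one blocked name per line
    ads.example.com
    telemetry.example.net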
@@ -92,7 +92,8 @@ in
 
     nextcloud = {
       enable = true;
-      package = pkgs.nextcloud29;
+      # TODO: Might need to do some bullshit to go from Nextcloud 28 (?) to 30
+      package = pkgs.nextcloud30;
       datadir = "/mnt/storage/nextcloud";
       hostName = "cloud.${domain}";
       https = true;
@@ -31,8 +31,10 @@
     server.enable = true;
   };
 
+  image = {
+    baseName = "jackos-installer";
+  };
   isoImage = {
-    isoBaseName = "jackos-installer";
     volumeID = "jackos-${config.system.nixos.release}-${pkgs.stdenv.hostPlatform.uname.processor}";
     edition = "devplayer0";
     appendToMenuLabel = " /dev/player0 Installer";
@@ -1,4 +1,4 @@
-{ lib, pkgs, config, ... }:
+{ inputs, lib, pkgs, config, ... }:
 let
   inherit (builtins) substring match;
   inherit (lib)
@@ -127,7 +127,9 @@ in
     enable = mkBoolOpt' false "Whether to enable borgthin jobs";
     lvmPackage = mkOpt' package pkgs.lvm2 "Package containing LVM tools";
     thinToolsPackage = mkOpt' package pkgs.thin-provisioning-tools "Package containing thin-provisioning-tools";
-    package = mkOpt' package pkgs.borgthin "borgthin package";
+    # Really we should use the version from the overlay, but the package is quite far behind...
+    # Not bothering to update until Borg 2.0 releases
+    package = mkOpt' package inputs.borgthin.packages.${config.nixpkgs.system}.borgthin "borgthin package";
     jobs = mkOpt' (attrsOf jobType) { } "borgthin jobs";
   };
 
@@ -221,8 +221,8 @@ in
     memorySize = dummyOption;
     qemu.options = dummyOption;
   };
+  image.baseName = dummyOption;
   isoImage = {
-    isoBaseName = dummyOption;
     volumeID = dummyOption;
     edition = dummyOption;
     appendToMenuLabel = dummyOption;
@@ -65,9 +65,10 @@ in
   };
   nixpkgs = {
     overlays = [
-      inputs.deploy-rs.overlay
+      inputs.deploy-rs.overlays.default
       inputs.sharry.overlays.default
-      inputs.borgthin.overlays.default
+      # TODO: Re-enable when borgthin is updated
+      # inputs.borgthin.overlays.default
       inputs.boardie.overlays.default
     ];
     config = {
@@ -30,23 +30,25 @@ let
     substituteAll ${./menu.ipxe} "$out"
   '';
 
-  bootBuilder = pkgs.substituteAll {
+  bootBuilder = pkgs.replaceVarsWith {
     src = ./netboot-loader-builder.py;
     isExecutable = true;
 
-    inherit (pkgs) python3;
-    bootspecTools = pkgs.bootspec;
-    nix = config.nix.package.out;
-
-    inherit (config.system.nixos) distroName;
-    systemName = config.system.name;
-    inherit (cfg.client) configurationLimit;
-    checkMountpoints = pkgs.writeShellScript "check-mountpoints" ''
-      if ! ${pkgs.util-linuxMinimal}/bin/findmnt /boot > /dev/null; then
-        echo "/boot is not a mounted partition. Is the path configured correctly?" >&2
-        exit 1
-      fi
-    '';
+    replacements = {
+      inherit (pkgs) python3;
+      bootspecTools = pkgs.bootspec;
+      nix = config.nix.package.out;
+
+      inherit (config.system.nixos) distroName;
+      systemName = config.system.name;
+      inherit (cfg.client) configurationLimit;
+      checkMountpoints = pkgs.writeShellScript "check-mountpoints" ''
+        if ! ${pkgs.util-linuxMinimal}/bin/findmnt /boot > /dev/null; then
+          echo "/boot is not a mounted partition. Is the path configured correctly?" >&2
+          exit 1
+        fi
+      '';
+    };
   };
 in
 {
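For context on the API swap above: substituteAll took its substitution variables as arbitrary top-level derivation attributes, while its replacement, replaceVarsWith, groups them under an explicit replacements set, as the hunk shows. A stripped-down sketch of the new shape (file and variable names hypothetical):

    bootBuilder = pkgs.replaceVarsWith {
      src = ./builder.py;   # script containing @python3@-style placeholders
      isExecutable = true;
      replacements = {
        inherit (pkgs) python3;
      };
    };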
@@ -12,16 +12,6 @@ in
     useNetworkd = mkDefault true;
   };
 
-  systemd = {
-    additionalUpstreamSystemUnits = mkIf (config.system.nixos.release == "24.12:u-24.11") [
-      # TODO: NixOS has its own version of this, but with `network` instead of `networkd`. Is this just a typo? It
-      # hasn't been updated in 2 years...
-      # This has been done upstream now :)
-      # TODO: Remove when 25.05 releases
-      "systemd-networkd-wait-online@.service"
-    ];
-  };
-
   services.resolved = {
     domains = [ config.networking.domain ];
     # Explicitly unset fallback DNS (Nix module will not allow for a blank config)
@@ -4,19 +4,6 @@ let
   inherit (lib.my) mkOpt';
 
   cfg = config.my.nvme;
-  nvme-cli = pkgs.nvme-cli.override {
-    libnvme = pkgs.libnvme.overrideAttrs (o: rec {
-      # TODO: Remove when 1.11.1 releases (see https://github.com/linux-nvme/libnvme/pull/914)
-      version = "1.11.1";
-      src = pkgs.fetchFromGitHub {
-        owner = "linux-nvme";
-        repo = "libnvme";
-        rev = "v${version}";
-        hash = "sha256-CEGr7PDOVRi210XvICH8iLYDKn8S9bGruBO4tycvsT8=";
-      };
-      patches = (if (o ? patches) then o.patches else [ ]) ++ [ ./libnvme-hostconf.patch ];
-    });
-  };
 
   hostNQN = "nqn.2014-08.org.nvmexpress:uuid:${cfg.uuid}";
   etc = prefix: {
@@ -36,7 +23,7 @@ in
   config = mkIf (cfg.uuid != null) {
     environment = {
       systemPackages = [
-        nvme-cli
+        pkgs.nvme-cli
       ];
       etc = etc "";
     };
@@ -52,10 +39,6 @@ in
       ip = "${iproute2}/bin/ip";
       nvme = "${nvme-cli}/bin/nvme";
     };
-    extraConfig = ''
-      DefaultTimeoutStartSec=20
-      DefaultDeviceTimeoutSec=20
-    '';
 
     network = {
       enable = true;
@@ -70,14 +53,25 @@ in
 
       serviceConfig = {
         Type = "oneshot";
-        ExecStart = "${nvme-cli}/bin/nvme connect -t rdma -a ${cfg.boot.address} -n ${cfg.boot.nqn}";
+        ExecStart = "${pkgs.nvme-cli}/bin/nvme connect -t rdma -a ${cfg.boot.address} -n ${cfg.boot.nqn}";
         Restart = "on-failure";
         RestartSec = 10;
       };
 
       wantedBy = [ "initrd-root-device.target" ];
     };
   };
+  # TODO: Remove when 25.11 releases
+  } // (if (lib.versionAtLeast lib.my.upstreamRelease "25.11") then {
+    settings.Manager = {
+      DefaultTimeoutStartSec = 20;
+      DefaultDeviceTimeoutSec = 20;
+    };
+  } else {
+    extraConfig = ''
+      DefaultTimeoutStartSec=20
+      DefaultDeviceTimeoutSec=20
+    '';
+  });
   };
 };
 };
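Both branches of the conditional above amount to the same two directives in the initrd's systemd configuration; the structured settings.Manager form (used once the upstream release is at least 25.11) and the raw extraConfig string should each render as:

    [Manager]
    DefaultTimeoutStartSec=20
    DefaultDeviceTimeoutSec=20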
@@ -1,7 +1,7 @@
 { lib, pkgs, config, ... }:
 let
   inherit (builtins) isList;
-  inherit (lib) mkMerge mkIf mkDefault mapAttrsToList concatMapStringsSep concatStringsSep;
+  inherit (lib) mkMerge mkIf mkDefault mapAttrsToList concatMapStringsSep concatStringsSep getExe;
   inherit (lib.my) mkBoolOpt' mkOpt';
 
   # Yoinked from nixos/modules/services/networking/pdns-recursor.nix
@@ -165,7 +165,7 @@ let
 
   extraSettingsOpt = with lib.types; mkOpt' (nullOr str) null "Path to extra settings (e.g. for secrets).";
   baseAuthSettings = pkgs.writeText "pdns.conf" (settingsToLines cfg.auth.settings);
-  baseRecursorSettings = pkgs.writeText "pdns-recursor.conf" (settingsToLines config.services.pdns-recursor.settings);
+  baseRecursorSettings = (pkgs.formats.yaml { }).generate "pdns-recursor.yaml" config.services.pdns-recursor.yaml-settings;
   generateSettings = type: base: dst: if (cfg."${type}".extraSettingsFile != null) then ''
     oldUmask="$(umask)"
     umask 006
@@ -174,6 +174,14 @@ let
   '' else ''
     cp "${base}" "${dst}"
   '';
+  generateYamlSettings = type: base: dst: if (cfg."${type}".extraSettingsFile != null) then ''
+    oldUmask="$(umask)"
+    umask 006
+    ${getExe pkgs.yaml-merge} "${base}" "${cfg."${type}".extraSettingsFile}" > "${dst}"
+    umask "$oldUmask"
+  '' else ''
+    cp "${base}" "${dst}"
+  '';
 
   namedConf = pkgs.writeText "pdns-named.conf" ''
     options {
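The difference from generateSettings above is the merge step: the old path combined the extra .conf lines with the flat key=value base (its then-branch isn't shown here), whereas here the secret YAML document is merged into the generated base with yaml-merge, so nested sections from both files survive. A hypothetical illustration:

    $ cat base.yml
    incoming:
      listen: ["127.0.0.1"]
    $ cat extra.yml   # the decrypted age secret
    recursor:
      forward_zones:
        - zone: internal.example.org
          forwarders: ["192.0.2.53"]
    $ yaml-merge base.yml extra.yml > recursor.yml   # keeps both top-level sections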
@@ -315,9 +323,9 @@ in
     (mkIf cfg.recursor.enable {
       systemd.services.pdns-recursor = {
         preStart = ''
-          ${generateSettings "recursor" baseRecursorSettings "/run/pdns-recursor/recursor.conf"}
+          ${generateYamlSettings "recursor" baseRecursorSettings "/run/pdns-recursor/recursor.yml"}
         '';
-        serviceConfig.ExecStart = [ "" "${pkgs.pdns-recursor}/bin/pdns_recursor --config-dir=/run/pdns-recursor" ];
+        serviceConfig.ExecStart = [ "" "${pkgs.pdns-recursor}/bin/pdns_recursor --config-dir=/run/pdns-recursor --daemon=no --write-pid=no --disable-syslog --log-timestamp=no" ];
       };
 
       services.pdns-recursor = {
@@ -551,7 +551,7 @@ in
     ];
   });
 })
-(mkIf (config.services ? "pds" && config.services.pds.enable) {
+(mkIf (config.services ? "bluesky-pds" && config.services.bluesky-pds.enable) {
   my.tmproot.persistence.config.directories = [
     {
       directory = "/var/lib/pds";