nixos: Move colony VMs into subdirectory
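
The per-VM bits of the colony box move out of a single vms.nix into a
vms/ subdirectory, one directory per guest:

    nixos/boxes/colony/vms/default.nix          VM runner config (content of the old vms.nix)
    nixos/boxes/colony/vms/estuary/default.nix  the estuary guest's NixOS system
    nixos/boxes/colony/vms/estuary/dns.nix      PowerDNS (recursor + authoritative) for estuary

The first two hunks below adjust the colony box definition itself; the
remaining hunks delete the old file and add the three new ones.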
@@ -1,4 +1,6 @@
 {
+  imports = [ ./vms ];
+
   nixos.systems.colony = {
     system = "x86_64-linux";
     nixpkgs = "mine-stable";
@@ -27,7 +29,7 @@
       inherit (lib.my) networkdAssignment;
     in
     {
-      imports = [ "${modulesPath}/profiles/qemu-guest.nix" ./vms.nix ];
+      imports = [ "${modulesPath}/profiles/qemu-guest.nix" ];

       networking.domain = lib.my.colonyDomain;

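(Note the shape of the move: vms.nix was imported into colony's NixOS
configuration directly, whereas the new ./vms directory is imported a level
up and attaches its settings via nixos.systems.colony.configuration, as
vms/default.nix below shows.)
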
(deleted file, 63 lines)
@@ -1,63 +0,0 @@
-{ lib, pkgs, config, systems, ... }:
-let
-  wanBDF =
-    if config.my.build.isDevVM then "00:02.0" else "01:00.0";
-in
-{
-  systemd = {
-    services."vm@estuary" = {
-      # Depend the interface, networkd wait-online would deadlock...
-      requires = [ "sys-subsystem-net-devices-base.device" ];
-      preStart = ''
-        count=0
-        while ! ${pkgs.iproute2}/bin/ip link show dev base > /dev/null 2>&1; do
-          count=$((count+1))
-          if [ $count -ge 5 ]; then
-            echo "Timed out waiting for bridge interface"
-          fi
-          sleep 0.5
-        done
-      '';
-    };
-  };
-
-  my = {
-    vms = {
-      instances.estuary = {
-        uuid = "59f51efb-7e6d-477b-a263-ed9620dbc87b";
-        networks.base.mac = "52:54:00:ab:f1:52";
-        drives = {
-          installer = {
-            backend = {
-              driver = "file";
-              filename = "${systems.installer.configuration.config.my.buildAs.iso}/iso/nixos.iso";
-              read-only = "on";
-            };
-            format.driver = "raw";
-            frontend = "ide-cd";
-            frontendOpts = {
-              bootindex = 1;
-            };
-          };
-          disk = {
-            backend = {
-              driver = "host_device";
-              filename = "/dev/ssds/vm-estuary";
-              # It appears this needs to be set on the backend _and_ the format
-              discard = "unmap";
-            };
-            format = {
-              driver = "raw";
-              discard = "unmap";
-            };
-            frontend = "virtio-blk";
-            frontendOpts = {
-              bootindex = 0;
-            };
-          };
-        };
-        hostDevices."${wanBDF}" = { };
-      };
-    };
-  };
-}

nixos/boxes/colony/vms/default.nix (new file, 69 lines)
@@ -0,0 +1,69 @@
+{
+  imports = [
+    ./estuary
+  ];
+
+  nixos.systems.colony.configuration = { lib, pkgs, config, systems, ... }:
+  let
+    wanBDF =
+      if config.my.build.isDevVM then "00:02.0" else "01:00.0";
+  in
+  {
+    systemd = {
+      services."vm@estuary" = {
+        # Depend on the interface device directly; networkd wait-online would deadlock...
+        requires = [ "sys-subsystem-net-devices-base.device" ];
+        preStart = ''
+          count=0
+          while ! ${pkgs.iproute2}/bin/ip link show dev base > /dev/null 2>&1; do
+            count=$((count+1))
+            if [ $count -ge 5 ]; then
+              echo "Timed out waiting for bridge interface"; exit 1
+            fi
+            sleep 0.5
+          done
+        '';
+      };
+    };
+
+    my = {
+      vms = {
+        instances.estuary = {
+          uuid = "59f51efb-7e6d-477b-a263-ed9620dbc87b";
+          networks.base.mac = "52:54:00:ab:f1:52";
+          drives = {
+            installer = {
+              backend = {
+                driver = "file";
+                filename = "${systems.installer.configuration.config.my.buildAs.iso}/iso/nixos.iso";
+                read-only = "on";
+              };
+              format.driver = "raw";
+              frontend = "ide-cd";
+              frontendOpts = {
+                bootindex = 1;
+              };
+            };
+            disk = {
+              backend = {
+                driver = "host_device";
+                filename = "/dev/ssds/vm-estuary";
+                # It appears this needs to be set on the backend _and_ the format
+                discard = "unmap";
+              };
+              format = {
+                driver = "raw";
+                discard = "unmap";
+              };
+              frontend = "virtio-blk";
+              frontendOpts = {
+                bootindex = 0;
+              };
+            };
+          };
+          hostDevices."${wanBDF}" = { };
+        };
+      };
+    };
+  };
+}
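
An aside on the wait logic above: requires= pulls in
sys-subsystem-net-devices-base.device (systemd's device unit for the base
interface) but by itself imposes no start ordering, which is why the
preStart loop does the actual waiting. A sketch of the alternative, if one
preferred to let systemd wait instead (an assumption, not part of this
commit):

  # Assumed variant: pairing requires= with after= orders the VM unit
  # behind the device unit, so systemd itself waits for the bridge (and
  # fails the job on its device timeout) instead of a shell poll.
  systemd.services."vm@estuary" = {
    requires = [ "sys-subsystem-net-devices-base.device" ];
    after = [ "sys-subsystem-net-devices-base.device" ];
  };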

nixos/boxes/colony/vms/estuary/default.nix (new file, 159 lines)
@@ -0,0 +1,159 @@
+{
+  nixos.systems.estuary = {
+    system = "x86_64-linux";
+    nixpkgs = "mine";
+    home-manager = "mine";
+
+    assignments.internal = {
+      name = "estuary-vm";
+      altNames = [ "fw" ];
+      ipv4 = {
+        address = "10.100.0.1";
+        gateway = null;
+      };
+      #ipv6.address = "2a0e:97c0:4d1:0::1";
+      ipv6.address = "2a0e:97c0:4d0:bbb0::1";
+    };
+
+    configuration = { lib, pkgs, modulesPath, config, assignments, allAssignments, ... }:
+    let
+      inherit (lib) mkIf mkMerge mkForce;
+      inherit (lib.my) networkdAssignment;
+    in
+    {
+      imports = [ "${modulesPath}/profiles/qemu-guest.nix" ./dns.nix ];
+
+      config = mkMerge [
+        {
+          networking.domain = lib.my.colonyDomain;
+
+          boot.kernelParams = [ "console=ttyS0,115200n8" ];
+          fileSystems = {
+            "/boot" = {
+              device = "/dev/disk/by-label/ESP";
+              fsType = "vfat";
+            };
+            "/nix" = {
+              device = "/dev/main/nix";
+              fsType = "ext4";
+            };
+            "/persist" = {
+              device = "/dev/main/persist";
+              fsType = "ext4";
+              neededForBoot = true;
+            };
+          };
+
+          services = {
+            lvm = {
+              dmeventd.enable = true;
+            };
+          };
+
+          systemd.network = {
+            links = {
+              "10-wan" = {
+                matchConfig.MACAddress = "52:54:00:a1:b2:5f";
+                linkConfig.Name = "wan";
+              };
+              "10-base" = {
+                matchConfig.MACAddress = "52:54:00:ab:f1:52";
+                linkConfig.Name = "base";
+              };
+            };
+
+            networks = {
+              "80-wan" = {
+                matchConfig.Name = "wan";
+                DHCP = "ipv4";
+                dhcpV4Config = {
+                  UseDNS = false;
+                  UseHostname = false;
+                };
+                address = [
+                  "2a0e:97c0:4d0:bbbf::1/64"
+                ];
+                gateway = [
+                  "fe80::215:17ff:fe4b:494a"
+                ];
+                networkConfig.IPv6AcceptRA = false;
+              };
+              "80-base" = mkMerge [
+                (networkdAssignment "base" assignments.internal)
+                {
+                  dns = [ "127.0.0.1" "::1" ];
+                  domains = [ config.networking.domain ];
+                  networkConfig = {
+                    IPv6AcceptRA = mkForce false;
+                    IPv6SendRA = true;
+                  };
+                  ipv6SendRAConfig = {
+                    DNS = [ assignments.internal.ipv6.address ];
+                    Domains = [ config.networking.domain ];
+                  };
+                  ipv6Prefixes = [
+                    {
+                      #ipv6PrefixConfig.Prefix = "2a0e:97c0:4d1:0::/64";
+                      ipv6PrefixConfig.Prefix = "2a0e:97c0:4d0:bbb0::/64";
+                    }
+                  ];
+                }
+              ];
+            };
+          };
+
+          my = {
+            #deploy.generate.system.mode = "boot";
+            secrets.key = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPhxM5mnguExkcLue47QKk1vA72OoPc3HOqqoHqHHfa1";
+            server.enable = true;
+
+            firewall = {
+              trustedInterfaces = [ "base" ];
+              udp.allowed = [ 5353 ];
+              tcp.allowed = [ 5353 ];
+              nat = {
+                enable = true;
+                externalInterface = "wan";
+              };
+              extraRules = ''
+                table inet filter {
+                  chain routing-tcp {
+                    # Safe enough to allow all SSH
+                    tcp dport ssh accept
+                  }
+                  chain routing-udp {
+
+                  }
+                  chain filter-routing {
+                    tcp flags & (fin|syn|rst|ack) == syn ct state new jump routing-tcp
+                    meta l4proto udp ct state new jump routing-udp
+                    return
+                  }
+                  chain forward {
+                    iifname wan oifname base jump filter-routing
+                  }
+                }
+                table inet nat {
+                  chain prerouting {
+                    iifname wan meta l4proto { udp, tcp } th dport domain redirect to :5353
+                  }
+                  chain postrouting {
+                    ip saddr 10.100.0.0/16 masquerade
+                  }
+                }
+              '';
+            };
+          };
+        }
+        (mkIf config.my.build.isDevVM {
+          systemd.network = {
+            netdevs."05-dummy-base".netdevConfig = {
+              Name = "base";
+              Kind = "dummy";
+            };
+          };
+        })
+      ];
+    };
+  };
+}
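
Reading this together with dns.nix below: the recursor answers clients on
localhost and the internal addresses, forwarding the authoritative zones to
the auth server on 127.0.0.1:5353, while the prerouting rule above redirects
external port-53 traffic arriving on wan straight to :5353, so outside
queries only ever reach the authoritative server and never the recursor.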

nixos/boxes/colony/vms/estuary/dns.nix (new file, 135 lines)
@@ -0,0 +1,135 @@
+{ lib, pkgs, config, assignments, allAssignments, ... }:
+let
+  inherit (builtins) attrNames;
+  inherit (lib) concatStringsSep concatMapStringsSep mapAttrsToList filterAttrs genAttrs optional;
+
+  ptrDots = 2;
+  reverseZone = "100.10.in-addr.arpa";
+  ptrDots6 = 17;
+  reverseZone6 = "b.b.b.0.d.4.0.0.c.7.9.e.0.a.2.ip6.arpa";
+
+  authZones = attrNames config.my.pdns.auth.bind.zones;
+in
+{
+  config = {
+    services.pdns-recursor = {
+      enable = true;
+      dns = {
+        address = [
+          "127.0.0.1" "::1"
+          assignments.internal.ipv4.address assignments.internal.ipv6.address
+        ];
+        allowFrom = [
+          "127.0.0.0/8" "::1/128"
+          "10.100.0.0/16" "2a0e:97c0:4d1::/48"
+          # TODO: Remove when moving to proper net!
+          "2a0e:97c0:4d0::/48"
+        ];
+      };
+      forwardZones = genAttrs authZones (_: "127.0.0.1:5353");
+
+      settings = {
+        query-local-address = [ "0.0.0.0" "::" ];
+
+        # DNS NOTIFY messages override TTL
+        allow-notify-for = authZones;
+        allow-notify-from = [ "127.0.0.0/8" "::1/128" ];
+      };
+    };
+    # For rec_control
+    environment.systemPackages = with pkgs; [
+      pdns-recursor
+    ];
+
+    my.pdns.auth = {
+      enable = true;
+      settings = {
+        primary = true;
+        resolver = "127.0.0.1";
+        expand-alias = true;
+        local-address = [
+          "0.0.0.0:5353" "[::]:5353"
+        ];
+        also-notify = [ "127.0.0.1" ];
+      };
+
+      bind.zones =
+        let
+          genRecords = f:
+            concatStringsSep
+              "\n"
+              (mapAttrsToList
+                (_: as: f as.internal)
+                (filterAttrs (_: as: as ? "internal" && as.internal.visible) allAssignments));
+
+          intRecords =
+            genRecords (a: ''
+              ${a.name} IN A ${a.ipv4.address}
+              ${a.name} IN AAAA ${a.ipv6.address}
+              ${concatMapStringsSep "\n" (alt: "${alt} IN CNAME ${a.name}") a.altNames}
+            '');
+          intPtrRecords =
+            genRecords (a: ''@@PTR:${a.ipv4.address}:${toString ptrDots}@@ IN PTR ${a.name}.${config.networking.domain}.'');
+          intPtr6Records =
+            genRecords (a: ''@@PTR:${a.ipv6.address}:${toString ptrDots6}@@ IN PTR ${a.name}.${config.networking.domain}.'');
+        in
+        {
+          "${config.networking.domain}" = {
+            type = "master";
+            text = ''
+              $TTL 60
+              @ IN SOA ns.${config.networking.domain}. dev.nul.ie. (
+                @@SERIAL@@ ; serial
+                3h ; refresh
+                1h ; retry
+                1w ; expire
+                1h ; minimum
+              )
+
+              @ IN NS ns
+              ns IN A 188.141.14.6
+              ns IN AAAA 2a0e:97c0:4d0:bbbf::1
+
+              @ IN ALIAS ${config.networking.fqdn}.
+
+              ${intRecords}
+            '';
+          };
+          "${reverseZone}" = {
+            type = "master";
+            text = ''
+              $TTL 60
+              @ IN SOA ns.${config.networking.domain}. dev.nul.ie. (
+                @@SERIAL@@ ; serial
+                3h ; refresh
+                1h ; retry
+                1w ; expire
+                1h ; minimum
+              )
+
+              @ IN NS ns.${config.networking.domain}.
+
+              ${intPtrRecords}
+            '';
+          };
+          "${reverseZone6}" = {
+            type = "master";
+            text = ''
+              $TTL 60
+              @ IN SOA ns.${config.networking.domain}. dev.nul.ie. (
+                @@SERIAL@@ ; serial
+                3h ; refresh
+                1h ; retry
+                1w ; expire
+                1h ; minimum
+              )
+
+              @ IN NS ns.${config.networking.domain}.
+
+              ${intPtr6Records}
+            '';
+          };
+        };
+    };
+  };
+}
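
A note on the templates in these zone files: @@SERIAL@@ and @@PTR:addr:n@@
appear to be markers for the repo's own zone post-processing (that machinery
is not part of this diff), with n presumably the number of labels of the
reversed address to keep relative to the reverse zone. A worked example
under that assumption, for 10.100.0.1 with ptrDots = 2:

    10.100.0.1 reversed  -> 1.0.100.10.in-addr.arpa.
    zone                 -> 100.10.in-addr.arpa
    @@PTR:10.100.0.1:2@@ -> 1.0  (the two labels left of the zone)

i.e. the generated record would read "1.0 IN PTR estuary-vm.<domain>.".
The forward records come from the same genRecords walk over allAssignments,
so any box declaring an assignment like estuary's (with internal.visible
set, per the filterAttrs above) gets A/AAAA/CNAME and PTR records
automatically.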