From 25872524f0ed0e16c382754d8d10ddb44c23c50c Mon Sep 17 00:00:00 2001
From: Raito Bezarius
Date: Sun, 30 Apr 2023 04:02:15 +0200
Subject: [PATCH 1/3] nixos/qemu-vm: add `virtualisation.tpm` for running TPM in QEMU infrastructure

---
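For orientation, what the generated start script and the QEMU flags added below amount to is roughly the following hand-written sketch (not part of the patch; the state directory and the QEMU command line are placeholders, and swtpm is assumed to be on PATH):

    # Start the TPM 2.0 emulator, keeping its state and control socket in a scratch directory.
    STATE_DIR=./my-vm-swtpm
    mkdir -p "$STATE_DIR"
    swtpm socket \
      --tpmstate dir="$STATE_DIR" \
      --ctrl type=unixio,path="$STATE_DIR"/socket \
      --tpm2 &

    # Hand that control socket to QEMU and expose a TPM device to the guest;
    # the device model depends on the guest platform (tpm-tis on x86, tpm-tis-device on AArch64).
    qemu-system-x86_64 \
      -chardev socket,id=chrtpm,path="$STATE_DIR"/socket \
      -tpmdev emulator,id=tpm_dev_0,chardev=chrtpm \
      -device tpm-tis,tpmdev=tpm_dev_0
    # ...plus the usual disk, kernel and network flags that the module generates.
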
 nixos/modules/virtualisation/qemu-vm.nix | 44 +++++++++++++++++++++-
 1 file changed, 43 insertions(+), 1 deletion(-)

diff --git a/nixos/modules/virtualisation/qemu-vm.nix b/nixos/modules/virtualisation/qemu-vm.nix
index d0a5ddd87ccf..a1606839b6ce 100644
--- a/nixos/modules/virtualisation/qemu-vm.nix
+++ b/nixos/modules/virtualisation/qemu-vm.nix
@@ -198,6 +198,16 @@ let
       fi
     ''}

+    ${lib.optionalString cfg.tpm.enable ''
+      NIX_SWTPM_DIR=$(readlink -f "''${NIX_SWTPM_DIR:-${config.system.name}-swtpm}")
+      mkdir -p "$NIX_SWTPM_DIR"
+      ${lib.getExe cfg.tpm.package} \
+        socket \
+        --tpmstate dir="$NIX_SWTPM_DIR" \
+        --ctrl type=unixio,path="$NIX_SWTPM_DIR"/socket \
+        "--tpm2" 1>"$NIX_SWTPM_DIR"/stdout 2>"$NIX_SWTPM_DIR"/stderr &
+    ''}
+
     cd "$TMPDIR"

     ${lib.optionalString (cfg.emptyDiskImages != []) "idx=0"}
@@ -862,6 +872,32 @@ in
     };
   };

+    virtualisation.tpm = {
+      enable = mkEnableOption "a TPM device in the virtual machine with a driver, using swtpm.";
+
+      package = mkPackageOptionMD cfg.host.pkgs "swtpm" { };
+
+      deviceModel = mkOption {
+        type = types.str;
+        default = ({
+          "i686-linux" = "tpm-tis";
+          "x86_64-linux" = "tpm-tis";
+          "ppc64-linux" = "tpm-spapr";
+          "armv7-linux" = "tpm-tis-device";
+          "aarch64-linux" = "tpm-tis-device";
+        }.${pkgs.hostPlatform.system} or (throw "Unsupported system for TPM2 emulation in QEMU"));
+        defaultText = ''
+          Based on the guest platform Linux system:
+
+          - `tpm-tis` for (i686, x86_64)
+          - `tpm-spapr` for ppc64
+          - `tpm-tis-device` for (armv7, aarch64)
+        '';
+        example = "tpm-tis-device";
+        description = lib.mdDoc "QEMU device model for the TPM; an appropriate default is chosen based on the guest platform system and the package passed.";
+      };
+    };
+
     virtualisation.useDefaultFilesystems =
       mkOption {
         type = types.bool;
@@ -1027,7 +1063,8 @@

     boot.initrd.availableKernelModules =
       optional cfg.writableStore "overlay"
-      ++ optional (cfg.qemu.diskInterface == "scsi") "sym53c8xx";
+      ++ optional (cfg.qemu.diskInterface == "scsi") "sym53c8xx"
+      ++ optional (cfg.tpm.enable) "tpm_tis";

     virtualisation.additionalPaths = [ config.system.build.toplevel ];

@@ -1098,6 +1135,11 @@
       (mkIf (!cfg.graphics) [
         "-nographic"
       ])
+      (mkIf (cfg.tpm.enable) [
+        "-chardev socket,id=chrtpm,path=\"$NIX_SWTPM_DIR\"/socket"
+        "-tpmdev emulator,id=tpm_dev_0,chardev=chrtpm"
+        "-device ${cfg.tpm.deviceModel},tpmdev=tpm_dev_0"
+      ])
     ];

     virtualisation.qemu.drives = mkMerge [

From 83b131bb55bde0511a17d3beb52145d778dd4105 Mon Sep 17 00:00:00 2001
From: Raito Bezarius
Date: Sun, 30 Apr 2023 04:02:51 +0200
Subject: [PATCH 2/3] nixos/tests: adopt newest TPM support in test infra

---
 nixos/tests/systemd-credentials-tpm2.nix | 59 +-----------------------
 nixos/tests/systemd-cryptenroll.nix      | 53 ++++++++-------------
 nixos/tests/systemd-initrd-luks-tpm2.nix | 27 +----------
 3 files changed, 23 insertions(+), 116 deletions(-)

diff --git a/nixos/tests/systemd-credentials-tpm2.nix b/nixos/tests/systemd-credentials-tpm2.nix
index d2dc1fd7b615..bf7418312236 100644
--- a/nixos/tests/systemd-credentials-tpm2.nix
+++ b/nixos/tests/systemd-credentials-tpm2.nix
@@ -1,13 +1,4 @@
-import ./make-test-python.nix ({ lib, pkgs, system, ... }:
-
-let
-  tpmSocketPath = "/tmp/swtpm-sock";
-  tpmDeviceModels = {
-    x86_64-linux = "tpm-tis";
-    aarch64-linux = "tpm-tis-device";
-  };
-in
-
+import ./make-test-python.nix ({ lib, pkgs, ... }:
 {
   name = "systemd-credentials-tpm2";

@@ -16,51 +7,11 @@ in
   };

   nodes.machine = { pkgs, ... }: {
-    virtualisation = {
-      qemu.options = [
-        "-chardev socket,id=chrtpm,path=${tpmSocketPath}"
-        "-tpmdev emulator,id=tpm_dev_0,chardev=chrtpm"
-        "-device ${tpmDeviceModels.${system}},tpmdev=tpm_dev_0"
-      ];
-    };
-
-    boot.initrd.availableKernelModules = [ "tpm_tis" ];
-
+    virtualisation.tpm.enable = true;
     environment.systemPackages = with pkgs; [ diffutils ];
   };

   testScript = ''
-    import subprocess
-    from tempfile import TemporaryDirectory
-
-    # From systemd-initrd-luks-tpm2.nix
-    class Tpm:
-        def __init__(self):
-            self.state_dir = TemporaryDirectory()
-            self.start()
-
-        def start(self):
-            self.proc = subprocess.Popen(["${pkgs.swtpm}/bin/swtpm",
-                "socket",
-                "--tpmstate", f"dir={self.state_dir.name}",
-                "--ctrl", "type=unixio,path=${tpmSocketPath}",
-                "--tpm2",
-            ])
-
-            # Check whether starting swtpm failed
-            try:
-                exit_code = self.proc.wait(timeout=0.2)
-                if exit_code is not None and exit_code != 0:
-                    raise Exception("failed to start swtpm")
-            except subprocess.TimeoutExpired:
-                pass
-
-        """Check whether the swtpm process exited due to an error"""
-        def check(self):
-            exit_code = self.proc.poll()
-            if exit_code is not None and exit_code != 0:
-                raise Exception("swtpm process died")
-
     CRED_NAME = "testkey"
     CRED_RAW_FILE = f"/root/{CRED_NAME}"
     CRED_FILE = f"/root/{CRED_NAME}.cred"
@@ -85,12 +36,6 @@ in
         machine.log("systemd-run finished successfully")

-    tpm = Tpm()
-
-    @polling_condition
-    def swtpm_running():
-        tpm.check()
-
     machine.wait_for_unit("multi-user.target")

     with subtest("Check whether TPM device exists"):
diff --git a/nixos/tests/systemd-cryptenroll.nix b/nixos/tests/systemd-cryptenroll.nix
index 055ae7d1681f..034aae1d5e95 100644
--- a/nixos/tests/systemd-cryptenroll.nix
+++ b/nixos/tests/systemd-cryptenroll.nix
@@ -8,47 +8,34 @@ import ./make-test-python.nix ({ pkgs, ...
}: { environment.systemPackages = [ pkgs.cryptsetup ]; virtualisation = { emptyDiskImages = [ 512 ]; - qemu.options = [ - "-chardev socket,id=chrtpm,path=/tmp/swtpm-sock" - "-tpmdev emulator,id=tpm0,chardev=chrtpm" - "-device tpm-tis,tpmdev=tpm0" - ]; + tpm.enable = true; }; }; testScript = '' - import subprocess - import tempfile + machine.start() - def start_swtpm(tpmstate): - subprocess.Popen(["${pkgs.swtpm}/bin/swtpm", "socket", "--tpmstate", "dir="+tpmstate, "--ctrl", "type=unixio,path=/tmp/swtpm-sock", "--log", "level=0", "--tpm2"]) + # Verify the TPM device is available and accessible by systemd-cryptenroll + machine.succeed("test -e /dev/tpm0") + machine.succeed("test -e /dev/tpmrm0") + machine.succeed("systemd-cryptenroll --tpm2-device=list") - with tempfile.TemporaryDirectory() as tpmstate: - start_swtpm(tpmstate) - machine.start() + # Create LUKS partition + machine.succeed("echo -n lukspass | cryptsetup luksFormat -q /dev/vdb -") + # Enroll new LUKS key and bind it to Secure Boot state + # For more details on PASSWORD variable, check the following issue: + # https://github.com/systemd/systemd/issues/20955 + machine.succeed("PASSWORD=lukspass systemd-cryptenroll --tpm2-device=auto --tpm2-pcrs=7 /dev/vdb") + # Add LUKS partition to /etc/crypttab to test auto unlock + machine.succeed("echo 'luks /dev/vdb - tpm2-device=auto' >> /etc/crypttab") - # Verify the TPM device is available and accessible by systemd-cryptenroll - machine.succeed("test -e /dev/tpm0") - machine.succeed("test -e /dev/tpmrm0") - machine.succeed("systemd-cryptenroll --tpm2-device=list") + machine.shutdown() + machine.start() - # Create LUKS partition - machine.succeed("echo -n lukspass | cryptsetup luksFormat -q /dev/vdb -") - # Enroll new LUKS key and bind it to Secure Boot state - # For more details on PASSWORD variable, check the following issue: - # https://github.com/systemd/systemd/issues/20955 - machine.succeed("PASSWORD=lukspass systemd-cryptenroll --tpm2-device=auto --tpm2-pcrs=7 /dev/vdb") - # Add LUKS partition to /etc/crypttab to test auto unlock - machine.succeed("echo 'luks /dev/vdb - tpm2-device=auto' >> /etc/crypttab") - machine.shutdown() - - start_swtpm(tpmstate) - machine.start() - - # Test LUKS partition automatic unlock on boot - machine.wait_for_unit("systemd-cryptsetup@luks.service") - # Wipe TPM2 slot - machine.succeed("systemd-cryptenroll --wipe-slot=tpm2 /dev/vdb") + # Test LUKS partition automatic unlock on boot + machine.wait_for_unit("systemd-cryptsetup@luks.service") + # Wipe TPM2 slot + machine.succeed("systemd-cryptenroll --wipe-slot=tpm2 /dev/vdb") ''; }) diff --git a/nixos/tests/systemd-initrd-luks-tpm2.nix b/nixos/tests/systemd-initrd-luks-tpm2.nix index d9dd9118a3a2..e292acfd1c5f 100644 --- a/nixos/tests/systemd-initrd-luks-tpm2.nix +++ b/nixos/tests/systemd-initrd-luks-tpm2.nix @@ -9,7 +9,7 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: { # Booting off the TPM2-encrypted device requires an available init script mountHostNixStore = true; useEFIBoot = true; - qemu.options = ["-chardev socket,id=chrtpm,path=/tmp/mytpm1/swtpm-sock -tpmdev emulator,id=tpm0,chardev=chrtpm -device tpm-tis,tpmdev=tpm0"]; + tpm.enable = true; }; boot.loader.systemd-boot.enable = true; @@ -33,29 +33,6 @@ import ./make-test-python.nix ({ lib, pkgs, ... 
}: {
   };

   testScript = ''
-    import subprocess
-    import os
-    import time
-
-
-    class Tpm:
-        def __init__(self):
-            os.mkdir("/tmp/mytpm1")
-            self.start()
-
-        def start(self):
-            self.proc = subprocess.Popen(["${pkgs.swtpm}/bin/swtpm", "socket", "--tpmstate", "dir=/tmp/mytpm1", "--ctrl", "type=unixio,path=/tmp/mytpm1/swtpm-sock", "--log", "level=20", "--tpm2"])
-
-        def wait_for_death_then_restart(self):
-            while self.proc.poll() is None:
-                print("waiting for tpm to die")
-                time.sleep(1)
-
-            assert self.proc.returncode == 0
-            self.start()
-
-    tpm = Tpm()
-
-
     # Create encrypted volume
     machine.wait_for_unit("multi-user.target")
     machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdb -")
@@ -66,8 +43,6 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
     machine.succeed("sync")
     machine.crash()

-    tpm.wait_for_death_then_restart()
-
     # Boot and decrypt the disk
     machine.wait_for_unit("multi-user.target")
     assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount")

From 08f4fe20874ea19c55849138fb3af734cb72a5a1 Mon Sep 17 00:00:00 2001
From: Arthur Gautier
Date: Tue, 1 Aug 2023 07:05:58 +0000
Subject: [PATCH 3/3] qemu-vm: stop the swtpm once qemu stops
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The idea is to run an async process waiting for swtpm; we have to ensure
that `FD_CLOEXEC` is cleared on this process' stdin file descriptor, and we
use `fdflags` for that, a loadable builtin in Bash ≥ 5.

When the async process exits, it terminates `swtpm`. We bind the termination
of the async process to the termination of QEMU by virtue of having the Bash
script `exec` into `qemu`.

Signed-off-by: Arthur Gautier
Co-authored-by: Raito Bezarius
---
 nixos/modules/virtualisation/qemu-vm.nix | 27 ++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

diff --git a/nixos/modules/virtualisation/qemu-vm.nix b/nixos/modules/virtualisation/qemu-vm.nix
index a1606839b6ce..3bf8bbd9dab0 100644
--- a/nixos/modules/virtualisation/qemu-vm.nix
+++ b/nixos/modules/virtualisation/qemu-vm.nix
@@ -204,8 +204,31 @@ let
       ${lib.getExe cfg.tpm.package} \
         socket \
         --tpmstate dir="$NIX_SWTPM_DIR" \
-        --ctrl type=unixio,path="$NIX_SWTPM_DIR"/socket \
-        "--tpm2" 1>"$NIX_SWTPM_DIR"/stdout 2>"$NIX_SWTPM_DIR"/stderr &
+        --ctrl type=unixio,path="$NIX_SWTPM_DIR"/socket,terminate \
+        --pid file="$NIX_SWTPM_DIR"/pid --daemon \
+        --tpm2 \
+        --log file="$NIX_SWTPM_DIR"/stdout,level=6
+
+      # Enable the `fdflags` builtin in Bash.
+      # We will need it to perform surgical modification of the file descriptor
+      # passed to the coprocess, in order to remove `FD_CLOEXEC`, i.e. the flag
+      # that closes the file descriptor on exec.
+      # If left alone, that flag would make the coprocess read EOF when QEMU is
+      # `exec`ed at the end of this script. To work around that, we just clear
+      # the `FD_CLOEXEC` bit as a first step.
+      enable -f ${hostPkgs.bash}/lib/bash/fdflags fdflags
+      # Leave a dangling subprocess: because the swtpm ctrl socket is opened
+      # with "terminate", swtpm stops when the last connection disconnects.
+      # When QEMU stops, or if the main shell process ends, the coproc is
+      # signaled by virtue of the pipe between main and coproc ending,
+      # which in turn triggers a socat connect-disconnect to swtpm, and
+      # that stops it.
+      coproc waitingswtpm {
+        read || :
+        echo "" | ${lib.getExe hostPkgs.socat} STDIO UNIX-CONNECT:"$NIX_SWTPM_DIR"/socket
+      }
+      # Clear `FD_CLOEXEC` on the coprocess' stdin file descriptor.
+      fdflags -s-cloexec ''${waitingswtpm[1]}
    ''}

    cd "$TMPDIR"
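To illustrate the shutdown coupling described in the commit message outside of the Nix interpolation, here is a minimal hand-written sketch (not part of the patch). It assumes swtpm was already started daemonized with `--ctrl type=unixio,path="$STATE_DIR"/socket,terminate` as in the hunk above, and that socat and the Bash `fdflags` loadable are available; the state directory, the loadable path and the QEMU flags are placeholders:

    # Load the `fdflags` builtin (shipped as a loadable builtin with Bash >= 5;
    # the path below is a distro-dependent placeholder).
    enable -f /usr/lib/bash/fdflags fdflags

    # The coprocess blocks in `read` until the write end of its stdin pipe is
    # closed by the last process holding it; it then opens and closes one
    # connection to the control socket, which makes swtpm terminate.
    coproc waitingswtpm {
      read || :
      echo "" | socat STDIO UNIX-CONNECT:"$STATE_DIR"/socket
    }

    # The pipe's write end is close-on-exec by default, so it would be closed
    # as soon as the script execs QEMU and the watcher would fire too early;
    # clear FD_CLOEXEC so QEMU keeps the descriptor open for its lifetime.
    fdflags -s-cloexec ${waitingswtpm[1]}

    # Replace the shell with QEMU; when QEMU exits, the pipe closes and the
    # coprocess shuts swtpm down.
    exec qemu-system-x86_64 "${qemu_flags[@]}"   # qemu_flags: placeholder for the real command line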