Merge master into staging-next
commit 761210ff4f
@@ -9848,6 +9848,11 @@
     githubId = 1918771;
     name = "Joe Doyle";
   };
+  jpds = {
+    github = "jpds";
+    githubId = 29158971;
+    name = "Jonathan Davies";
+  };
   jpentland = {
     email = "joe.pentland@gmail.com";
     github = "jpentland";
@@ -10618,6 +10623,11 @@
     github = "kkharji";
     githubId = 65782666;
   };
+  kkoniuszy = {
+    name = "Kacper Koniuszy";
+    github = "kkoniuszy";
+    githubId = 120419423;
+  };
   klden = {
     name = "Kenzyme Le";
     email = "kl@kenzymele.com";
@@ -880,6 +880,7 @@
   ./services/monitoring/osquery.nix
   ./services/monitoring/parsedmarc.nix
   ./services/monitoring/prometheus/alertmanager-irc-relay.nix
+  ./services/monitoring/prometheus/alertmanager-webhook-logger.nix
   ./services/monitoring/prometheus/alertmanager.nix
   ./services/monitoring/prometheus/default.nix
   ./services/monitoring/prometheus/exporters.nix
@@ -518,8 +518,9 @@ in {
       # recreate symlinks for desired components
       declare -a components=(${escapeShellArgs cfg.customComponents})
       for component in "''${components[@]}"; do
-        path="$(dirname $(find "$component" -name "manifest.json"))"
-        ln -fns "$path" "${cfg.configDir}/custom_components/"
+        readarray -t manifests < <(find "$component" -name manifest.json)
+        readarray -t paths < <(dirname "''${manifests[@]}")
+        ln -fns "''${paths[@]}" "${cfg.configDir}/custom_components/"
       done
     '';
   in
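The replaced pipeline assumed exactly one manifest.json per package; readarray collects every manifest that find returns, so a single package may now ship several integrations. A minimal sketch of a configuration that relies on this, mirroring the test updated below (where spook provides more than one integration):

  { pkgs, ... }: {
    services.home-assistant.customComponents =
      with pkgs.home-assistant-custom-components; [
        prometheus_sensor # a single manifest.json
        spook             # several manifest.json files in one package
      ];
  }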
@@ -324,7 +324,8 @@ in
       };
     preStart =
       let
-        version = pkgs.sourcehut.${srvsrht}.version;
+        package = pkgs.sourcehut.${srvsrht};
+        version = package.version;
         stateDir = "/var/lib/sourcehut/${srvsrht}";
       in
       mkBefore ''
@@ -336,14 +337,14 @@ in
         if test ! -e ${stateDir}/db; then
           # Setup the initial database.
           # Note that it stamps the alembic head afterward
-          ${cfg.python}/bin/${srvsrht}-initdb
+          ${package}/bin/${srvsrht}-initdb
           echo ${version} >${stateDir}/db
         fi

         ${optionalString cfg.settings.${iniKey}.migrate-on-upgrade ''
           if [ "$(cat ${stateDir}/db)" != "${version}" ]; then
             # Manage schema migrations using alembic
-            ${cfg.python}/bin/${srvsrht}-migrate -a upgrade head
+            ${package}/bin/${srvsrht}-migrate -a upgrade head
             echo ${version} >${stateDir}/db
           fi
         ''}
@@ -389,7 +390,7 @@ in
       after = [ "network.target" "${srvsrht}.service" ];
       serviceConfig = {
         Type = "oneshot";
-        ExecStart = "${cfg.python}/bin/${timerName}";
+        ExecStart = "${pkgs.sourcehut.${srvsrht}}/bin/${timerName}";
       };
     }
     (timer.service or { })
@@ -0,0 +1,70 @@
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.prometheus.alertmanagerWebhookLogger;
in
{
  options.services.prometheus.alertmanagerWebhookLogger = {
    enable = mkEnableOption "Alertmanager Webhook Logger";

    package = mkPackageOption pkgs "alertmanager-webhook-logger" { };

    extraFlags = mkOption {
      type = types.listOf types.str;
      default = [];
      description = "Extra command line options to pass to alertmanager-webhook-logger.";
    };
  };

  config = mkIf cfg.enable {
    systemd.services.alertmanager-webhook-logger = {
      description = "Alertmanager Webhook Logger";

      wantedBy = [ "multi-user.target" ];
      after = [ "network-online.target" ];
      wants = [ "network-online.target" ];

      serviceConfig = {
        ExecStart = ''
          ${cfg.package}/bin/alertmanager-webhook-logger \
            ${escapeShellArgs cfg.extraFlags}
        '';

        DynamicUser = true;
        NoNewPrivileges = true;

        ProtectProc = "invisible";
        ProtectSystem = "strict";
        ProtectHome = "tmpfs";

        PrivateTmp = true;
        PrivateDevices = true;
        PrivateIPC = true;

        ProtectHostname = true;
        ProtectClock = true;
        ProtectKernelTunables = true;
        ProtectKernelModules = true;
        ProtectKernelLogs = true;
        ProtectControlGroups = true;

        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
        RestrictRealtime = true;
        RestrictSUIDSGID = true;

        SystemCallFilter = [
          "@system-service"
          "~@cpu-emulation"
          "~@privileged"
          "~@reboot"
          "~@setuid"
          "~@swap"
        ];
      };
    };
  };

  meta.maintainers = [ maintainers.jpds ];
}
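A minimal sketch of how the new module is wired up, using the endpoint and receiver shape from the alertmanager test added below (the host name "logger" and port 6725 are taken from that test):

  {
    # on the logging host:
    services.prometheus.alertmanagerWebhookLogger.enable = true;

    # on the Alertmanager host, route alerts to it via a webhook receiver:
    services.prometheus.alertmanager.configuration.receivers = [
      {
        name = "test";
        webhook_configs = [
          {
            url = "http://logger:6725";
            send_resolved = true;
          }
        ];
      }
    ];
  }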
@@ -775,7 +775,7 @@ in {
   printing-service = handleTest ./printing.nix { socket = false; };
   private-gpt = handleTest ./private-gpt.nix {};
   privoxy = handleTest ./privoxy.nix {};
-  prometheus = handleTest ./prometheus.nix {};
+  prometheus = handleTest ./prometheus {};
   prometheus-exporters = handleTest ./prometheus-exporters.nix {};
   prosody = handleTest ./xmpp/prosody.nix {};
   prosody-mysql = handleTest ./xmpp/prosody-mysql.nix {};
@@ -968,6 +968,7 @@ in {
   teleport = handleTest ./teleport.nix {};
   thelounge = handleTest ./thelounge.nix {};
   terminal-emulators = handleTest ./terminal-emulators.nix {};
+  thanos = handleTest ./thanos.nix {};
   tiddlywiki = handleTest ./tiddlywiki.nix {};
   tigervnc = handleTest ./tigervnc.nix {};
   timescaledb = handleTest ./timescaledb.nix {};
@@ -44,6 +44,8 @@ in {
     # test loading custom components
     customComponents = with pkgs.home-assistant-custom-components; [
       prometheus_sensor
+      # tests loading multiple components from a single package
+      spook
     ];

     # test loading lovelace modules
@@ -179,7 +181,8 @@ in {

     with subtest("Check that custom components get installed"):
         hass.succeed("test -f ${configDir}/custom_components/prometheus_sensor/manifest.json")
-        hass.wait_until_succeeds("journalctl -u home-assistant.service | grep -q 'We found a custom integration prometheus_sensor which has not been tested by Home Assistant'")
+        for integration in ("prometheus_sensor", "spook", "spook_inverse"):
+            hass.wait_until_succeeds(f"journalctl -u home-assistant.service | grep -q 'We found a custom integration {integration} which has not been tested by Home Assistant'")

     with subtest("Check that lovelace modules are referenced and fetchable"):
         hass.succeed("grep -q 'mini-graph-card-bundle.js' '${configDir}/configuration.yaml'")
@@ -228,7 +231,8 @@ in {
         cursor = get_journal_cursor()
         hass.succeed("${system}/specialisation/removeCustomThings/bin/switch-to-configuration test")
         hass.fail("grep -q 'mini-graph-card-bundle.js' '${configDir}/ui-lovelace.yaml'")
-        hass.fail("test -f ${configDir}/custom_components/prometheus_sensor/manifest.json")
+        for integration in ("prometheus_sensor", "spook", "spook_inverse"):
+            hass.fail(f"test -f ${configDir}/custom_components/{integration}/manifest.json")
         wait_for_homeassistant(cursor)

     with subtest("Check that no errors were logged"):
nixos/tests/prometheus/alertmanager.nix (new file, 148 lines)
@@ -0,0 +1,148 @@
import ../make-test-python.nix ({ lib, pkgs, ... }:

{
  name = "prometheus-alertmanager";

  nodes = {
    prometheus = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";

        alertmanagers = [
          {
            scheme = "http";
            static_configs = [
              {
                targets = [
                  "alertmanager:${toString config.services.prometheus.alertmanager.port}"
                ];
              }
            ];
          }
        ];

        rules = [
          ''
            groups:
              - name: test
                rules:
                  - alert: InstanceDown
                    expr: up == 0
                    for: 5s
                    labels:
                      severity: page
                    annotations:
                      summary: "Instance {{ $labels.instance }} down"
          ''
        ];

        scrapeConfigs = [
          {
            job_name = "alertmanager";
            static_configs = [
              {
                targets = [
                  "alertmanager:${toString config.services.prometheus.alertmanager.port}"
                ];
              }
            ];
          }
          {
            job_name = "node";
            static_configs = [
              {
                targets = [
                  "node:${toString config.services.prometheus.exporters.node.port}"
                ];
              }
            ];
          }
        ];
      };
    };

    alertmanager = { config, pkgs, ... }: {
      services.prometheus.alertmanager = {
        enable = true;
        openFirewall = true;

        configuration = {
          global = {
            resolve_timeout = "1m";
          };

          route = {
            # Root route node
            receiver = "test";
            group_by = ["..."];
            continue = false;
            group_wait = "1s";
            group_interval = "15s";
            repeat_interval = "24h";
          };

          receivers = [
            {
              name = "test";
              webhook_configs = [
                {
                  url = "http://logger:6725";
                  send_resolved = true;
                  max_alerts = 0;
                }
              ];
            }
          ];
        };
      };
    };

    logger = { config, pkgs, ... }: {
      networking.firewall.allowedTCPPorts = [ 6725 ];

      services.prometheus.alertmanagerWebhookLogger.enable = true;
    };
  };

  testScript = ''
    alertmanager.wait_for_unit("alertmanager")
    alertmanager.wait_for_open_port(9093)
    alertmanager.wait_until_succeeds("curl -s http://127.0.0.1:9093/-/ready")
    #alertmanager.wait_until_succeeds("journalctl -o cat -u alertmanager.service | grep 'version=${pkgs.prometheus-alertmanager.version}'")

    logger.wait_for_unit("alertmanager-webhook-logger")
    logger.wait_for_open_port(6725)

    prometheus.wait_for_unit("prometheus")
    prometheus.wait_for_open_port(9090)

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"alertmanager\"\}==1)' | "
        + "jq '.data.result[0].value[1]' | grep '\"1\"'"
    )

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(alertmanager_build_info)%20by%20(version)' | "
        + "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus-alertmanager.version}\"'"
    )

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\}!=1)' | "
        + "jq '.data.result[0].value[1]' | grep '\"1\"'"
    )

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=alertmanager_notifications_total\{integration=\"webhook\"\}' | "
        + "jq '.data.result[0].value[1]' | grep -v '\"0\"'"
    )

    logger.wait_until_succeeds(
        "journalctl -o cat -u alertmanager-webhook-logger.service | grep '\"alertname\":\"InstanceDown\"'"
    )
  '';
})
nixos/tests/prometheus/config-reload.nix (new file, 116 lines)
@@ -0,0 +1,116 @@
import ../make-test-python.nix ({ lib, pkgs, ... }:

{
  name = "prometheus-config-reload";

  nodes = {
    prometheus = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        enableReload = true;
        globalConfig.scrape_interval = "2s";
        scrapeConfigs = [
          {
            job_name = "prometheus";
            static_configs = [
              {
                targets = [
                  "prometheus:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
        ];
      };

      specialisation = {
        "prometheus-config-change" = {
          configuration = {
            environment.systemPackages = [ pkgs.yq ];

            # This configuration just adds a new prometheus job
            # to scrape the node_exporter metrics of the s3 machine.
            services.prometheus = {
              scrapeConfigs = [
                {
                  job_name = "node";
                  static_configs = [
                    {
                      targets = [ "node:${toString config.services.prometheus.exporters.node.port}" ];
                    }
                  ];
                }
              ];
            };
          };
        };
      };
    };
  };

  testScript = ''
    prometheus.wait_for_unit("prometheus")
    prometheus.wait_for_open_port(9090)

    # Check if switching to a NixOS configuration that changes the prometheus
    # configuration reloads (instead of restarts) prometheus before the switch
    # finishes successfully:
    with subtest("config change reloads prometheus"):
        import json
        # We check if prometheus has finished reloading by looking for the message
        # "Completed loading of configuration file" in the journal between the start
        # and finish of switching to the new NixOS configuration.
        #
        # To mark the start we record the journal cursor before starting the switch:
        cursor_before_switching = json.loads(
            prometheus.succeed("journalctl -n1 -o json --output-fields=__CURSOR")
        )["__CURSOR"]

        # Now we switch:
        prometheus_config_change = prometheus.succeed(
            "readlink /run/current-system/specialisation/prometheus-config-change"
        ).strip()
        prometheus.succeed(prometheus_config_change + "/bin/switch-to-configuration test")

        # Next we retrieve all logs since the start of switching:
        logs_after_starting_switching = prometheus.succeed(
            """
            journalctl --after-cursor='{cursor_before_switching}' -o json --output-fields=MESSAGE
            """.format(
                cursor_before_switching=cursor_before_switching
            )
        )

        # Finally we check if the message "Completed loading of configuration file"
        # occurs before the "finished switching to system configuration" message:
        finished_switching_msg = (
            "finished switching to system configuration " + prometheus_config_change
        )
        reloaded_before_switching_finished = False
        finished_switching = False
        for log_line in logs_after_starting_switching.split("\n"):
            msg = json.loads(log_line)["MESSAGE"]
            if "Completed loading of configuration file" in msg:
                reloaded_before_switching_finished = True
            if msg == finished_switching_msg:
                finished_switching = True
                break

        assert reloaded_before_switching_finished
        assert finished_switching

        # Check if the reloaded config includes the new node job:
        prometheus.succeed(
            """
            curl -sf http://127.0.0.1:9090/api/v1/status/config \
              | jq -r .data.yaml \
              | yq '.scrape_configs | any(.job_name == "node")' \
              | grep true
            """
        )
  '';
})
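The behaviour exercised here comes from services.prometheus.enableReload: with it set, a configuration-only change is applied by reloading the running unit ("Completed loading of configuration file") rather than restarting it. The option in isolation, as a sketch:

  {
    services.prometheus = {
      enable = true;
      enableReload = true; # reload instead of restart on config-only changes
    };
  }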
nixos/tests/prometheus/default.nix (new file, 13 lines)
@@ -0,0 +1,13 @@
{ system ? builtins.currentSystem
, config ? { }
, pkgs ? import ../../.. { inherit system config; }
}:

{
  alertmanager = import ./alertmanager.nix { inherit system pkgs; };
  config-reload = import ./config-reload.nix { inherit system pkgs; };
  federation = import ./federation.nix { inherit system pkgs; };
  prometheus-pair = import ./prometheus-pair.nix { inherit system pkgs; };
  pushgateway = import ./pushgateway.nix { inherit system pkgs; };
  remote-write = import ./remote-write.nix { inherit system pkgs; };
}
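Splitting the old ./prometheus.nix into this directory lets individual packages pin only the tests they need; the package updates later in this commit use exactly this shape, e.g.:

  # in a package expression that takes nixosTests as an argument:
  passthru.tests = { inherit (nixosTests.prometheus) alertmanager; };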
nixos/tests/prometheus/federation.nix (new file, 213 lines)
@@ -0,0 +1,213 @@
import ../make-test-python.nix ({ lib, pkgs, ... }:

{
  name = "prometheus-federation";

  nodes = {
    global1 = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";

        scrapeConfigs = [
          {
            job_name = "federate";
            honor_labels = true;
            metrics_path = "/federate";

            params = {
              "match[]" = [
                "{job=\"node\"}"
                "{job=\"prometheus\"}"
              ];
            };

            static_configs = [
              {
                targets = [
                  "prometheus1:${toString config.services.prometheus.port}"
                  "prometheus2:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
          {
            job_name = "prometheus";
            static_configs = [
              {
                targets = [
                  "global1:${toString config.services.prometheus.port}"
                  "global2:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
        ];
      };
    };

    global2 = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";

        scrapeConfigs = [
          {
            job_name = "federate";
            honor_labels = true;
            metrics_path = "/federate";

            params = {
              "match[]" = [
                "{job=\"node\"}"
                "{job=\"prometheus\"}"
              ];
            };

            static_configs = [
              {
                targets = [
                  "prometheus1:${toString config.services.prometheus.port}"
                  "prometheus2:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
          {
            job_name = "prometheus";
            static_configs = [
              {
                targets = [
                  "global1:${toString config.services.prometheus.port}"
                  "global2:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
        ];
      };
    };

    prometheus1 = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";

        scrapeConfigs = [
          {
            job_name = "node";
            static_configs = [
              {
                targets = [
                  "node1:${toString config.services.prometheus.exporters.node.port}"
                ];
              }
            ];
          }
          {
            job_name = "prometheus";
            static_configs = [
              {
                targets = [
                  "prometheus1:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
        ];
      };
    };

    prometheus2 = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";

        scrapeConfigs = [
          {
            job_name = "node";
            static_configs = [
              {
                targets = [
                  "node2:${toString config.services.prometheus.exporters.node.port}"
                ];
              }
            ];
          }
          {
            job_name = "prometheus";
            static_configs = [
              {
                targets = [
                  "prometheus2:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
        ];
      };
    };

    node1 = { config, pkgs, ... }: {
      services.prometheus.exporters.node = {
        enable = true;
        openFirewall = true;
      };
    };

    node2 = { config, pkgs, ... }: {
      services.prometheus.exporters.node = {
        enable = true;
        openFirewall = true;
      };
    };
  };

  testScript = ''
    for machine in node1, node2:
        machine.wait_for_unit("prometheus-node-exporter")
        machine.wait_for_open_port(9100)

    for machine in prometheus1, prometheus2, global1, global2:
        machine.wait_for_unit("prometheus")
        machine.wait_for_open_port(9090)

    # Verify both servers got the same data from the exporter
    for machine in prometheus1, prometheus2:
        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\})' | "
            + "jq '.data.result[0].value[1]' | grep '\"1\"'"
        )
        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(prometheus_build_info)' | "
            + "jq '.data.result[0].value[1]' | grep '\"1\"'"
        )

    for machine in global1, global2:
        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\})' | "
            + "jq '.data.result[0].value[1]' | grep '\"2\"'"
        )

        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(prometheus_build_info)' | "
            + "jq '.data.result[0].value[1]' | grep '\"4\"'"
        )
  '';
})
nixos/tests/prometheus/prometheus-pair.nix (new file, 87 lines)
@@ -0,0 +1,87 @@
import ../make-test-python.nix ({ lib, pkgs, ... }:

{
  name = "prometheus-pair";

  nodes = {
    prometheus1 = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";
        scrapeConfigs = [
          {
            job_name = "prometheus";
            static_configs = [
              {
                targets = [
                  "prometheus1:${toString config.services.prometheus.port}"
                  "prometheus2:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
        ];
      };
    };

    prometheus2 = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";
        scrapeConfigs = [
          {
            job_name = "prometheus";
            static_configs = [
              {
                targets = [
                  "prometheus1:${toString config.services.prometheus.port}"
                  "prometheus2:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
        ];
      };
    };
  };

  testScript = ''
    for machine in prometheus1, prometheus2:
        machine.wait_for_unit("prometheus")
        machine.wait_for_open_port(9090)
        machine.wait_until_succeeds("journalctl -o cat -u prometheus.service | grep 'version=${pkgs.prometheus.version}'")
        machine.wait_until_succeeds("curl -sSf http://localhost:9090/-/healthy")

    # Prometheii ready - run some queries
    for machine in prometheus1, prometheus2:
        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=prometheus_build_info\{instance=\"prometheus1:9090\",version=\"${pkgs.prometheus.version}\"\}' | "
            + "jq '.data.result[0].value[1]' | grep '\"1\"'"
        )

        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=prometheus_build_info\{instance=\"prometheus1:9090\"\}' | "
            + "jq '.data.result[0].value[1]' | grep '\"1\"'"
        )

        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(prometheus_build_info)%20by%20(version)' | "
            + "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus.version}\"'"
        )

        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(prometheus_build_info)%20by%20(version)' | "
            + "jq '.data.result[0].value[1]' | grep '\"2\"'"
        )

    prometheus1.log(prometheus1.succeed("systemd-analyze security prometheus.service | grep -v '✓'"))
  '';
})
nixos/tests/prometheus/pushgateway.nix (new file, 94 lines)
@@ -0,0 +1,94 @@
import ../make-test-python.nix ({ lib, pkgs, ... }:

{
  name = "prometheus-pushgateway";

  nodes = {
    prometheus = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";

        scrapeConfigs = [
          {
            job_name = "pushgateway";
            static_configs = [
              {
                targets = [
                  "pushgateway:9091"
                ];
              }
            ];
          }
        ];
      };
    };

    pushgateway = { config, pkgs, ... }: {
      networking.firewall.allowedTCPPorts = [ 9091 ];

      services.prometheus.pushgateway = {
        enable = true;
      };
    };

    client = { config, pkgs, ... }: {
    };
  };

  testScript = ''
    pushgateway.wait_for_unit("pushgateway")
    pushgateway.wait_for_open_port(9091)
    pushgateway.wait_until_succeeds("curl -s http://127.0.0.1:9091/-/ready")
    pushgateway.wait_until_succeeds("journalctl -o cat -u pushgateway.service | grep 'version=${pkgs.prometheus-pushgateway.version}'")

    prometheus.wait_for_unit("prometheus")
    prometheus.wait_for_open_port(9090)

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"pushgateway\"\})' | "
        + "jq '.data.result[0].value[1]' | grep '\"1\"'"
    )

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(pushgateway_build_info)%20by%20(version)' | "
        + "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus-pushgateway.version}\"'"
    )

    client.wait_for_unit("network-online.target")

    # Add a metric and check in Prometheus
    client.wait_until_succeeds(
        "echo 'some_metric 3.14' | curl --data-binary @- http://pushgateway:9091/metrics/job/some_job"
    )

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=some_metric' | "
        + "jq '.data.result[0].value[1]' | grep '\"3.14\"'"
    )

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=absent(some_metric)' | "
        + "jq '.data.result[0].value[1]' | grep 'null'"
    )

    # Delete the metric, check not in Prometheus
    client.wait_until_succeeds(
        "curl -X DELETE http://pushgateway:9091/metrics/job/some_job"
    )

    prometheus.wait_until_fails(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=some_metric' | "
        + "jq '.data.result[0].value[1]' | grep '\"3.14\"'"
    )

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=absent(some_metric)' | "
        + "jq '.data.result[0].value[1]' | grep '\"1\"'"
    )
  '';
})
nixos/tests/prometheus/remote-write.nix (new file, 73 lines)
@@ -0,0 +1,73 @@
import ../make-test-python.nix ({ lib, pkgs, ... }:

{
  name = "prometheus-remote-write";

  nodes = {
    receiver = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";

        extraFlags = [ "--web.enable-remote-write-receiver" ];
      };
    };

    prometheus = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";

        remoteWrite = [
          {
            url = "http://receiver:9090/api/v1/write";
          }
        ];

        scrapeConfigs = [
          {
            job_name = "node";
            static_configs = [
              {
                targets = [
                  "node:${toString config.services.prometheus.exporters.node.port}"
                ];
              }
            ];
          }
        ];
      };
    };

    node = { config, pkgs, ... }: {
      services.prometheus.exporters.node = {
        enable = true;
        openFirewall = true;
      };
    };
  };

  testScript = ''
    node.wait_for_unit("prometheus-node-exporter")
    node.wait_for_open_port(9100)

    for machine in prometheus, receiver:
        machine.wait_for_unit("prometheus")
        machine.wait_for_open_port(9090)

    # Verify both servers got the same data from the exporter
    for machine in prometheus, receiver:
        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=node_exporter_build_info\{instance=\"node:9100\"\}' | "
            + "jq '.data.result[0].value[1]' | grep '\"1\"'"
        )
  '';
})
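The pairing under test: the receiver enables Prometheus's remote-write endpoint via a flag, while the sender forwards every scraped sample to it. A reduced sketch of the two node configurations from the file above:

  {
    receiver.services.prometheus = {
      enable = true;
      # accept samples pushed by other servers:
      extraFlags = [ "--web.enable-remote-write-receiver" ];
    };
    prometheus.services.prometheus = {
      enable = true;
      # forward scraped samples to the receiver:
      remoteWrite = [ { url = "http://receiver:9090/api/v1/write"; } ];
    };
  }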
@@ -212,8 +212,6 @@ in import ./make-test-python.nix {
  };

  testScript = { nodes, ... } : ''
-    import json
-
    # Before starting the other machines we first make sure that our S3 service is online
    # and has a bucket added for thanos:
    s3.start()
@@ -289,61 +287,5 @@ in import ./make-test-python.nix {
      + "jq .thanos.labels.some_label | "
      + "grep 'required by thanos'"
    )
-
-    # Check if switching to a NixOS configuration that changes the prometheus
-    # configuration reloads (instead of restarts) prometheus before the switch
-    # finishes successfully:
-    with subtest("config change reloads prometheus"):
-        # We check if prometheus has finished reloading by looking for the message
-        # "Completed loading of configuration file" in the journal between the start
-        # and finish of switching to the new NixOS configuration.
-        #
-        # To mark the start we record the journal cursor before starting the switch:
-        cursor_before_switching = json.loads(
-            prometheus.succeed("journalctl -n1 -o json --output-fields=__CURSOR")
-        )["__CURSOR"]
-
-        # Now we switch:
-        prometheus_config_change = prometheus.succeed(
-            "readlink /run/current-system/specialisation/prometheus-config-change"
-        ).strip()
-        prometheus.succeed(prometheus_config_change + "/bin/switch-to-configuration test")
-
-        # Next we retrieve all logs since the start of switching:
-        logs_after_starting_switching = prometheus.succeed(
-            """
-            journalctl --after-cursor='{cursor_before_switching}' -o json --output-fields=MESSAGE
-            """.format(
-                cursor_before_switching=cursor_before_switching
-            )
-        )
-
-        # Finally we check if the message "Completed loading of configuration file"
-        # occurs before the "finished switching to system configuration" message:
-        finished_switching_msg = (
-            "finished switching to system configuration " + prometheus_config_change
-        )
-        reloaded_before_switching_finished = False
-        finished_switching = False
-        for log_line in logs_after_starting_switching.split("\n"):
-            msg = json.loads(log_line)["MESSAGE"]
-            if "Completed loading of configuration file" in msg:
-                reloaded_before_switching_finished = True
-            if msg == finished_switching_msg:
-                finished_switching = True
-                break
-
-        assert reloaded_before_switching_finished
-        assert finished_switching
-
-        # Check if the reloaded config includes the new s3-node_exporter job:
-        prometheus.succeed(
-            """
-            curl -sf http://127.0.0.1:${toString queryPort}/api/v1/status/config \
-              | jq -r .data.yaml \
-              | yq '.scrape_configs | any(.job_name == "s3-node_exporter")' \
-              | grep true
-            """
-        )
    '';
}
@@ -4453,10 +4453,10 @@
   elpaBuild {
     pname = "org";
     ename = "org";
-    version = "9.7pre0.20240521.135840";
+    version = "9.7pre0.20240530.133120";
     src = fetchurl {
-      url = "https://elpa.gnu.org/devel/org-9.7pre0.20240521.135840.tar";
-      sha256 = "06j7kjbpd390i0kqvvnlkpdzqkwiicbdgjymif338l0qx2kl4sg5";
+      url = "https://elpa.gnu.org/devel/org-9.7pre0.20240530.133120.tar";
+      sha256 = "sha256-DuuLDBJKI2LwC0PH9PtujcPvaqaKLWYij+KzP1U7o9M=";
     };
     packageRequires = [ emacs ];
     meta = {
@@ -64,6 +64,26 @@ self: let
    '';
  });

+  org = super.org.overrideAttrs (old: {
+    dontUnpack = false;
+    patches = old.patches or [ ] ++ [
+      # security fix backported from 9.7.5
+      (pkgs.fetchpatch {
+        url = "https://git.savannah.gnu.org/cgit/emacs/org-mode.git/patch/?id=f4cc61636947b5c2f0afc67174dd369fe3277aa8";
+        hash = "sha256-bGgsnTSn6SMu1J8P2BfJjrKx2845FCsUB2okcIrEjDg=";
+        stripLen = 1;
+      })
+    ];
+    postPatch = old.postPatch or "" + "\n" + ''
+      pushd ..
+      local content_directory=${old.ename}-${old.version}
+      src=$PWD/$content_directory.tar
+      tar --create --verbose --file=$src $content_directory
+      popd
+    '';
+    dontBuild = true;
+  });
+
  pq = super.pq.overrideAttrs (old: {
    buildInputs = (old.buildInputs or [ ]) ++ [ pkgs.postgresql ];
  });
@@ -141,6 +141,26 @@ self: let
    };
  });

+  org = super.org.overrideAttrs (old: {
+    dontUnpack = false;
+    patches = old.patches or [ ] ++ [
+      # security fix backported from 9.7.5
+      (pkgs.fetchpatch {
+        url = "https://git.savannah.gnu.org/cgit/emacs/org-mode.git/patch/?id=f4cc61636947b5c2f0afc67174dd369fe3277aa8";
+        hash = "sha256-bGgsnTSn6SMu1J8P2BfJjrKx2845FCsUB2okcIrEjDg=";
+        stripLen = 1;
+      })
+    ];
+    postPatch = old.postPatch or "" + "\n" + ''
+      pushd ..
+      local content_directory=${old.ename}-${old.version}
+      src=$PWD/$content_directory.tar
+      tar --create --verbose --file=$src $content_directory
+      popd
+    '';
+    dontBuild = true;
+  });
+
  plz = super.plz.overrideAttrs (
    old: {
      dontUnpack = false;
@@ -93,6 +93,12 @@ in
      url = "https://gitweb.gentoo.org/proj/emacs-patches.git/plain/emacs/28.2/12_all_org-remote-unsafe.patch?id=af40e12cb742510e5d40a06ffc6dfca97e340dd6";
      hash = "sha256-b6WU1o3PfDV/6BTPfPNUFny6oERJCNsDrvflxX3Yvek=";
    })
+
+    # security fix from Emacs 29.4
+    (fetchpatch {
+      url = "https://git.savannah.gnu.org/cgit/emacs.git/patch/?id=c645e1d8205f0f0663ec4a2d27575b238c646c7c";
+      hash = "sha256-G+gGQx5w3KuWMotR1n/sYYL8WyAABYW3fUPeffMMs38=";
+    })
  ];
});

@@ -1,3 +1,12 @@
+# For compatibility, convert makeWrapperArgs to an array unless we are using
+# structured attributes. That is, we ensure that makeWrapperArgs is always an
+# array.
+# See https://github.com/NixOS/nixpkgs/blob/858f4db3048c5be3527e183470e93c1a72c5727c/pkgs/build-support/dotnet/build-dotnet-module/hooks/dotnet-fixup-hook.sh#L1-L3
+# and https://github.com/NixOS/nixpkgs/pull/313005#issuecomment-2175482920
+if [[ -z $__structuredAttrs ]]; then
+    makeWrapperArgs=( ${makeWrapperArgs-} )
+fi
+
# First argument is the executable you want to wrap,
# the second is the destination for the wrapper.
wrapDotnetProgram() {
@@ -17,10 +26,8 @@ dotnetFromEnv'

    if [[ -n $__structuredAttrs ]]; then
        local -r dotnetRuntimeDepsArray=( "${dotnetRuntimeDeps[@]}" )
-        local -r makeWrapperArgsArray=( "${makeWrapperArgs[@]}" )
    else
        local -r dotnetRuntimeDepsArray=($dotnetRuntimeDeps)
-        local -r makeWrapperArgsArray=($makeWrapperArgs)
    fi

    local dotnetRuntimeDepsFlags=()
@@ -49,7 +56,7 @@ dotnetFromEnv'
        "${dotnetRuntimeDepsFlags[@]}" \
        "${dotnetRootFlagsArray[@]}" \
        "${gappsWrapperArgs[@]}" \
-        "${makeWrapperArgsArray[@]}"
+        "${makeWrapperArgs[@]}"

    echo "installed wrapper to "$2""
}
pkgs/by-name/al/alertmanager-webhook-logger/package.nix (new file, 33 lines)
@@ -0,0 +1,33 @@
{ lib
, stdenv
, buildGoModule
, fetchFromGitHub
, nixosTests
}:

buildGoModule rec {
  pname = "alertmanager-webhook-logger";
  version = "1.0";
  rev = "${version}";

  src = fetchFromGitHub {
    inherit rev;
    owner = "tomtom-international";
    repo = "alertmanager-webhook-logger";
    hash = "sha256-mJbpDiTwUsFm0lDKz8UE/YF6sBvcSSR6WWLrfKvtri4=";
  };

  vendorHash = "sha256-gKtOoM9TuEIHgvSjZhqWmdexG2zDjlPuM0HjjP52DOI=";

  doCheck = true;

  passthru.tests = { inherit (nixosTests.prometheus) alertmanager; };

  meta = with lib; {
    description = "Generates (structured) log messages from Prometheus AlertManager webhook notifier";
    mainProgram = "alertmanager-webhook-logger";
    homepage = "https://github.com/tomtom-international/alertmanager-webhook-logger";
    license = licenses.asl20;
    maintainers = with maintainers; [ jpds ];
  };
}
@@ -2,11 +2,11 @@

stdenv.mkDerivation rec {
  pname = "gpsprune";
-  version = "24.1";
+  version = "24.2";

  src = fetchurl {
    url = "https://activityworkshop.net/software/gpsprune/gpsprune_${version}.jar";
-    sha256 = "sha256-k7aRuz/FOqQe2C0luiUwoFxnIvgM6opwsGZV7+hxJSM=";
+    sha256 = "sha256-wGg7WPj61yx7zMBIdH9ls18BnD1R713U5Vgc/kL9qYs=";
  };

  dontUnpack = true;
pkgs/by-name/mi/miru/darwin.nix (new file, 33 lines)
@@ -0,0 +1,33 @@
{
  stdenvNoCC,
  fetchurl,
  unzip,
  makeWrapper,

  pname,
  version,
  meta,
}:
stdenvNoCC.mkDerivation rec {
  inherit pname version meta;

  src = fetchurl {
    url = "https://github.com/ThaUnknown/miru/releases/download/v${version}/mac-Miru-${version}-mac.zip";
    hash = "sha256-OakGB5Fz1Tlxa/Uu7xHlKoEF9VRfWFQ9CjsR0eCRyQw=";
  };

  sourceRoot = ".";

  nativeBuildInputs = [
    unzip
    makeWrapper
  ];

  installPhase = ''
    runHook preInstall
    mkdir -p $out/{bin,Applications}
    cp -r Miru.app $out/Applications/
    makeWrapper $out/Applications/Miru.app/Contents/MacOS/Miru $out/bin/miru
    runHook postInstall
  '';
}
pkgs/by-name/mi/miru/linux.nix (new file, 31 lines)
@@ -0,0 +1,31 @@
{
  fetchurl,
  appimageTools,

  pname,
  version,
  meta,
}:

appimageTools.wrapType2 rec {
  inherit pname version meta;

  src = fetchurl {
    url = "https://github.com/ThaUnknown/miru/releases/download/v${version}/linux-Miru-${version}.AppImage";
    name = "${pname}-${version}.AppImage";
    sha256 = "sha256-aPutbJthUhZtBYkYuUB5v88OdhOrcnqw4AhnepfO1B4=";
  };

  extraInstallCommands =
    let
      contents = appimageTools.extractType2 { inherit pname version src; };
    in
    ''
      mkdir -p "$out/share/applications"
      mkdir -p "$out/share/lib/miru"
      cp -r ${contents}/{locales,resources} "$out/share/lib/miru"
      cp -r ${contents}/usr/* "$out"
      cp "${contents}/${pname}.desktop" "$out/share/applications/"
      substituteInPlace $out/share/applications/${pname}.desktop --replace 'Exec=AppRun' 'Exec=${pname}'
    '';
}
@@ -1,39 +1,22 @@
-{ lib
-, fetchurl
-, appimageTools
+{
+  stdenv,
+  lib,
+  callPackage,
}:

-appimageTools.wrapType2 rec {
+let
  pname = "miru";
  version = "5.1.4";

-  src = fetchurl {
-    url = "https://github.com/ThaUnknown/miru/releases/download/v${version}/linux-Miru-${version}.AppImage";
-    name = "${pname}-${version}.AppImage";
-    sha256 = "sha256-aPutbJthUhZtBYkYuUB5v88OdhOrcnqw4AhnepfO1B4=";
-  };
-
-  extraInstallCommands =
-    let
-      contents = appimageTools.extractType2 { inherit pname version src; };
-    in
-    ''
-      mkdir -p "$out/share/applications"
-      mkdir -p "$out/share/lib/miru"
-      cp -r ${contents}/{locales,resources} "$out/share/lib/miru"
-      cp -r ${contents}/usr/* "$out"
-      cp "${contents}/${pname}.desktop" "$out/share/applications/"
-      substituteInPlace $out/share/applications/${pname}.desktop --replace 'Exec=AppRun' 'Exec=${pname}'
-    '';
-
  meta = with lib; {
    description = "Stream anime torrents, real-time with no waiting for downloads";
    homepage = "https://miru.watch";
    license = licenses.gpl3Plus;
-    maintainers = [ maintainers.d4ilyrun ];
+    maintainers = with maintainers; [
+      d4ilyrun
+      matteopacini
+    ];
    mainProgram = "miru";

-    platforms = [ "x86_64-linux" ];
+    platforms = [ "x86_64-linux" ] ++ platforms.darwin;
    sourceProvenance = [ lib.sourceTypes.binaryNativeCode ];

    longDescription = ''
@@ -49,4 +32,8 @@ appimageTools.wrapType2 rec {
      instead of flat out closing MPV.
    '';
  };
-}
+in
+if stdenv.isDarwin then
+  callPackage ./darwin.nix { inherit pname version meta; }
+else
+  callPackage ./linux.nix { inherit pname version meta; }
@@ -1,5 +1,5 @@
{ lib, stdenv, fetchurl, dosfstools, libseccomp, makeWrapper, mtools, parted
-, pkg-config, qemu, syslinux, util-linux }:
+, pkg-config, qemu_test, syslinux, util-linux }:

let
  version = "0.8.1";
@@ -55,10 +55,12 @@ in stdenv.mkDerivation {
  '';

  doCheck = stdenv.hostPlatform.isLinux;
-  nativeCheckInputs = [ util-linux qemu ];
+  nativeCheckInputs = [ util-linux qemu_test ];
  checkPhase = ''
    runHook preCheck
    patchShebangs tests
+    substituteInPlace scripts/virtio-run/solo5-virtio-run.sh \
+      --replace " -no-acpi" ""
    ./tests/bats-core/bats ./tests/tests.bats
    runHook postCheck
  '';
@@ -17,7 +17,7 @@

buildPythonPackage rec {
  pname = "textual";
-  version = "0.68.0";
+  version = "0.70.0";
  pyproject = true;

  disabled = pythonOlder "3.8";
@@ -26,7 +26,7 @@ buildPythonPackage rec {
    owner = "Textualize";
    repo = "textual";
    rev = "refs/tags/v${version}";
-    hash = "sha256-nxQVxe7lXMcxyqh4SWcQ/A6eQcEzkSshKmpweHpn7JE=";
+    hash = "sha256-nOgiMT+q2pc94Q6g3MvKQVhGJYSGNnoE+5/foK62zLM=";
  };

  build-system = [ poetry-core ];
@@ -8,9 +8,9 @@ in

buildDotnetGlobalTool rec {
  pname = "csharp-ls";
-  version = "0.13.0";
+  version = "0.14.0";

-  nugetSha256 = "sha256-hhgMwDk3mT7E07REqZduTuEnS7D1tCgdxqN+MLNo9EI=";
+  nugetSha256 = "sha256-agcx7VPIqGhl3NzdGLPwXYJsRuvSjL4SdbNg9vFjIh4=";

  dotnet-sdk = sdk_8_0;
  dotnet-runtime = sdk_8_0;
@@ -48,6 +48,8 @@

  smartthinq-sensors = callPackage ./smartthinq-sensors {};

+  spook = callPackage ./spook {};
+
  tuya_local = callPackage ./tuya_local {};

  waste_collection_schedule = callPackage ./waste_collection_schedule {};
@@ -0,0 +1,38 @@
{
  lib,
  buildHomeAssistantComponent,
  fetchFromGitHub,
  pillow,
  fnv-hash-fast,
  psutil-home-assistant,
  sqlalchemy,
}:
buildHomeAssistantComponent rec {
  owner = "frenck";
  domain = "spook";
  version = "3.0.1";

  src = fetchFromGitHub {
    inherit owner;
    repo = domain;
    rev = "refs/tags/v${version}";
    hash = "sha256-ChHsevryWuim8BEFqXVkCOW9fGMrt5vol+B2SreMUws=";
  };

  patches = [./remove-sub-integration-symlink-hack.patch];

  dependencies = [
    pillow
    fnv-hash-fast
    psutil-home-assistant
    sqlalchemy
  ];

  meta = {
    changelog = "https://github.com/frenck/spook/releases/tag/v${version}";
    description = "Toolbox for Home Assistant";
    homepage = "https://spook.boo/";
    license = lib.licenses.mit;
    maintainers = with lib.maintainers; [kkoniuszy];
  };
}
@@ -0,0 +1,109 @@
diff --git a/custom_components/spook/__init__.py b/custom_components/spook/__init__.py
index 213fb2c..c7dc299 100644
--- a/custom_components/spook/__init__.py
+++ b/custom_components/spook/__init__.py
@@ -23,8 +23,6 @@ from .templating import SpookTemplateFunctionManager
 from .util import (
     async_ensure_template_environments_exists,
     async_forward_setup_entry,
-    link_sub_integrations,
-    unlink_sub_integrations,
 )
 
 if TYPE_CHECKING:
@@ -34,48 +32,6 @@ if TYPE_CHECKING:
 
 async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
     """Set up from a config entry."""
-    # Symlink all sub integrations from Spook to the parent integrations folder
-    # if one is missing, we have to restart Home Assistant.
-    # This is a workaround for the fact that Home Assistant doesn't support
-    # sub integrations.
-    if await hass.async_add_executor_job(link_sub_integrations, hass):
-        LOGGER.debug("Newly symlinked sub integrations, restarting Home Assistant")
-
-        @callback
-        def _restart(_: Event | None = None) -> None:
-            """Restart Home Assistant."""
-            hass.data["homeassistant_stop"] = asyncio.create_task(
-                hass.async_stop(RESTART_EXIT_CODE),
-            )
-
-        # User asked to restart Home Assistant in the config flow.
-        if hass.data.get(DOMAIN) == "Boo!":
-            _restart()
-            return False
-
-        # Should be OK to restart. Better to do it before anything else started.
-        if hass.state == CoreState.starting:
-            _restart()
-            return False
-
-        # If all other fails, but we are not running yet... wait for it.
-        if hass.state == CoreState.not_running:
-            # Listen to both... just in case.
-            hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _restart)
-            hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, _restart)
-            return False
-
-        LOGGER.info(
-            "Home Assistant needs to be restarted in for Spook to complete setting up",
-        )
-        ir.async_create_issue(
-            hass=hass,
-            domain=DOMAIN,
-            issue_id="restart_required",
-            is_fixable=True,
-            severity=ir.IssueSeverity.WARNING,
-            translation_key="restart_required",
-        )
 
     # Ensure template environments exists
     async_ensure_template_environments_exists(hass)
@@ -120,4 +76,3 @@ async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
 
 async def async_remove_entry(hass: HomeAssistant, _: ConfigEntry) -> None:
     """Remove a config entry."""
-    await hass.async_add_executor_job(unlink_sub_integrations, hass)
diff --git a/custom_components/spook/util.py b/custom_components/spook/util.py
index 32e9bd2..845d463 100644
--- a/custom_components/spook/util.py
+++ b/custom_components/spook/util.py
@@ -104,37 +104,6 @@ async def async_forward_platform_entry_setups_to_ectoplasm(
     )
 
 
-def link_sub_integrations(hass: HomeAssistant) -> bool:
-    """Link Spook sub integrations."""
-    LOGGER.debug("Linking up Spook sub integrations")
-
-    changes = False
-    for manifest in Path(__file__).parent.rglob("integrations/*/manifest.json"):
-        LOGGER.debug("Linking Spook sub integration: %s", manifest.parent.name)
-        dest = Path(hass.config.config_dir) / "custom_components" / manifest.parent.name
-        if not dest.exists():
-            src = (
-                Path(hass.config.config_dir)
-                / "custom_components"
-                / DOMAIN
-                / "integrations"
-                / manifest.parent.name
-            )
-            dest.symlink_to(src)
-            changes = True
-    return changes
-
-
-def unlink_sub_integrations(hass: HomeAssistant) -> None:
-    """Unlink Spook sub integrations."""
-    LOGGER.debug("Unlinking Spook sub integrations")
-    for manifest in Path(__file__).parent.rglob("integrations/*/manifest.json"):
-        LOGGER.debug("Unlinking Spook sub integration: %s", manifest.parent.name)
-        dest = Path(hass.config.config_dir) / "custom_components" / manifest.parent.name
-        if dest.exists():
-            dest.unlink()
-
-
 @callback
 def async_ensure_template_environments_exists(hass: HomeAssistant) -> None:
     """Ensure default template environments exist.
@@ -15,13 +15,13 @@ in

stdenv.mkDerivation rec {
  pname = "janus-gateway";
-  version = "1.2.2";
+  version = "1.2.3";

  src = fetchFromGitHub {
    owner = "meetecho";
    repo = pname;
    rev = "v${version}";
-    sha256 = "sha256-BS6ErS2Wi8pOy8oFmVnbujYPwClxX8e+GL4CcqvOL9E=";
+    sha256 = "sha256-3o9XxxTlWppq1mFgIUjstUFz6bT44mvBJa4FBgcc4Pc=";
  };

  nativeBuildInputs = [ autoreconfHook pkg-config gengetopt ];
@@ -3,6 +3,7 @@
, buildGoModule
, fetchFromGitHub
, installShellFiles
+, nixosTests
}:

buildGoModule rec {
@@ -39,6 +40,8 @@ buildGoModule rec {
    installShellCompletion amtool.zsh
  '';

+  passthru.tests = { inherit (nixosTests.prometheus) alertmanager; };
+
  meta = with lib; {
    description = "Alert dispatcher for the Prometheus monitoring system";
    homepage = "https://github.com/prometheus/alertmanager";
@@ -31,10 +31,10 @@
}:

let
-  version = "2.52.0";
+  version = "2.53.0";
  webUiStatic = fetchurl {
    url = "https://github.com/prometheus/prometheus/releases/download/v${version}/prometheus-web-ui-${version}.tar.gz";
-    hash = "sha256-/HTNDu/msUFLiqbD5ryVPlDOqfAT+8MEGNusmu3xH1w=";
+    hash = "sha256-oGhVpr47+blG7udu4Pcd+4Ndn0U+R47i69nheauDDoo=";
  };
in
buildGoModule rec {
@@ -47,10 +47,10 @@ buildGoModule rec {
    owner = "prometheus";
    repo = "prometheus";
    rev = "v${version}";
-    hash = "sha256-7HLSSls1aMZ6Zju2F9dArTFsCSESxDxWQ1a0QgD5Nxo=";
+    hash = "sha256-clI8/P6Gdl8WSK1DsXUj+M9f/h35GX961QtpyvGNxUY=";
  };

-  vendorHash = "sha256-IzYYfB8cvvQAsOqEREVaCe80V3hhhmOEYsl9E9NJq20=";
+  vendorHash = "sha256-0TR0jFEY+ls8C9EJEhSl2vX5VVZqZ8jGWDUalOddUPM=";

  excludedPackages = [ "documentation/prometheus-mixin" ];

@@ -1,4 +1,4 @@
-{ lib, buildGoModule, fetchFromGitHub, testers, prometheus-pushgateway }:
+{ lib, buildGoModule, fetchFromGitHub, nixosTests, testers, prometheus-pushgateway }:

buildGoModule rec {
  pname = "pushgateway";
@@ -23,8 +23,11 @@ buildGoModule rec {
    "-X github.com/prometheus/common/version.BuildDate=19700101-00:00:00"
  ];

-  passthru.tests.version = testers.testVersion {
-    package = prometheus-pushgateway;
+  passthru.tests = {
+    inherit (nixosTests.prometheus) pushgateway;
+    version = testers.testVersion {
+      package = prometheus-pushgateway;
+    };
  };

  meta = with lib; {
@@ -37,7 +37,7 @@ buildGoModule rec {
  passthru = {
    updateScript = nix-update-script { };
    tests = {
-      inherit (nixosTests) prometheus;
+      inherit (nixosTests) thanos;
      version = testers.testVersion {
        command = "thanos --version";
        package = thanos;
@@ -2,11 +2,11 @@

stdenvNoCC.mkDerivation rec {
  pname = "panoply";
-  version = "5.4.1";
+  version = "5.4.3";

  src = fetchurl {
    url = "https://www.giss.nasa.gov/tools/panoply/download/PanoplyJ-${version}.tgz";
-    sha256 = "sha256-C/9kFWDpv4bzqpsUcGpZn7P+fqU6teU39HibYhhva3o=";
+    sha256 = "sha256-xey1Og0TEMgCc9Me/np7BGnUNYTz98gMFzLm2IcQHtw=";
  };

  nativeBuildInputs = [ makeWrapper ];