Merge master into haskell-updates
commit 6863d678bc
@@ -9,7 +9,7 @@ let
    catAttrs
    concatLists
    concatMap
    count
    concatStringsSep
    elem
    filter
    findFirst
@@ -47,6 +47,20 @@ let
    showOption
    unknownModule
    ;

  showDeclPrefix = loc: decl: prefix:
    " - option(s) with prefix `${showOption (loc ++ [prefix])}' in module `${decl._file}'";
  showRawDecls = loc: decls:
    concatStringsSep "\n"
      (sort (a: b: a < b)
        (concatMap
          (decl: map
            (showDeclPrefix loc decl)
            (attrNames decl.options)
          )
          decls
        ));

in

rec {
@@ -474,26 +488,61 @@ rec {
          [{ inherit (module) file; inherit value; }]
        ) configs;

      # Convert an option tree decl to a submodule option decl
      optionTreeToOption = decl:
        if isOption decl.options
        then decl
        else decl // {
          options = mkOption {
            type = types.submoduleWith {
              modules = [ { options = decl.options; } ];
              # `null` is not intended for use by modules. It is an internal
              # value that means "whatever the user has declared elsewhere".
              # This might become obsolete with https://github.com/NixOS/nixpkgs/issues/162398
              shorthandOnlyDefinesConfig = null;
            };
          };
        };

      resultsByName = mapAttrs (name: decls:
        # We're descending into attribute ‘name’.
        let
          loc = prefix ++ [name];
          defns = defnsByName.${name} or [];
          defns' = defnsByName'.${name} or [];
          nrOptions = count (m: isOption m.options) decls;
          optionDecls = filter (m: isOption m.options) decls;
        in
          if nrOptions == length decls then
          if length optionDecls == length decls then
            let opt = fixupOptionType loc (mergeOptionDecls loc decls);
            in {
              matchedOptions = evalOptionValue loc opt defns';
              unmatchedDefns = [];
            }
          else if nrOptions != 0 then
            let
              firstOption = findFirst (m: isOption m.options) "" decls;
              firstNonOption = findFirst (m: !isOption m.options) "" decls;
            in
              throw "The option `${showOption loc}' in `${firstOption._file}' is a prefix of options in `${firstNonOption._file}'."
          else if optionDecls != [] then
            if all (x: x.options.type.name == "submodule") optionDecls
            # Raw options can only be merged into submodules. Merging into
            # attrsets might be nice, but ambiguous. Suppose we have
            # attrset as a `attrsOf submodule`. User declares option
            # attrset.foo.bar, this could mean:
            #  a. option `bar` is only available in `attrset.foo`
            #  b. option `foo.bar` is available in all `attrset.*`
            #  c. reject and require "<name>" as a reminder that it behaves like (b).
            #  d. magically combine (a) and (c).
            # All of the above are merely syntax sugar though.
            then
              let opt = fixupOptionType loc (mergeOptionDecls loc (map optionTreeToOption decls));
              in {
                matchedOptions = evalOptionValue loc opt defns';
                unmatchedDefns = [];
              }
            else
              let
                firstNonOption = findFirst (m: !isOption m.options) "" decls;
                nonOptions = filter (m: !isOption m.options) decls;
              in
              throw "The option `${showOption loc}' in module `${(lib.head optionDecls)._file}' would be a parent of the following options, but its type `${(lib.head optionDecls).options.type.description or "<no description>"}' does not support nested options.\n${
                showRawDecls loc nonOptions
              }"
          else
            mergeModules' loc decls defns) declsByName;

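Taken together with the `optionTreeToOption` conversion above, this hunk lets separate modules contribute bare option trees to one submodule-typed option. A minimal sketch of the resulting behavior, modeled on the test fixtures added later in this commit (option names are illustrative, not from the diff):

```nix
let
  lib = import <nixpkgs/lib>;
  inherit (lib) mkOption types;
  moduleA = {
    # Declares the submodule-typed option itself.
    options.server = mkOption {
      type = types.submoduleWith { modules = [ ]; };
      default = { };
    };
  };
  moduleB = {
    # Extends it with a bare option tree; before this change such a
    # declaration was rejected as a prefix of an existing option.
    options.server.port = mkOption {
      type = types.port;
      default = 80;
    };
  };
in (lib.evalModules { modules = [ moduleA moduleB ]; }).config.server.port
# => 80
```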
@@ -753,13 +802,14 @@ rec {
        compare = a: b: (a.priority or 1000) < (b.priority or 1000);
      in sort compare defs';

  /* Hack for backward compatibility: convert options of type
     optionSet to options of type submodule. FIXME: remove
     eventually. */
  fixupOptionType = loc: opt:
    let
      options = opt.options or
        (throw "Option `${showOption loc}' has type optionSet but has no option attribute, in ${showFiles opt.declarations}.");

      # Hack for backward compatibility: convert options of type
      # optionSet to options of type submodule. FIXME: remove
      # eventually.
      f = tp:
        if tp.name == "option set" || tp.name == "submodule" then
          throw "The option ${showOption loc} uses submodules without a wrapping type, in ${showFiles opt.declarations}."

@@ -231,7 +231,7 @@ rec {
      then true
      else opt.visible or true;
    readOnly = opt.readOnly or false;
    type = opt.type.description or null;
    type = opt.type.description or "unspecified";
  }
  // optionalAttrs (opt ? example) { example = scrubOptionValue opt.example; }
  // optionalAttrs (opt ? default) { default = scrubOptionValue opt.default; }

@@ -62,6 +62,13 @@ checkConfigError() {
checkConfigOutput '^false$' config.enable ./declare-enable.nix
checkConfigError 'The option .* does not exist. Definition values:\n\s*- In .*: true' config.enable ./define-enable.nix

checkConfigOutput '^1$' config.bare-submodule.nested ./declare-bare-submodule.nix ./declare-bare-submodule-nested-option.nix
checkConfigOutput '^2$' config.bare-submodule.deep ./declare-bare-submodule.nix ./declare-bare-submodule-deep-option.nix
checkConfigOutput '^42$' config.bare-submodule.nested ./declare-bare-submodule.nix ./declare-bare-submodule-nested-option.nix ./declare-bare-submodule-deep-option.nix ./define-bare-submodule-values.nix
checkConfigOutput '^420$' config.bare-submodule.deep ./declare-bare-submodule.nix ./declare-bare-submodule-nested-option.nix ./declare-bare-submodule-deep-option.nix ./define-bare-submodule-values.nix
checkConfigOutput '^2$' config.bare-submodule.deep ./declare-bare-submodule.nix ./declare-bare-submodule-deep-option.nix ./define-shorthandOnlyDefinesConfig-true.nix
checkConfigError 'The option .bare-submodule.deep. in .*/declare-bare-submodule-deep-option.nix. is already declared in .*/declare-bare-submodule-deep-option-duplicate.nix' config.bare-submodule.deep ./declare-bare-submodule.nix ./declare-bare-submodule-deep-option.nix ./declare-bare-submodule-deep-option-duplicate.nix

# Check integer types.
# unsigned
checkConfigOutput '^42$' config.value ./declare-int-unsigned-value.nix ./define-value-int-positive.nix
@@ -304,6 +311,12 @@ checkConfigOutput "10" config.processedToplevel ./raw.nix
checkConfigError "The option .multiple. is defined multiple times" config.multiple ./raw.nix
checkConfigOutput "bar" config.priorities ./raw.nix

## Option collision
checkConfigError \
  'The option .set. in module .*/declare-set.nix. would be a parent of the following options, but its type .attribute set of signed integers. does not support nested options.\n\s*- option[(]s[)] with prefix .set.enable. in module .*/declare-enable-nested.nix.' \
  config.set \
  ./declare-set.nix ./declare-enable-nested.nix

# Test that types.optionType merges types correctly
checkConfigOutput '^10$' config.theOption.int ./optionTypeMerging.nix
checkConfigOutput '^"hello"$' config.theOption.str ./optionTypeMerging.nix

lib/tests/modules/declare-bare-submodule-deep-option-duplicate.nix (new file, 10 lines)
@@ -0,0 +1,10 @@
{ lib, ... }:
let
  inherit (lib) mkOption types;
in
{
  options.bare-submodule.deep = mkOption {
    type = types.int;
    default = 2;
  };
}

lib/tests/modules/declare-bare-submodule-deep-option.nix (new file, 10 lines)
@@ -0,0 +1,10 @@
{ lib, ... }:
let
  inherit (lib) mkOption types;
in
{
  options.bare-submodule.deep = mkOption {
    type = types.int;
    default = 2;
  };
}

lib/tests/modules/declare-bare-submodule-nested-option.nix (new file, 19 lines)
@@ -0,0 +1,19 @@
{ config, lib, ... }:
let
  inherit (lib) mkOption types;
in
{
  options.bare-submodule = mkOption {
    type = types.submoduleWith {
      shorthandOnlyDefinesConfig = config.shorthandOnlyDefinesConfig;
      modules = [
        {
          options.nested = mkOption {
            type = types.int;
            default = 1;
          };
        }
      ];
    };
  };
}

lib/tests/modules/declare-bare-submodule.nix (new file, 18 lines)
@@ -0,0 +1,18 @@
{ config, lib, ... }:
let
  inherit (lib) mkOption types;
in
{
  options.bare-submodule = mkOption {
    type = types.submoduleWith {
      modules = [ ];
      shorthandOnlyDefinesConfig = config.shorthandOnlyDefinesConfig;
    };
    default = {};
  };

  # config-dependent options: won't recommend, but useful for making this test parameterized
  options.shorthandOnlyDefinesConfig = mkOption {
    default = false;
  };
}

lib/tests/modules/declare-set.nix (new file, 12 lines)
@@ -0,0 +1,12 @@
{ lib, ... }:

{
  options.set = lib.mkOption {
    default = { };
    example = { a = 1; };
    type = lib.types.attrsOf lib.types.int;
    description = ''
      Some descriptive text
    '';
  };
}

lib/tests/modules/define-bare-submodule-values.nix (new file, 4 lines)
@@ -0,0 +1,4 @@
{
  bare-submodule.nested = 42;
  bare-submodule.deep = 420;
}

lib/tests/modules/define-shorthandOnlyDefinesConfig-true.nix (new file, 1 line)
@@ -0,0 +1 @@
{ shorthandOnlyDefinesConfig = true; }
@@ -572,14 +572,18 @@ rec {
    let
      inherit (lib.modules) evalModules;

      coerce = unify: value: if isFunction value
        then setFunctionArgs (args: unify (value args)) (functionArgs value)
        else unify (if shorthandOnlyDefinesConfig then { config = value; } else value);
      shorthandToModule = if shorthandOnlyDefinesConfig == false
        then value: value
        else value: { config = value; };

      allModules = defs: imap1 (n: { value, file }:
        if isAttrs value || isFunction value then
          # Annotate the value with the location of its definition for better error messages
          coerce (lib.modules.unifyModuleSyntax file "${toString file}-${toString n}") value
        if isFunction value
        then setFunctionArgs
          (args: lib.modules.unifyModuleSyntax file "${toString file}-${toString n}" (value args))
          (functionArgs value)
        else if isAttrs value
        then
          lib.modules.unifyModuleSyntax file "${toString file}-${toString n}" (shorthandToModule value)
        else value
      ) defs;

@@ -647,7 +651,11 @@ rec {
          then lhs.specialArgs // rhs.specialArgs
          else throw "A submoduleWith option is declared multiple times with the same specialArgs \"${toString (attrNames intersecting)}\"";
        shorthandOnlyDefinesConfig =
          if lhs.shorthandOnlyDefinesConfig == rhs.shorthandOnlyDefinesConfig
          if lhs.shorthandOnlyDefinesConfig == null
          then rhs.shorthandOnlyDefinesConfig
          else if rhs.shorthandOnlyDefinesConfig == null
          then lhs.shorthandOnlyDefinesConfig
          else if lhs.shorthandOnlyDefinesConfig == rhs.shorthandOnlyDefinesConfig
          then lhs.shorthandOnlyDefinesConfig
          else throw "A submoduleWith option is declared multiple times with conflicting shorthandOnlyDefinesConfig values";
      };

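A hedged sketch of what `shorthandOnlyDefinesConfig` controls (option and attribute names invented for illustration):

```nix
let
  lib = import <nixpkgs/lib>;
  inherit (lib) mkOption types;
in {
  options.app = mkOption {
    type = types.submoduleWith {
      modules = [ { options.port = mkOption { type = types.port; default = 80; }; } ];
      # With this set to true, a bare definition such as
      #   app = { port = 8080; };
      # is coerced to { config.port = 8080; }, so shorthand values can
      # only define config, never declare options or imports.
      shorthandOnlyDefinesConfig = true;
    };
    default = { };
  };
}
```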
@@ -1901,6 +1901,12 @@
    githubId = 82591;
    name = "Carl Sverre";
  };
  carpinchomug = {
    email = "aki.suda@protonmail.com";
    github = "carpinchomug";
    githubId = 101536256;
    name = "Akiyoshi Suda";
  };
  cartr = {
    email = "carter.sande@duodecima.technology";
    github = "cartr";

@@ -27,9 +27,10 @@ The function `mkOption` accepts the following arguments.

`type`

:   The type of the option (see [](#sec-option-types)). It may be
    omitted, but that's not advisable since it may lead to errors that
    are hard to diagnose.
:   The type of the option (see [](#sec-option-types)). This
    argument is mandatory for nixpkgs modules. Setting this is highly
    recommended for the sake of documentation and type checking. In case it is
    not set, a fallback type with unspecified behavior is used.

`default`

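For instance, a declaration that satisfies this looks like the following (the option name is chosen purely for illustration):

```nix
{ lib, ... }: {
  options.services.example.port = lib.mkOption {
    type = lib.types.port;  # explicit type: documented and checked
    default = 8080;
    description = "Port the example service listens on.";
  };
}
```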
@@ -38,9 +38,11 @@ options = {
 <listitem>
  <para>
   The type of the option (see
   <xref linkend="sec-option-types" />). It may be omitted, but
   that’s not advisable since it may lead to errors that are hard
   to diagnose.
   <xref linkend="sec-option-types" />). This argument is
   mandatory for nixpkgs modules. Setting this is highly
   recommended for the sake of documentation and type checking.
   In case it is not set, a fallback type with unspecified
   behavior is used.
  </para>
 </listitem>
</varlistentry>

@@ -569,6 +569,25 @@
   because Python 2 is being retired from nixpkgs.
  </para>
 </listitem>
 <listitem>
  <para>
   Services in the <literal>hadoop</literal> module previously
   set <literal>openFirewall</literal> to true by default. This
   has now been changed to false. Node definitions for multi-node
   clusters would need <literal>openFirewall = true;</literal> to
   be added to hadoop services when upgrading from NixOS
   21.11.
  </para>
 </listitem>
 <listitem>
  <para>
   <literal>services.hadoop.yarn.nodemanager</literal> now uses
   cgroup-based CPU limit enforcement by default. Additionally,
   the option <literal>useCGroups</literal> was added to
   nodemanagers as an easy way to switch back to the old
   behavior.
  </para>
 </listitem>
 <listitem>
  <para>
   The <literal>wafHook</literal> hook now honors
@@ -1173,6 +1192,33 @@
   using the PyPy interpreter were added.
  </para>
 </listitem>
 <listitem>
  <para>
   Some improvements have been made to the
   <literal>hadoop</literal> module:
  </para>
  <itemizedlist spacing="compact">
   <listitem>
    <para>
     A <literal>gatewayRole</literal> option has been added,
     for deploying hadoop cluster configuration files to a node
     that does not have any active services
    </para>
   </listitem>
   <listitem>
    <para>
     Support for older versions of hadoop has been added to
     the module
    </para>
   </listitem>
   <listitem>
    <para>
     Overriding and extending site XML files has been made
     easier
    </para>
   </listitem>
  </itemizedlist>
 </listitem>
 <listitem>
  <para>
   If you are using Wayland you can choose to use the Ozone
@@ -1286,6 +1332,52 @@
   instead of <literal>configuration.nix</literal>.
  </para>
 </listitem>
 <listitem>
  <para>
   ORY Kratos was updated to version 0.8.3-alpha.1.pre.0, which
   introduces some breaking changes:
  </para>
  <itemizedlist spacing="compact">
   <listitem>
    <para>
     If you are relying on the SQLite images, update your
     Docker Pull commands as follows:
    </para>
    <itemizedlist spacing="compact">
     <listitem>
      <para>
       <literal>docker pull oryd/kratos:{version}</literal>
      </para>
     </listitem>
    </itemizedlist>
   </listitem>
   <listitem>
    <para>
     Additionally, all passwords now have to be at least 8
     characters long.
    </para>
   </listitem>
   <listitem>
    <para>
     For more details, see:
    </para>
    <itemizedlist spacing="compact">
     <listitem>
      <para>
       <link xlink:href="https://github.com/ory/kratos/releases/tag/v0.8.1-alpha.1">Release
       Notes for v0.8.1-alpha-1</link>
      </para>
     </listitem>
     <listitem>
      <para>
       <link xlink:href="https://github.com/ory/kratos/releases/tag/v0.8.2-alpha.1">Release
       Notes for v0.8.2-alpha-1</link>
      </para>
     </listitem>
    </itemizedlist>
   </listitem>
  </itemizedlist>
 </listitem>
 <listitem>
  <para>
   <literal>fetchFromSourcehut</literal> now allows fetching
@@ -1456,6 +1548,46 @@
   desktop environments as needed.
  </para>
 </listitem>
 <listitem>
  <para>
   The <literal>hadoop</literal> package has added support for
   <literal>aarch64-linux</literal> and
   <literal>aarch64-darwin</literal> as of 3.3.1
   (<link xlink:href="https://github.com/NixOS/nixpkgs/pull/158613">#158613</link>).
  </para>
 </listitem>
 <listitem>
  <para>
   The <literal>R</literal> package now builds again on
   <literal>aarch64-darwin</literal>
   (<link xlink:href="https://github.com/NixOS/nixpkgs/pull/158992">#158992</link>).
  </para>
 </listitem>
 <listitem>
  <para>
   The <literal>spark3</literal> package has been updated from
   3.1.2 to 3.2.1
   (<link xlink:href="https://github.com/NixOS/nixpkgs/pull/160075">#160075</link>):
  </para>
  <itemizedlist spacing="compact">
   <listitem>
    <para>
     Testing has been enabled for
     <literal>aarch64-linux</literal> in addition to
     <literal>x86_64-linux</literal>.
    </para>
   </listitem>
   <listitem>
    <para>
     The <literal>spark3</literal> package is now usable on
     <literal>aarch64-darwin</literal> as a result of
     <link xlink:href="https://github.com/NixOS/nixpkgs/pull/158613">#158613</link>
     and
     <link xlink:href="https://github.com/NixOS/nixpkgs/pull/158992">#158992</link>.
    </para>
   </listitem>
  </itemizedlist>
 </listitem>
</itemizedlist>
</section>
</section>

@@ -244,6 +244,14 @@ In addition to numerous new and upgraded packages, this release has the followin

- The MoinMoin wiki engine (`services.moinmoin`) has been removed, because Python 2 is being retired from nixpkgs.

- Services in the `hadoop` module previously set `openFirewall` to true by default.
  This has now been changed to false. Node definitions for multi-node clusters would need
  `openFirewall = true;` to be added to hadoop services when upgrading from NixOS 21.11.

- `services.hadoop.yarn.nodemanager` now uses cgroup-based CPU limit enforcement by default.
  Additionally, the option `useCGroups` was added to nodemanagers as an easy way to switch
  back to the old behavior.

- The `wafHook` hook now honors `NIX_BUILD_CORES` when `enableParallelBuilding` is not set explicitly. Packages can restore the old behaviour by setting `enableParallelBuilding=false`.

- `pkgs.claws-mail-gtk2`, representing Claws Mail's older release version three, was removed in order to get rid of Python 2.
@@ -436,6 +444,11 @@ In addition to numerous new and upgraded packages, this release has the followin

- The `writers.writePyPy2`/`writers.writePyPy3` and corresponding `writers.writePyPy2Bin`/`writers.writePyPy3Bin` convenience functions to create executable Python 2/3 scripts using the PyPy interpreter were added.

- Some improvements have been made to the `hadoop` module (see the sketch after this list):
  - A `gatewayRole` option has been added, for deploying hadoop cluster configuration files to a node that does not have any active services
  - Support for older versions of hadoop has been added to the module
  - Overriding and extending site XML files has been made easier

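A minimal sketch of the new `gatewayRole` option, assuming a host that should only receive the cluster's configuration files (hostname and address are illustrative):

```nix
{ config, ... }: {
  # Deploy /etc/hadoop-conf and the hadoop package to this machine
  # without running any HDFS or YARN services on it.
  services.hadoop = {
    gatewayRole.enable = true;
    coreSite."fs.defaultFS" = "hdfs://namenode.example:8020";
  };
}
```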
- If you are using Wayland you can choose to use the Ozone Wayland support
  in Chrome and several Electron apps by setting the environment variable
  `NIXOS_OZONE_WL=1` (for example via
@@ -482,6 +495,14 @@ In addition to numerous new and upgraded packages, this release has the followin

- `nixos-generate-config` now puts the dhcp configuration in `hardware-configuration.nix` instead of `configuration.nix`.

- ORY Kratos was updated to version 0.8.3-alpha.1.pre.0, which introduces some breaking changes:
  - If you are relying on the SQLite images, update your Docker Pull commands as follows:
    - `docker pull oryd/kratos:{version}`
  - Additionally, all passwords now have to be at least 8 characters long.
  - For more details, see:
    - [Release Notes for v0.8.1-alpha-1](https://github.com/ory/kratos/releases/tag/v0.8.1-alpha.1)
    - [Release Notes for v0.8.2-alpha-1](https://github.com/ory/kratos/releases/tag/v0.8.2-alpha.1)

- `fetchFromSourcehut` now allows fetching repositories recursively
  using `fetchgit` or `fetchhg` if the argument `fetchSubmodules`
  is set to `true`.
@@ -537,4 +558,13 @@ In addition to numerous new and upgraded packages, this release has the followin

- The polkit service, available at `security.polkit.enable`, is now disabled by default. It will automatically be enabled through services and desktop environments as needed.

- The `hadoop` package has added support for `aarch64-linux` and `aarch64-darwin` as of 3.3.1 ([#158613](https://github.com/NixOS/nixpkgs/pull/158613)).

- The `R` package now builds again on `aarch64-darwin` ([#158992](https://github.com/NixOS/nixpkgs/pull/158992)).

- The `spark3` package has been updated from 3.1.2 to 3.2.1 ([#160075](https://github.com/NixOS/nixpkgs/pull/160075)):

  - Testing has been enabled for `aarch64-linux` in addition to `x86_64-linux`.
  - The `spark3` package is now usable on `aarch64-darwin` as a result of [#158613](https://github.com/NixOS/nixpkgs/pull/158613) and [#158992](https://github.com/NixOS/nixpkgs/pull/158992).

<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->

@@ -66,14 +66,21 @@ for (k, v) in overrides.items():
    elif ov is not None or cur.get(ok, None) is None:
        cur[ok] = ov

severity = "error" if warningsAreErrors else "warning"

# check that every option has a description
hasWarnings = False
for (k, v) in options.items():
    if v.value.get('description', None) is None:
        severity = "error" if warningsAreErrors else "warning"
        hasWarnings = True
        print(f"\x1b[1;31m{severity}: option {v.name} has no description\x1b[0m", file=sys.stderr)
        v.value['description'] = "This option has no description."
    if v.value.get('type', "unspecified") == "unspecified":
        hasWarnings = True
        print(
            f"\x1b[1;31m{severity}: option {v.name} has no type. Please specify a valid type, see " +
            "https://nixos.org/manual/nixos/stable/index.html#sec-option-types\x1b[0m", file=sys.stderr)

if hasWarnings and warningsAreErrors:
    print(
        "\x1b[1;31m" +

@@ -5,6 +5,7 @@ with lib;

let
  cfg = config.systemd;
  lndir = "${pkgs.buildPackages.xorg.lndir}/bin/lndir";
  systemd = cfg.package;
in rec {

  shellEscape = s: (replaceChars [ "\\" ] [ "\\\\" ] s);
@@ -235,4 +236,205 @@ in rec {
      ''}
    ''; # */

  makeJobScript = name: text:
    let
      scriptName = replaceChars [ "\\" "@" ] [ "-" "_" ] (shellEscape name);
      out = (pkgs.writeShellScriptBin scriptName ''
        set -e
        ${text}
      '').overrideAttrs (_: {
        # The derivation name is different from the script file name
        # to keep the script file name short to avoid cluttering logs.
        name = "unit-script-${scriptName}";
      });
    in "${out}/bin/${scriptName}";

  unitConfig = { config, options, ... }: {
    config = {
      unitConfig =
        optionalAttrs (config.requires != [])
          { Requires = toString config.requires; }
        // optionalAttrs (config.wants != [])
          { Wants = toString config.wants; }
        // optionalAttrs (config.after != [])
          { After = toString config.after; }
        // optionalAttrs (config.before != [])
          { Before = toString config.before; }
        // optionalAttrs (config.bindsTo != [])
          { BindsTo = toString config.bindsTo; }
        // optionalAttrs (config.partOf != [])
          { PartOf = toString config.partOf; }
        // optionalAttrs (config.conflicts != [])
          { Conflicts = toString config.conflicts; }
        // optionalAttrs (config.requisite != [])
          { Requisite = toString config.requisite; }
        // optionalAttrs (config.restartTriggers != [])
          { X-Restart-Triggers = toString config.restartTriggers; }
        // optionalAttrs (config.reloadTriggers != [])
          { X-Reload-Triggers = toString config.reloadTriggers; }
        // optionalAttrs (config.description != "") {
          Description = config.description; }
        // optionalAttrs (config.documentation != []) {
          Documentation = toString config.documentation; }
        // optionalAttrs (config.onFailure != []) {
          OnFailure = toString config.onFailure; }
        // optionalAttrs (options.startLimitIntervalSec.isDefined) {
          StartLimitIntervalSec = toString config.startLimitIntervalSec;
        } // optionalAttrs (options.startLimitBurst.isDefined) {
          StartLimitBurst = toString config.startLimitBurst;
        };
    };
  };

  serviceConfig = { name, config, ... }: {
    config = mkMerge
      [ { # Default path for systemd services. Should be quite minimal.
          path = mkAfter
            [ pkgs.coreutils
              pkgs.findutils
              pkgs.gnugrep
              pkgs.gnused
              systemd
            ];
          environment.PATH = "${makeBinPath config.path}:${makeSearchPathOutput "bin" "sbin" config.path}";
        }
        (mkIf (config.preStart != "")
          { serviceConfig.ExecStartPre =
              [ (makeJobScript "${name}-pre-start" config.preStart) ];
          })
        (mkIf (config.script != "")
          { serviceConfig.ExecStart =
              makeJobScript "${name}-start" config.script + " " + config.scriptArgs;
          })
        (mkIf (config.postStart != "")
          { serviceConfig.ExecStartPost =
              [ (makeJobScript "${name}-post-start" config.postStart) ];
          })
        (mkIf (config.reload != "")
          { serviceConfig.ExecReload =
              makeJobScript "${name}-reload" config.reload;
          })
        (mkIf (config.preStop != "")
          { serviceConfig.ExecStop =
              makeJobScript "${name}-pre-stop" config.preStop;
          })
        (mkIf (config.postStop != "")
          { serviceConfig.ExecStopPost =
              makeJobScript "${name}-post-stop" config.postStop;
          })
      ];
  };

  mountConfig = { config, ... }: {
    config = {
      mountConfig =
        { What = config.what;
          Where = config.where;
        } // optionalAttrs (config.type != "") {
          Type = config.type;
        } // optionalAttrs (config.options != "") {
          Options = config.options;
        };
    };
  };

  automountConfig = { config, ... }: {
    config = {
      automountConfig =
        { Where = config.where;
        };
    };
  };

  commonUnitText = def: ''
      [Unit]
      ${attrsToSection def.unitConfig}
    '';

  targetToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text =
        ''
          [Unit]
          ${attrsToSection def.unitConfig}
        '';
    };

  serviceToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Service]
          ${let env = cfg.globalEnvironment // def.environment;
            in concatMapStrings (n:
              let s = optionalString (env.${n} != null)
                "Environment=${builtins.toJSON "${n}=${env.${n}}"}\n";
              # systemd max line length is now 1MiB
              # https://github.com/systemd/systemd/commit/e6dde451a51dc5aaa7f4d98d39b8fe735f73d2af
              in if stringLength s >= 1048576 then throw "The value of the environment variable ‘${n}’ in systemd service ‘${name}.service’ is too long." else s) (attrNames env)}
          ${if def.reloadIfChanged then ''
            X-ReloadIfChanged=true
          '' else if !def.restartIfChanged then ''
            X-RestartIfChanged=false
          '' else ""}
          ${optionalString (!def.stopIfChanged) "X-StopIfChanged=false"}
          ${attrsToSection def.serviceConfig}
        '';
    };

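As an aside (not part of the diff), a rough sketch of how these generators render a service; the store path and exact unit text are illustrative:

```nix
# NixOS input:
{
  systemd.services.hello = {
    description = "Say hello";
    script = "echo hello";
    environment.GREETING = "hi";
    wantedBy = [ "multi-user.target" ];
  };
}
# serviceToUnit would yield unit text roughly like:
#   [Unit]
#   Description=Say hello
#
#   [Service]
#   Environment="GREETING=hi"
#   ExecStart=/nix/store/...-unit-script-hello-start/bin/hello-start
```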
  socketToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Socket]
          ${attrsToSection def.socketConfig}
          ${concatStringsSep "\n" (map (s: "ListenStream=${s}") def.listenStreams)}
          ${concatStringsSep "\n" (map (s: "ListenDatagram=${s}") def.listenDatagrams)}
        '';
    };

  timerToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Timer]
          ${attrsToSection def.timerConfig}
        '';
    };

  pathToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Path]
          ${attrsToSection def.pathConfig}
        '';
    };

  mountToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Mount]
          ${attrsToSection def.mountConfig}
        '';
    };

  automountToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Automount]
          ${attrsToSection def.automountConfig}
        '';
    };

  sliceToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Slice]
          ${attrsToSection def.sliceConfig}
        '';
    };
}

@@ -183,7 +183,11 @@ in

    pruneNames = mkOption {
      type = listOf str;
      default = [ ".bzr" ".cache" ".git" ".hg" ".svn" ];
      default = lib.optionals (!isFindutils) [ ".bzr" ".cache" ".git" ".hg" ".svn" ];
      defaultText = literalDocBook ''
        <literal>[ ".bzr" ".cache" ".git" ".hg" ".svn" ]</literal>, if
        supported by the locate implementation (i.e. mlocate or plocate).
      '';
      description = ''
        Directory components which should exclude paths containing them from indexing
      '';

@@ -8,8 +8,12 @@ let
    concatStringsSep mapAttrsToList toLower
    literalExpression mkRenamedOptionModule mkDefault mkOption trivial types;

  needsEscaping = s: null != builtins.match "[a-zA-Z0-9]+" s;
  escapeIfNeccessary = s: if needsEscaping s then s else ''"${lib.escape [ "\$" "\"" "\\" "\`" ] s}"'';
  attrsToText = attrs:
    concatStringsSep "\n" (mapAttrsToList (n: v: ''${n}="${toString v}"'') attrs);
    concatStringsSep "\n" (
      mapAttrsToList (n: v: ''${n}=${escapeIfNeccessary (toString v)}'') attrs
    );

in
{
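A quick sketch of the effect (the evaluation results below are inferred from the code above, not part of the diff):

```nix
# needsEscaping "nixos"          => true   (pure alphanumerics stay bare)
# needsEscaping "22.05 (Quokka)" => false  (anything else gets quoted and escaped)
#
# attrsToText { ID = "nixos"; PRETTY_NAME = "NixOS 22.05 (Quokka)"; }
# now renders as:
#   ID=nixos
#   PRETTY_NAME="NixOS 22.05 (Quokka)"
# The old version always quoted but never escaped embedded `$`, `"`,
# backslashes, or backticks, which could corrupt os-release-style files.
```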
@@ -1,6 +1,6 @@
{ cfg, pkgs, lib }:
let
  propertyXml = name: value: ''
  propertyXml = name: value: lib.optionalString (value != null) ''
    <property>
      <name>${name}</name>
      <value>${builtins.toString value}</value>
@@ -29,16 +29,16 @@ let
    export HADOOP_LOG_DIR=/tmp/hadoop/$USER
  '';
in
pkgs.runCommand "hadoop-conf" {} ''
pkgs.runCommand "hadoop-conf" {} (with cfg; ''
  mkdir -p $out/
  cp ${siteXml "core-site.xml" cfg.coreSite}/* $out/
  cp ${siteXml "hdfs-site.xml" cfg.hdfsSite}/* $out/
  cp ${siteXml "mapred-site.xml" cfg.mapredSite}/* $out/
  cp ${siteXml "yarn-site.xml" cfg.yarnSite}/* $out/
  cp ${siteXml "httpfs-site.xml" cfg.httpfsSite}/* $out/
  cp ${cfgFile "container-executor.cfg" cfg.containerExecutorCfg}/* $out/
  cp ${siteXml "core-site.xml" (coreSite // coreSiteInternal)}/* $out/
  cp ${siteXml "hdfs-site.xml" (hdfsSiteDefault // hdfsSite // hdfsSiteInternal)}/* $out/
  cp ${siteXml "mapred-site.xml" (mapredSiteDefault // mapredSite)}/* $out/
  cp ${siteXml "yarn-site.xml" (yarnSiteDefault // yarnSite // yarnSiteInternal)}/* $out/
  cp ${siteXml "httpfs-site.xml" httpfsSite}/* $out/
  cp ${cfgFile "container-executor.cfg" containerExecutorCfg}/* $out/
  cp ${pkgs.writeTextDir "hadoop-user-functions.sh" userFunctions}/* $out/
  cp ${pkgs.writeTextDir "hadoop-env.sh" hadoopEnv}/* $out/
  cp ${cfg.log4jProperties} $out/log4j.properties
  ${lib.concatMapStringsSep "\n" (dir: "cp -r ${dir}/* $out/") cfg.extraConfDirs}
''
  cp ${log4jProperties} $out/log4j.properties
  ${lib.concatMapStringsSep "\n" (dir: "cp -r ${dir}/* $out/") extraConfDirs}
'')
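For intuition (not diff content): with the `optionalString` guard, a null value makes the whole property disappear from the generated site file, which is what lets the merged attribute sets above cancel defaults:

```nix
# propertyXml "fs.defaultFS" "hdfs://namenode:8020" evaluates to:
#   <property>
#     <name>fs.defaultFS</name>
#     <value>hdfs://namenode:8020</value>
#   </property>
#
# propertyXml "fs.defaultFS" null evaluates to "" — the property is
# omitted entirely, so setting an option to null removes a default.
```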
@@ -21,24 +21,50 @@ with lib;
        <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/core-default.xml"/>
      '';
    };
    coreSiteInternal = mkOption {
      default = {};
      type = types.attrsOf types.anything;
      internal = true;
      description = ''
        Internal option to add configs to core-site.xml based on module options
      '';
    };

    hdfsSite = mkOption {
    hdfsSiteDefault = mkOption {
      default = {
        "dfs.namenode.rpc-bind-host" = "0.0.0.0";
        "dfs.namenode.http-address" = "0.0.0.0:9870";
        "dfs.namenode.servicerpc-bind-host" = "0.0.0.0";
        "dfs.namenode.http-bind-host" = "0.0.0.0";
      };
      type = types.attrsOf types.anything;
      description = ''
        Default options for hdfs-site.xml
      '';
    };
    hdfsSite = mkOption {
      default = {};
      type = types.attrsOf types.anything;
      example = literalExpression ''
        {
          "dfs.nameservices" = "namenode1";
        }
      '';
      description = ''
        Hadoop hdfs-site.xml definition
        Additional options and overrides for hdfs-site.xml
        <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml"/>
      '';
    };
    hdfsSiteInternal = mkOption {
      default = {};
      type = types.attrsOf types.anything;
      internal = true;
      description = ''
        Internal option to add configs to hdfs-site.xml based on module options
      '';
    };

    mapredSite = mkOption {
    mapredSiteDefault = mkOption {
      default = {
        "mapreduce.framework.name" = "yarn";
        "yarn.app.mapreduce.am.env" = "HADOOP_MAPRED_HOME=${cfg.package}/lib/${cfg.package.untarDir}";
@@ -54,18 +80,25 @@ with lib;
        }
      '';
      type = types.attrsOf types.anything;
      description = ''
        Default options for mapred-site.xml
      '';
    };
    mapredSite = mkOption {
      default = {};
      type = types.attrsOf types.anything;
      example = literalExpression ''
        options.services.hadoop.mapredSite.default // {
        {
          "mapreduce.map.java.opts" = "-Xmx900m -XX:+UseParallelGC";
        }
      '';
      description = ''
        Hadoop mapred-site.xml definition
        Additional options and overrides for mapred-site.xml
        <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml"/>
      '';
    };

    yarnSite = mkOption {
    yarnSiteDefault = mkOption {
      default = {
        "yarn.nodemanager.admin-env" = "PATH=$PATH";
        "yarn.nodemanager.aux-services" = "mapreduce_shuffle";
@@ -77,19 +110,34 @@ with lib;
        "yarn.nodemanager.linux-container-executor.path" = "/run/wrappers/yarn-nodemanager/bin/container-executor";
        "yarn.nodemanager.log-dirs" = "/var/log/hadoop/yarn/nodemanager";
        "yarn.resourcemanager.bind-host" = "0.0.0.0";
        "yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";
        "yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler";
      };
      type = types.attrsOf types.anything;
      description = ''
        Default options for yarn-site.xml
      '';
    };
    yarnSite = mkOption {
      default = {};
      type = types.attrsOf types.anything;
      example = literalExpression ''
        options.services.hadoop.yarnSite.default // {
        {
          "yarn.resourcemanager.hostname" = "''${config.networking.hostName}";
        }
      '';
      description = ''
        Hadoop yarn-site.xml definition
        Additional options and overrides for yarn-site.xml
        <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-common/yarn-default.xml"/>
      '';
    };
    yarnSiteInternal = mkOption {
      default = {};
      type = types.attrsOf types.anything;
      internal = true;
      description = ''
        Internal option to add configs to yarn-site.xml based on module options
      '';
    };

    httpfsSite = mkOption {
      default = { };
@@ -123,6 +171,7 @@ with lib;
        "yarn.nodemanager.linux-container-executor.group"="hadoop";
        "min.user.id"=1000;
        "feature.terminal.enabled"=1;
        "feature.mount-cgroup.enabled" = 1;
      };
      type = types.attrsOf types.anything;
      example = literalExpression ''
@@ -148,6 +197,8 @@ with lib;
      description = "Directories containing additional config files to be added to HADOOP_CONF_DIR";
    };

    gatewayRole.enable = mkEnableOption "gateway role for deploying hadoop configs";

    package = mkOption {
      type = types.package;
      default = pkgs.hadoop;
@@ -157,20 +208,16 @@ with lib;
  };


  config = mkMerge [
    (mkIf (builtins.hasAttr "yarn" config.users.users ||
           builtins.hasAttr "hdfs" config.users.users ||
           builtins.hasAttr "httpfs" config.users.users) {
      users.groups.hadoop = {
        gid = config.ids.gids.hadoop;
      };
      environment = {
        systemPackages = [ cfg.package ];
        etc."hadoop-conf".source = let
          hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
        in "${hadoopConf}";
      };
    })

  ];
  config = mkIf cfg.gatewayRole.enable {
    users.groups.hadoop = {
      gid = config.ids.gids.hadoop;
    };
    environment = {
      systemPackages = [ cfg.package ];
      etc."hadoop-conf".source = let
        hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
      in "${hadoopConf}";
      variables.HADOOP_CONF_DIR = "/etc/hadoop-conf/";
    };
  };
}

@@ -1,191 +1,191 @@
{ config, lib, pkgs, ...}:
{ config, lib, pkgs, ... }:
with lib;
let
  cfg = config.services.hadoop;

  # Config files for hadoop services
  hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
  restartIfChanged = mkOption {
    type = types.bool;
    description = ''
      Automatically restart the service on config change.
      This can be set to false to defer restarts on clusters running critical applications.
      Please consider the security implications of inadvertently running an older version,
      and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
    '';
    default = false;
  };

  # Generator for HDFS service options
  hadoopServiceOption = { serviceName, firewallOption ? true, extraOpts ? null }: {
    enable = mkEnableOption serviceName;
    restartIfChanged = mkOption {
      type = types.bool;
      description = ''
        Automatically restart the service on config change.
        This can be set to false to defer restarts on clusters running critical applications.
        Please consider the security implications of inadvertently running an older version,
        and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
      '';
      default = false;
    };
    extraFlags = mkOption{
      type = with types; listOf str;
      default = [];
      description = "Extra command line flags to pass to ${serviceName}";
      example = [
        "-Dcom.sun.management.jmxremote"
        "-Dcom.sun.management.jmxremote.port=8010"
      ];
    };
    extraEnv = mkOption{
      type = with types; attrsOf str;
      default = {};
      description = "Extra environment variables for ${serviceName}";
    };
  } // (optionalAttrs firewallOption {
    openFirewall = mkOption {
      type = types.bool;
      default = false;
      description = "Open firewall ports for ${serviceName}.";
    };
  }) // (optionalAttrs (extraOpts != null) extraOpts);

  # Generator for HDFS service configs
  hadoopServiceConfig =
    { name
    , serviceOptions ? cfg.hdfs."${toLower name}"
    , description ? "Hadoop HDFS ${name}"
    , User ? "hdfs"
    , allowedTCPPorts ? [ ]
    , preStart ? ""
    , environment ? { }
    , extraConfig ? { }
    }: (

      mkIf serviceOptions.enable ( mkMerge [{
        systemd.services."hdfs-${toLower name}" = {
          inherit description preStart;
          environment = environment // serviceOptions.extraEnv;
          wantedBy = [ "multi-user.target" ];
          inherit (serviceOptions) restartIfChanged;
          serviceConfig = {
            inherit User;
            SyslogIdentifier = "hdfs-${toLower name}";
            ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} ${toLower name} ${escapeShellArgs serviceOptions.extraFlags}";
            Restart = "always";
          };
        };

        services.hadoop.gatewayRole.enable = true;

        networking.firewall.allowedTCPPorts = mkIf
          ((builtins.hasAttr "openFirewall" serviceOptions) && serviceOptions.openFirewall)
          allowedTCPPorts;
      } extraConfig])
    );

in
{
  options.services.hadoop.hdfs = {
    namenode = {
      enable = mkEnableOption "Whether to run the HDFS NameNode";

    namenode = hadoopServiceOption { serviceName = "HDFS NameNode"; } // {
      formatOnInit = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Format HDFS namenode on first start. This is useful for quickly spinning up ephemeral HDFS clusters with a single namenode.
          For HA clusters, initialization involves multiple steps across multiple nodes. Follow [this guide](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html)
          to initialize an HA cluster manually.
        '';
      };
      inherit restartIfChanged;
      openFirewall = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Open firewall ports for namenode
          Format HDFS namenode on first start. This is useful for quickly spinning up
          ephemeral HDFS clusters with a single namenode.
          For HA clusters, initialization involves multiple steps across multiple nodes.
          Follow this guide to initialize an HA cluster manually:
          <link xlink:href="https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html"/>
        '';
      };
    };
    datanode = {
      enable = mkEnableOption "Whether to run the HDFS DataNode";
      inherit restartIfChanged;
      openFirewall = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Open firewall ports for datanode
        '';

    datanode = hadoopServiceOption { serviceName = "HDFS DataNode"; } // {
      dataDirs = mkOption {
        default = null;
        description = "Tier and path definitions for datanode storage.";
        type = with types; nullOr (listOf (submodule {
          options = {
            type = mkOption {
              type = enum [ "SSD" "DISK" "ARCHIVE" "RAM_DISK" ];
              description = ''
                Storage types ([SSD]/[DISK]/[ARCHIVE]/[RAM_DISK]) for HDFS storage policies.
              '';
            };
            path = mkOption {
              type = path;
              example = [ "/var/lib/hadoop/hdfs/dn" ];
              description = "Determines where on the local filesystem a data node should store its blocks.";
            };
          };
        }));
      };
    };
    journalnode = {
      enable = mkEnableOption "Whether to run the HDFS JournalNode";
      inherit restartIfChanged;
      openFirewall = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Open firewall ports for journalnode
        '';
      };

    journalnode = hadoopServiceOption { serviceName = "HDFS JournalNode"; };

    zkfc = hadoopServiceOption {
      serviceName = "HDFS ZooKeeper failover controller";
      firewallOption = false;
    };
    zkfc = {
      enable = mkEnableOption "Whether to run the HDFS ZooKeeper failover controller";
      inherit restartIfChanged;
    };
    httpfs = {
      enable = mkEnableOption "Whether to run the HDFS HTTPfs server";

    httpfs = hadoopServiceOption { serviceName = "HDFS JournalNode"; } // {
      tempPath = mkOption {
        type = types.path;
        default = "/tmp/hadoop/httpfs";
        description = ''
          HTTPFS_TEMP path used by HTTPFS
        '';
      };
      inherit restartIfChanged;
      openFirewall = mkOption {
        type = types.bool;
        default = true;
        description = ''
          Open firewall ports for HTTPFS
        '';
        description = "HTTPFS_TEMP path used by HTTPFS";
      };
    };

  };

  config = mkMerge [
    (mkIf cfg.hdfs.namenode.enable {
      systemd.services.hdfs-namenode = {
        description = "Hadoop HDFS NameNode";
        wantedBy = [ "multi-user.target" ];
        inherit (cfg.hdfs.namenode) restartIfChanged;

        preStart = (mkIf cfg.hdfs.namenode.formatOnInit ''
          ${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true
        '');

        serviceConfig = {
          User = "hdfs";
          SyslogIdentifier = "hdfs-namenode";
          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} namenode";
          Restart = "always";
        };
      };

      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.namenode.openFirewall [
    (hadoopServiceConfig {
      name = "NameNode";
      allowedTCPPorts = [
        9870 # namenode.http-address
        8020 # namenode.rpc-address
        8022 # namenode. servicerpc-address
      ]);
        8022 # namenode.servicerpc-address
        8019 # dfs.ha.zkfc.port
      ];
      preStart = (mkIf cfg.hdfs.namenode.formatOnInit
        "${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true"
      );
    })
    (mkIf cfg.hdfs.datanode.enable {
      systemd.services.hdfs-datanode = {
        description = "Hadoop HDFS DataNode";
        wantedBy = [ "multi-user.target" ];
        inherit (cfg.hdfs.datanode) restartIfChanged;

        serviceConfig = {
          User = "hdfs";
          SyslogIdentifier = "hdfs-datanode";
          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} datanode";
          Restart = "always";
        };
      };

      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.datanode.openFirewall [
    (hadoopServiceConfig {
      name = "DataNode";
      # port numbers for datanode changed between hadoop 2 and 3
      allowedTCPPorts = if versionAtLeast cfg.package.version "3" then [
        9864 # datanode.http.address
        9866 # datanode.address
        9867 # datanode.ipc.address
      ]);
      ] else [
        50075 # datanode.http.address
        50010 # datanode.address
        50020 # datanode.ipc.address
      ];
      extraConfig.services.hadoop.hdfsSiteInternal."dfs.datanode.data.dir" = let d = cfg.hdfs.datanode.dataDirs; in
        if (d!= null) then (concatMapStringsSep "," (x: "["+x.type+"]file://"+x.path) cfg.hdfs.datanode.dataDirs) else d;
    })
    (mkIf cfg.hdfs.journalnode.enable {
      systemd.services.hdfs-journalnode = {
        description = "Hadoop HDFS JournalNode";
        wantedBy = [ "multi-user.target" ];
        inherit (cfg.hdfs.journalnode) restartIfChanged;

        serviceConfig = {
          User = "hdfs";
          SyslogIdentifier = "hdfs-journalnode";
          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} journalnode";
          Restart = "always";
        };
      };

      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.journalnode.openFirewall [
    (hadoopServiceConfig {
      name = "JournalNode";
      allowedTCPPorts = [
        8480 # dfs.journalnode.http-address
        8485 # dfs.journalnode.rpc-address
      ]);
      ];
    })
    (mkIf cfg.hdfs.zkfc.enable {
      systemd.services.hdfs-zkfc = {
        description = "Hadoop HDFS ZooKeeper failover controller";
        wantedBy = [ "multi-user.target" ];
        inherit (cfg.hdfs.zkfc) restartIfChanged;

        serviceConfig = {
          User = "hdfs";
          SyslogIdentifier = "hdfs-zkfc";
          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} zkfc";
          Restart = "always";
        };
      };
    (hadoopServiceConfig {
      name = "zkfc";
      description = "Hadoop HDFS ZooKeeper failover controller";
    })
    (mkIf cfg.hdfs.httpfs.enable {
      systemd.services.hdfs-httpfs = {
        description = "Hadoop httpfs";
        wantedBy = [ "multi-user.target" ];
        inherit (cfg.hdfs.httpfs) restartIfChanged;

        environment.HTTPFS_TEMP = cfg.hdfs.httpfs.tempPath;

        preStart = ''
          mkdir -p $HTTPFS_TEMP
        '';

        serviceConfig = {
          User = "httpfs";
          SyslogIdentifier = "hdfs-httpfs";
          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} httpfs";
          Restart = "always";
        };
      };
      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.httpfs.openFirewall [
    (hadoopServiceConfig {
      name = "HTTPFS";
      environment.HTTPFS_TEMP = cfg.hdfs.httpfs.tempPath;
      preStart = "mkdir -p $HTTPFS_TEMP";
      User = "httpfs";
      allowedTCPPorts = [
        14000 # httpfs.http.port
      ]);
      ];
    })
    (mkIf (
        cfg.hdfs.namenode.enable || cfg.hdfs.datanode.enable || cfg.hdfs.journalnode.enable || cfg.hdfs.zkfc.enable
    ) {

    (mkIf cfg.gatewayRole.enable {
      users.users.hdfs = {
        description = "Hadoop HDFS user";
        group = "hadoop";
@@ -199,5 +199,6 @@ in
        isSystemUser = true;
      };
    })

  ];
}

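To make the refactor concrete, a hedged sketch of a single-node HDFS setup using the per-service options generated by `hadoopServiceOption` (values are illustrative):

```nix
{
  services.hadoop.hdfs = {
    namenode = {
      enable = true;
      formatOnInit = true;  # ephemeral cluster: format on first start
      openFirewall = true;  # no longer the default after this change
    };
    datanode = {
      enable = true;
      openFirewall = true;
      extraFlags = [ "-Dcom.sun.management.jmxremote" ];
    };
  };
}
```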
@ -13,23 +13,77 @@ let
|
||||
'';
|
||||
default = false;
|
||||
};
|
||||
extraFlags = mkOption{
|
||||
type = with types; listOf str;
|
||||
default = [];
|
||||
description = "Extra command line flags to pass to the service";
|
||||
example = [
|
||||
"-Dcom.sun.management.jmxremote"
|
||||
"-Dcom.sun.management.jmxremote.port=8010"
|
||||
];
|
||||
};
|
||||
extraEnv = mkOption{
|
||||
type = with types; attrsOf str;
|
||||
default = {};
|
||||
description = "Extra environment variables";
|
||||
};
|
||||
in
|
||||
{
|
||||
options.services.hadoop.yarn = {
|
||||
resourcemanager = {
|
||||
enable = mkEnableOption "Whether to run the Hadoop YARN ResourceManager";
|
||||
inherit restartIfChanged;
|
||||
enable = mkEnableOption "Hadoop YARN ResourceManager";
|
||||
inherit restartIfChanged extraFlags extraEnv;
|
||||
|
||||
openFirewall = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
default = false;
|
||||
description = ''
|
||||
Open firewall ports for resourcemanager
|
||||
'';
|
||||
};
|
||||
};
|
||||
nodemanager = {
|
||||
enable = mkEnableOption "Whether to run the Hadoop YARN NodeManager";
|
||||
inherit restartIfChanged;
|
||||
enable = mkEnableOption "Hadoop YARN NodeManager";
|
||||
inherit restartIfChanged extraFlags extraEnv;
|
||||
|
||||
resource = {
|
||||
cpuVCores = mkOption {
|
||||
description = "Number of vcores that can be allocated for containers.";
|
||||
type = with types; nullOr ints.positive;
|
||||
default = null;
|
||||
};
|
||||
maximumAllocationVCores = mkOption {
|
||||
description = "The maximum virtual CPU cores any container can be allocated.";
|
||||
type = with types; nullOr ints.positive;
|
||||
default = null;
|
||||
};
|
||||
memoryMB = mkOption {
|
||||
description = "Amount of physical memory, in MB, that can be allocated for containers.";
|
||||
type = with types; nullOr ints.positive;
|
||||
default = null;
|
||||
};
|
||||
maximumAllocationMB = mkOption {
|
||||
description = "The maximum physical memory any container can be allocated.";
|
||||
type = with types; nullOr ints.positive;
|
||||
default = null;
|
||||
};
|
||||
};
|
||||
|
||||
useCGroups = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Use cgroups to enforce resource limits on containers
|
||||
'';
|
||||
};
|
||||
|
||||
localDir = mkOption {
|
||||
description = "List of directories to store localized files in.";
|
||||
type = with types; nullOr (listOf path);
|
||||
example = [ "/var/lib/hadoop/yarn/nm" ];
|
||||
default = null;
|
||||
};
|
||||
|
||||
addBinBash = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
@ -39,7 +93,7 @@ in
|
||||
};
|
||||
openFirewall = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
default = false;
|
||||
description = ''
|
||||
Open firewall ports for nodemanager.
|
||||
Because containers can listen on any ephemeral port, TCP ports 1024–65535 will be opened.
|
||||
@ -49,10 +103,7 @@ in
|
||||
};
|
||||
|
||||
config = mkMerge [
|
||||
(mkIf (
|
||||
cfg.yarn.resourcemanager.enable || cfg.yarn.nodemanager.enable
|
||||
) {
|
||||
|
||||
(mkIf cfg.gatewayRole.enable {
|
||||
users.users.yarn = {
|
||||
description = "Hadoop YARN user";
|
||||
group = "hadoop";
|
||||
@ -65,15 +116,19 @@ in
|
||||
description = "Hadoop YARN ResourceManager";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
inherit (cfg.yarn.resourcemanager) restartIfChanged;
|
||||
environment = cfg.yarn.resourcemanager.extraEnv;
|
||||
|
||||
serviceConfig = {
|
||||
User = "yarn";
|
||||
SyslogIdentifier = "yarn-resourcemanager";
|
||||
ExecStart = "${cfg.package}/bin/yarn --config ${hadoopConf} " +
|
||||
" resourcemanager";
|
||||
" resourcemanager ${escapeShellArgs cfg.yarn.resourcemanager.extraFlags}";
|
||||
Restart = "always";
|
||||
};
|
||||
};
|
||||
|
||||
services.hadoop.gatewayRole.enable = true;
|
||||
|
||||
networking.firewall.allowedTCPPorts = (mkIf cfg.yarn.resourcemanager.openFirewall [
|
||||
8088 # resourcemanager.webapp.address
|
||||
8030 # resourcemanager.scheduler.address
|
||||
@ -94,6 +149,7 @@ in
|
||||
description = "Hadoop YARN NodeManager";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
inherit (cfg.yarn.nodemanager) restartIfChanged;
|
||||
environment = cfg.yarn.nodemanager.extraEnv;
|
||||
|
||||
preStart = ''
|
||||
# create log dir
|
||||
@ -101,8 +157,9 @@ in
|
||||
chown yarn:hadoop /var/log/hadoop/yarn/nodemanager
|
||||
|
||||
# set up setuid container executor binary
|
||||
umount /run/wrappers/yarn-nodemanager/cgroup/cpu || true
|
||||
rm -rf /run/wrappers/yarn-nodemanager/ || true
|
||||
mkdir -p /run/wrappers/yarn-nodemanager/{bin,etc/hadoop}
|
||||
mkdir -p /run/wrappers/yarn-nodemanager/{bin,etc/hadoop,cgroup/cpu}
|
||||
cp ${cfg.package}/lib/${cfg.package.untarDir}/bin/container-executor /run/wrappers/yarn-nodemanager/bin/
|
||||
chgrp hadoop /run/wrappers/yarn-nodemanager/bin/container-executor
|
||||
chmod 6050 /run/wrappers/yarn-nodemanager/bin/container-executor
|
||||
@ -114,11 +171,26 @@ in
|
||||
SyslogIdentifier = "yarn-nodemanager";
|
||||
PermissionsStartOnly = true;
|
||||
ExecStart = "${cfg.package}/bin/yarn --config ${hadoopConf} " +
|
||||
" nodemanager";
|
||||
" nodemanager ${escapeShellArgs cfg.yarn.nodemanager.extraFlags}";
|
||||
Restart = "always";
|
||||
};
|
||||
};
|
||||
|
||||
services.hadoop.gatewayRole.enable = true;
|
||||
|
||||
services.hadoop.yarnSiteInternal = with cfg.yarn.nodemanager; {
|
||||
"yarn.nodemanager.local-dirs" = localDir;
|
||||
"yarn.scheduler.maximum-allocation-vcores" = resource.maximumAllocationVCores;
|
||||
"yarn.scheduler.maximum-allocation-mb" = resource.maximumAllocationMB;
|
||||
"yarn.nodemanager.resource.cpu-vcores" = resource.cpuVCores;
|
||||
"yarn.nodemanager.resource.memory-mb" = resource.memoryMB;
|
||||
} // mkIf useCGroups {
|
||||
"yarn.nodemanager.linux-container-executor.cgroups.hierarchy" = "/hadoop-yarn";
|
||||
"yarn.nodemanager.linux-container-executor.resources-handler.class" = "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler";
|
||||
"yarn.nodemanager.linux-container-executor.cgroups.mount" = "true";
|
||||
"yarn.nodemanager.linux-container-executor.cgroups.mount-path" = "/run/wrappers/yarn-nodemanager/cgroup";
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPortRanges = [
|
||||
(mkIf (cfg.yarn.nodemanager.openFirewall) {from = 1024; to = 65535;})
|
||||
];
|
||||
|
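
For illustration, a minimal host configuration exercising the nodemanager options introduced above might look as follows (a sketch only; the option paths are the ones from this hunk, while the resource figures are arbitrary):

  {
    services.hadoop.yarn.nodemanager = {
      enable = true;
      useCGroups = true;        # enforce container resource limits via cgroups
      openFirewall = false;     # keep the ephemeral 1024-65535 range closed
      localDir = [ "/var/lib/hadoop/yarn/nm" ];
      resource = {
        memoryMB = 4096;              # arbitrary example value
        maximumAllocationMB = 2048;   # arbitrary example value
      };
    };
  }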
@ -194,19 +194,8 @@ let
      zone.children
    );

  # fighting infinite recursion
  zoneOptions = zoneOptionsRaw // childConfig zoneOptions1 true;
  zoneOptions1 = zoneOptionsRaw // childConfig zoneOptions2 false;
  zoneOptions2 = zoneOptionsRaw // childConfig zoneOptions3 false;
  zoneOptions3 = zoneOptionsRaw // childConfig zoneOptions4 false;
  zoneOptions4 = zoneOptionsRaw // childConfig zoneOptions5 false;
  zoneOptions5 = zoneOptionsRaw // childConfig zoneOptions6 false;
  zoneOptions6 = zoneOptionsRaw // childConfig null false;

  childConfig = x: v: { options.children = { type = types.attrsOf x; visible = v; }; };

  # options are ordered alphanumerically
  zoneOptionsRaw = types.submodule {
  zoneOptions = types.submodule {
    options = {

      allowAXFRFallback = mkOption {
@ -246,6 +235,13 @@ let
      };

      children = mkOption {
        # TODO: This relies on the fact that `types.anything` doesn't set any
        # values of its own to any defaults, because in the above zoneConfigs',
        # values from children override ones from parents, but only if the
        # attributes are defined. Because of this, we can't replace the element
        # type here with `zoneConfigs`, since that would set all the attributes
        # to default values, breaking the parent inheriting function.
        type = types.attrsOf types.anything;
        default = {};
        description = ''
          Children zones inherit all options of their parents. Attributes

@ -62,6 +62,7 @@ in {
    };

    stateDir = mkOption {
      type = types.path;
      default = "/var/lib/unbound";
      description = "Directory holding all state for unbound to run.";
    };

@ -153,6 +153,7 @@ in

    userlist = mkOption {
      default = [];
      type = types.listOf types.str;
      description = "See <option>userlistFile</option>.";
    };

@ -153,7 +153,7 @@ in {
    package = mkOption {
      type = types.package;
      description = "Which package to use for the Nextcloud instance.";
      relatedPackages = [ "nextcloud21" "nextcloud22" "nextcloud23" ];
      relatedPackages = [ "nextcloud22" "nextcloud23" ];
    };
    phpPackage = mkOption {
      type = types.package;
@ -571,15 +571,6 @@ in {
        nextcloud defined in an overlay, please set `services.nextcloud.package` to
        `pkgs.nextcloud`.
      ''
      # 21.03 will not be an official release - it was instead 21.05.
      # This versionOlder statement remains set to 21.03 for backwards compatibility.
      # See https://github.com/NixOS/nixpkgs/pull/108899 and
      # https://github.com/NixOS/rfcs/blob/master/rfcs/0080-nixos-release-schedule.md.
      # FIXME(@Ma27) remove this else-if as soon as 21.05 is EOL! This is only here
      # to ensure that users who are on Nextcloud 19 with a stateVersion <21.05 with
      # no explicit services.nextcloud.package don't upgrade to v21 by accident (
      # nextcloud20 throws an eval-error because it's dropped).
      else if versionOlder stateVersion "21.03" then nextcloud20
      else if versionOlder stateVersion "21.11" then nextcloud21
      else if versionOlder stateVersion "22.05" then nextcloud22
      else nextcloud23
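
As a worked example of the chain above: a system whose stateVersion is 21.05 resolves to nextcloud21, one with 21.11 resolves to nextcloud22, and 22.05 or newer falls through to nextcloud23.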
@ -219,6 +219,24 @@ in

    session = mkOption {
      default = [];
      type = with types; listOf (submodule ({ ... }: {
        options = {
          manage = mkOption {
            description = "Whether this is a desktop or a window manager";
            type = enum [ "desktop" "window" ];
          };

          name = mkOption {
            description = "Name of this session";
            type = str;
          };

          start = mkOption {
            description = "Commands to run to start this session";
            type = lines;
          };
        };
      }));
      example = literalExpression
        ''
          [ { manage = "desktop";

@ -36,7 +36,7 @@ in

  boot.kernelPackages = mkOption {
    default = pkgs.linuxPackages;
    type = types.unspecified // { merge = mergeEqualOption; };
    type = types.raw;
    apply = kernelPackages: kernelPackages.extend (self: super: {
      kernel = super.kernel.override (originalArgs: {
        inherit randstructSeed;

@ -581,7 +581,7 @@ in
        else "gzip"
      );
      defaultText = literalDocBook "<literal>zstd</literal> if the kernel supports it (5.9+), <literal>gzip</literal> if not";
      type = types.unspecified; # We don't have a function type...
      type = types.either types.str (types.functionTo types.str);
      description = ''
        The compressor to use on the initrd image. May be any of:
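
With the widened type, the compressor can be given either as a plain string or as a function producing the command line; a sketch, assuming the function receives the package set as its argument (as the functionTo type suggests; the lz4 flags are purely illustrative):

  boot.initrd.compressor = pkgs: "${pkgs.lz4}/bin/lz4 -l";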
@ -11,6 +11,22 @@ let

  systemd = cfg.package;

  inherit (systemdUtils.lib)
    makeJobScript
    unitConfig
    serviceConfig
    mountConfig
    automountConfig
    commonUnitText
    targetToUnit
    serviceToUnit
    socketToUnit
    timerToUnit
    pathToUnit
    mountToUnit
    automountToUnit
    sliceToUnit;

  upstreamSystemUnits =
    [ # Targets.
      "basic.target"
@ -209,207 +225,6 @@ let
      "xdg-desktop-autostart.target"
    ];

  makeJobScript = name: text:
    let
      scriptName = replaceChars [ "\\" "@" ] [ "-" "_" ] (shellEscape name);
      out = (pkgs.writeShellScriptBin scriptName ''
        set -e
        ${text}
      '').overrideAttrs (_: {
        # The derivation name is different from the script file name
        # to keep the script file name short to avoid cluttering logs.
        name = "unit-script-${scriptName}";
      });
    in "${out}/bin/${scriptName}";

  unitConfig = { config, options, ... }: {
    config = {
      unitConfig =
        optionalAttrs (config.requires != [])
          { Requires = toString config.requires; }
        // optionalAttrs (config.wants != [])
          { Wants = toString config.wants; }
        // optionalAttrs (config.after != [])
          { After = toString config.after; }
        // optionalAttrs (config.before != [])
          { Before = toString config.before; }
        // optionalAttrs (config.bindsTo != [])
          { BindsTo = toString config.bindsTo; }
        // optionalAttrs (config.partOf != [])
          { PartOf = toString config.partOf; }
        // optionalAttrs (config.conflicts != [])
          { Conflicts = toString config.conflicts; }
        // optionalAttrs (config.requisite != [])
          { Requisite = toString config.requisite; }
        // optionalAttrs (config.restartTriggers != [])
          { X-Restart-Triggers = toString config.restartTriggers; }
        // optionalAttrs (config.reloadTriggers != [])
          { X-Reload-Triggers = toString config.reloadTriggers; }
        // optionalAttrs (config.description != "") {
          Description = config.description; }
        // optionalAttrs (config.documentation != []) {
          Documentation = toString config.documentation; }
        // optionalAttrs (config.onFailure != []) {
          OnFailure = toString config.onFailure; }
        // optionalAttrs (options.startLimitIntervalSec.isDefined) {
          StartLimitIntervalSec = toString config.startLimitIntervalSec;
        } // optionalAttrs (options.startLimitBurst.isDefined) {
          StartLimitBurst = toString config.startLimitBurst;
        };
    };
  };

  serviceConfig = { name, config, ... }: {
    config = mkMerge
      [ { # Default path for systemd services. Should be quite minimal.
          path = mkAfter
            [ pkgs.coreutils
              pkgs.findutils
              pkgs.gnugrep
              pkgs.gnused
              systemd
            ];
          environment.PATH = "${makeBinPath config.path}:${makeSearchPathOutput "bin" "sbin" config.path}";
        }
        (mkIf (config.preStart != "")
          { serviceConfig.ExecStartPre =
              [ (makeJobScript "${name}-pre-start" config.preStart) ];
          })
        (mkIf (config.script != "")
          { serviceConfig.ExecStart =
              makeJobScript "${name}-start" config.script + " " + config.scriptArgs;
          })
        (mkIf (config.postStart != "")
          { serviceConfig.ExecStartPost =
              [ (makeJobScript "${name}-post-start" config.postStart) ];
          })
        (mkIf (config.reload != "")
          { serviceConfig.ExecReload =
              makeJobScript "${name}-reload" config.reload;
          })
        (mkIf (config.preStop != "")
          { serviceConfig.ExecStop =
              makeJobScript "${name}-pre-stop" config.preStop;
          })
        (mkIf (config.postStop != "")
          { serviceConfig.ExecStopPost =
              makeJobScript "${name}-post-stop" config.postStop;
          })
      ];
  };

  mountConfig = { config, ... }: {
    config = {
      mountConfig =
        { What = config.what;
          Where = config.where;
        } // optionalAttrs (config.type != "") {
          Type = config.type;
        } // optionalAttrs (config.options != "") {
          Options = config.options;
        };
    };
  };

  automountConfig = { config, ... }: {
    config = {
      automountConfig =
        { Where = config.where;
        };
    };
  };

  commonUnitText = def: ''
      [Unit]
      ${attrsToSection def.unitConfig}
    '';

  targetToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text =
        ''
          [Unit]
          ${attrsToSection def.unitConfig}
        '';
    };

  serviceToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Service]
          ${let env = cfg.globalEnvironment // def.environment;
            in concatMapStrings (n:
              let s = optionalString (env.${n} != null)
                "Environment=${builtins.toJSON "${n}=${env.${n}}"}\n";
              # systemd max line length is now 1MiB
              # https://github.com/systemd/systemd/commit/e6dde451a51dc5aaa7f4d98d39b8fe735f73d2af
              in if stringLength s >= 1048576 then throw "The value of the environment variable ‘${n}’ in systemd service ‘${name}.service’ is too long." else s) (attrNames env)}
          ${if def.reloadIfChanged then ''
            X-ReloadIfChanged=true
          '' else if !def.restartIfChanged then ''
            X-RestartIfChanged=false
          '' else ""}
          ${optionalString (!def.stopIfChanged) "X-StopIfChanged=false"}
          ${attrsToSection def.serviceConfig}
        '';
    };

  socketToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Socket]
          ${attrsToSection def.socketConfig}
          ${concatStringsSep "\n" (map (s: "ListenStream=${s}") def.listenStreams)}
          ${concatStringsSep "\n" (map (s: "ListenDatagram=${s}") def.listenDatagrams)}
        '';
    };

  timerToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Timer]
          ${attrsToSection def.timerConfig}
        '';
    };

  pathToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Path]
          ${attrsToSection def.pathConfig}
        '';
    };

  mountToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Mount]
          ${attrsToSection def.mountConfig}
        '';
    };

  automountToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Automount]
          ${attrsToSection def.automountConfig}
        '';
    };

  sliceToUnit = name: def:
    { inherit (def) aliases wantedBy requiredBy enable;
      text = commonUnitText def +
        ''
          [Slice]
          ${attrsToSection def.sliceConfig}
        '';
    };

  logindHandlerType = types.enum [
    "ignore" "poweroff" "reboot" "halt" "kexec" "suspend"

@ -189,9 +189,9 @@ in
  grocy = handleTest ./grocy.nix {};
  grub = handleTest ./grub.nix {};
  gvisor = handleTest ./gvisor.nix {};
  hadoop.all = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hadoop/hadoop.nix {};
  hadoop.hdfs = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hadoop/hdfs.nix {};
  hadoop.yarn = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hadoop/yarn.nix {};
  hadoop = import ./hadoop { inherit handleTestOn; package=pkgs.hadoop; };
  hadoop_3_2 = import ./hadoop { inherit handleTestOn; package=pkgs.hadoop_3_2; };
  hadoop2 = import ./hadoop { inherit handleTestOn; package=pkgs.hadoop2; };
  haka = handleTest ./haka.nix {};
  haproxy = handleTest ./haproxy.nix {};
  hardened = handleTest ./hardened.nix {};
@ -485,7 +485,7 @@ in
  sonarr = handleTest ./sonarr.nix {};
  sourcehut = handleTest ./sourcehut.nix {};
  spacecookie = handleTest ./spacecookie.nix {};
  spark = handleTestOn ["x86_64-linux"] ./spark {};
  spark = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./spark {};
  sslh = handleTest ./sslh.nix {};
  sssd = handleTestOn ["x86_64-linux"] ./sssd.nix {};
  sssd-ldap = handleTestOn ["x86_64-linux"] ./sssd-ldap.nix {};
7
nixos/tests/hadoop/default.nix
Normal file
@ -0,0 +1,7 @@
{ handleTestOn, package, ... }:

{
  all = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hadoop.nix { inherit package; };
  hdfs = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hdfs.nix { inherit package; };
  yarn = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./yarn.nix { inherit package; };
}
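
These per-version attributes can then be built individually; assuming the usual nixosTests entry point, an invocation would look something like `nix-build -A nixosTests.hadoop.yarn` (or `nixosTests.hadoop_3_2.hdfs` for the 3.2 line) from the nixpkgs root.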
@ -1,121 +1,148 @@
# This test is very comprehensive. It tests whether all hadoop services work well with each other.
# Run this when updating the Hadoop package or making significant changes to the hadoop module.
# For a more basic test, see hdfs.nix and yarn.nix
import ../make-test-python.nix ({pkgs, ...}: {
import ../make-test-python.nix ({ package, ... }: {
  name = "hadoop-combined";

  nodes = let
    package = pkgs.hadoop;
    coreSite = {
      "fs.defaultFS" = "hdfs://ns1";
    };
    hdfsSite = {
      "dfs.namenode.rpc-bind-host" = "0.0.0.0";
      "dfs.namenode.http-bind-host" = "0.0.0.0";
      "dfs.namenode.servicerpc-bind-host" = "0.0.0.0";
  nodes =
    let
      coreSite = {
        "fs.defaultFS" = "hdfs://ns1";
      };
      hdfsSite = {
        # HA Quorum Journal Manager configuration
        "dfs.nameservices" = "ns1";
        "dfs.ha.namenodes.ns1" = "nn1,nn2";
        "dfs.namenode.shared.edits.dir.ns1" = "qjournal://jn1:8485;jn2:8485;jn3:8485/ns1";
        "dfs.namenode.rpc-address.ns1.nn1" = "nn1:8020";
        "dfs.namenode.rpc-address.ns1.nn2" = "nn2:8020";
        "dfs.namenode.servicerpc-address.ns1.nn1" = "nn1:8022";
        "dfs.namenode.servicerpc-address.ns1.nn2" = "nn2:8022";
        "dfs.namenode.http-address.ns1.nn1" = "nn1:9870";
        "dfs.namenode.http-address.ns1.nn2" = "nn2:9870";

      # HA Quorum Journal Manager configuration
      "dfs.nameservices" = "ns1";
      "dfs.ha.namenodes.ns1" = "nn1,nn2";
      "dfs.namenode.shared.edits.dir.ns1.nn1" = "qjournal://jn1:8485;jn2:8485;jn3:8485/ns1";
      "dfs.namenode.shared.edits.dir.ns1.nn2" = "qjournal://jn1:8485;jn2:8485;jn3:8485/ns1";
      "dfs.namenode.rpc-address.ns1.nn1" = "nn1:8020";
      "dfs.namenode.rpc-address.ns1.nn2" = "nn2:8020";
      "dfs.namenode.servicerpc-address.ns1.nn1" = "nn1:8022";
      "dfs.namenode.servicerpc-address.ns1.nn2" = "nn2:8022";
      "dfs.namenode.http-address.ns1.nn1" = "nn1:9870";
      "dfs.namenode.http-address.ns1.nn2" = "nn2:9870";
      # Automatic failover configuration
      "dfs.client.failover.proxy.provider.ns1" = "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider";
      "dfs.ha.automatic-failover.enabled.ns1" = "true";
      "dfs.ha.fencing.methods" = "shell(true)";
      "ha.zookeeper.quorum" = "zk1:2181";
    };
    yarnSite = {
      "yarn.resourcemanager.zk-address" = "zk1:2181";
      "yarn.resourcemanager.ha.enabled" = "true";
      "yarn.resourcemanager.ha.rm-ids" = "rm1,rm2";
      "yarn.resourcemanager.hostname.rm1" = "rm1";
      "yarn.resourcemanager.hostname.rm2" = "rm2";
      "yarn.resourcemanager.ha.automatic-failover.enabled" = "true";
      "yarn.resourcemanager.cluster-id" = "cluster1";
      # yarn.resourcemanager.webapp.address needs to be defined even though yarn.resourcemanager.hostname is set. This shouldn't be necessary, but there's a bug in
      # hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java:70
      # that causes AM containers to fail otherwise.
      "yarn.resourcemanager.webapp.address.rm1" = "rm1:8088";
      "yarn.resourcemanager.webapp.address.rm2" = "rm2:8088";
    };
  in
  {
    zk1 = { ... }: {
      services.zookeeper.enable = true;
      networking.firewall.allowedTCPPorts = [ 2181 ];
    };

      # Automatic failover configuration
      "dfs.client.failover.proxy.provider.ns1" = "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider";
      "dfs.ha.automatic-failover.enabled.ns1" = "true";
      "dfs.ha.fencing.methods" = "shell(true)";
      "ha.zookeeper.quorum" = "zk1:2181";
    };
    yarnSiteHA = {
      "yarn.resourcemanager.zk-address" = "zk1:2181";
      "yarn.resourcemanager.ha.enabled" = "true";
      "yarn.resourcemanager.ha.rm-ids" = "rm1,rm2";
      "yarn.resourcemanager.hostname.rm1" = "rm1";
      "yarn.resourcemanager.hostname.rm2" = "rm2";
      "yarn.resourcemanager.ha.automatic-failover.enabled" = "true";
      "yarn.resourcemanager.cluster-id" = "cluster1";
      # yarn.resourcemanager.webapp.address needs to be defined even though yarn.resourcemanager.hostname is set. This shouldn't be necessary, but there's a bug in
      # hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java:70
      # that causes AM containers to fail otherwise.
      "yarn.resourcemanager.webapp.address.rm1" = "rm1:8088";
      "yarn.resourcemanager.webapp.address.rm2" = "rm2:8088";
    };
  in {
    zk1 = { ... }: {
      services.zookeeper.enable = true;
      networking.firewall.allowedTCPPorts = [ 2181 ];
    };
    # HDFS cluster
    nn1 = { ... }: {
      services.hadoop = {
        inherit package coreSite hdfsSite;
        hdfs.namenode = {
          enable = true;
          openFirewall = true;
        };
        hdfs.zkfc.enable = true;
      };
    };
    nn2 = { ... }: {
      services.hadoop = {
        inherit package coreSite hdfsSite;
        hdfs.namenode = {
          enable = true;
          openFirewall = true;
        };
        hdfs.zkfc.enable = true;
      };
    };

    # HDFS cluster
    nn1 = {pkgs, options, ...}: {
      services.hadoop = {
        inherit package coreSite hdfsSite;
        hdfs.namenode.enable = true;
        hdfs.zkfc.enable = true;
    jn1 = { ... }: {
      services.hadoop = {
        inherit package coreSite hdfsSite;
        hdfs.journalnode = {
          enable = true;
          openFirewall = true;
        };
      };
    };
    };
    nn2 = {pkgs, options, ...}: {
      services.hadoop = {
        inherit package coreSite hdfsSite;
        hdfs.namenode.enable = true;
        hdfs.zkfc.enable = true;
    jn2 = { ... }: {
      services.hadoop = {
        inherit package coreSite hdfsSite;
        hdfs.journalnode = {
          enable = true;
          openFirewall = true;
        };
      };
    };
    jn3 = { ... }: {
      services.hadoop = {
        inherit package coreSite hdfsSite;
        hdfs.journalnode = {
          enable = true;
          openFirewall = true;
        };
      };
    };
    };

    jn1 = {pkgs, options, ...}: {
      services.hadoop = {
        inherit package coreSite hdfsSite;
        hdfs.journalnode.enable = true;
    dn1 = { ... }: {
      services.hadoop = {
        inherit package coreSite hdfsSite;
        hdfs.datanode = {
          enable = true;
          openFirewall = true;
        };
      };
    };
    };
    jn2 = {pkgs, options, ...}: {
      services.hadoop = {
        inherit package coreSite hdfsSite;
        hdfs.journalnode.enable = true;
      };
    };
    jn3 = {pkgs, options, ...}: {
      services.hadoop = {
        inherit package coreSite hdfsSite;
        hdfs.journalnode.enable = true;
      };
    };

    dn1 = {pkgs, options, ...}: {
      services.hadoop = {
        inherit package coreSite hdfsSite;
        hdfs.datanode.enable = true;
    # YARN cluster
    rm1 = { options, ... }: {
      services.hadoop = {
        inherit package coreSite hdfsSite yarnSite;
        yarn.resourcemanager = {
          enable = true;
          openFirewall = true;
        };
      };
    };
    };

    # YARN cluster
    rm1 = {pkgs, options, ...}: {
      services.hadoop = {
        inherit package coreSite hdfsSite;
        yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
        yarn.resourcemanager.enable = true;
    rm2 = { options, ... }: {
      services.hadoop = {
        inherit package coreSite hdfsSite yarnSite;
        yarn.resourcemanager = {
          enable = true;
          openFirewall = true;
        };
      };
    };
    };
    rm2 = {pkgs, options, ...}: {
      services.hadoop = {
        inherit package coreSite hdfsSite;
        yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
        yarn.resourcemanager.enable = true;
    nm1 = { options, ... }: {
      virtualisation.memorySize = 2048;
      services.hadoop = {
        inherit package coreSite hdfsSite yarnSite;
        yarn.nodemanager = {
          enable = true;
          openFirewall = true;
        };
      };
    };
    };
    nm1 = {pkgs, options, ...}: {
      virtualisation.memorySize = 2048;
      services.hadoop = {
        inherit package coreSite hdfsSite;
        yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
        yarn.nodemanager.enable = true;
    client = { options, ... }: {
      services.hadoop = {
        gatewayRole.enable = true;
        inherit package coreSite hdfsSite yarnSite;
      };
    };
  };
  };

  testScript = ''
@ -173,26 +200,26 @@ import ../make-test-python.nix ({pkgs, ...}: {
    # DN should have started by now, but confirm anyway
    dn1.wait_for_unit("hdfs-datanode")
    # Print states of namenodes
    dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
    client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
    # Wait for cluster to exit safemode
    dn1.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
    dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
    client.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
    client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
    # test R/W
    dn1.succeed("echo testfilecontents | sudo -u hdfs hdfs dfs -put - /testfile")
    assert "testfilecontents" in dn1.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
    client.succeed("echo testfilecontents | sudo -u hdfs hdfs dfs -put - /testfile")
    assert "testfilecontents" in client.succeed("sudo -u hdfs hdfs dfs -cat /testfile")

    # Test NN failover
    nn1.succeed("systemctl stop hdfs-namenode")
    assert "active" in dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
    dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
    assert "testfilecontents" in dn1.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
    assert "active" in client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
    client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
    assert "testfilecontents" in client.succeed("sudo -u hdfs hdfs dfs -cat /testfile")

    nn1.succeed("systemctl start hdfs-namenode")
    nn1.wait_for_open_port(9870)
    nn1.wait_for_open_port(8022)
    nn1.wait_for_open_port(8020)
    assert "standby" in dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
    dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
    assert "standby" in client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
    client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")

    #### YARN tests ####

@ -208,21 +235,21 @@ import ../make-test-python.nix ({pkgs, ...}: {
    nm1.wait_for_unit("yarn-nodemanager")
    nm1.wait_for_open_port(8042)
    nm1.wait_for_open_port(8040)
    nm1.wait_until_succeeds("yarn node -list | grep Nodes:1")
    nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
    nm1.succeed("sudo -u yarn yarn node -list | systemd-cat")
    client.wait_until_succeeds("yarn node -list | grep Nodes:1")
    client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
    client.succeed("sudo -u yarn yarn node -list | systemd-cat")

    # Test RM failover
    rm1.succeed("systemctl stop yarn-resourcemanager")
    assert "standby" not in nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
    nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
    assert "standby" not in client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
    client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
    rm1.succeed("systemctl start yarn-resourcemanager")
    rm1.wait_for_unit("yarn-resourcemanager")
    rm1.wait_for_open_port(8088)
    assert "standby" in nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
    nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
    assert "standby" in client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
    client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")

    assert "Estimated value of Pi is" in nm1.succeed("HADOOP_USER_NAME=hdfs yarn jar $(readlink $(which yarn) | sed -r 's~bin/yarn~lib/hadoop-*/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar~g') pi 2 10")
    assert "SUCCEEDED" in nm1.succeed("yarn application -list -appStates FINISHED")
    assert "Estimated value of Pi is" in client.succeed("HADOOP_USER_NAME=hdfs yarn jar $(readlink $(which yarn) | sed -r 's~bin/yarn~lib/hadoop-*/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar~g') pi 2 10")
    assert "SUCCEEDED" in client.succeed("yarn application -list -appStates FINISHED")
  '';
})
@ -1,32 +1,46 @@
# Test a minimal HDFS cluster with no HA
import ../make-test-python.nix ({...}: {
  nodes = {
    namenode = {pkgs, ...}: {
import ../make-test-python.nix ({ package, lib, ... }:
with lib;
{
  name = "hadoop-hdfs";

  nodes = let
    coreSite = {
      "fs.defaultFS" = "hdfs://namenode:8020";
      "hadoop.proxyuser.httpfs.groups" = "*";
      "hadoop.proxyuser.httpfs.hosts" = "*";
    };
  in {
    namenode = { pkgs, ... }: {
      services.hadoop = {
        package = pkgs.hadoop;
        inherit package;
        hdfs = {
          namenode = {
            enable = true;
            openFirewall = true;
            formatOnInit = true;
          };
          httpfs.enable = true;
        };
        coreSite = {
          "fs.defaultFS" = "hdfs://namenode:8020";
          "hadoop.proxyuser.httpfs.groups" = "*";
          "hadoop.proxyuser.httpfs.hosts" = "*";
          httpfs = {
            # The NixOS hadoop module only supports webHDFS on 3.3 and newer
            enable = mkIf (versionAtLeast package.version "3.3") true;
            openFirewall = true;
          };
        };
        inherit coreSite;
      };
    };
    datanode = {pkgs, ...}: {
    datanode = { pkgs, ... }: {
      services.hadoop = {
        package = pkgs.hadoop;
        hdfs.datanode.enable = true;
        coreSite = {
          "fs.defaultFS" = "hdfs://namenode:8020";
          "hadoop.proxyuser.httpfs.groups" = "*";
          "hadoop.proxyuser.httpfs.hosts" = "*";
        inherit package;
        hdfs.datanode = {
          enable = true;
          openFirewall = true;
          dataDirs = [{
            type = "DISK";
            path = "/tmp/dn1";
          }];
        };
        inherit coreSite;
      };
    };
  };
@ -37,21 +51,32 @@ import ../make-test-python.nix ({...}: {
    namenode.wait_for_unit("hdfs-namenode")
    namenode.wait_for_unit("network.target")
    namenode.wait_for_open_port(8020)
    namenode.succeed("ss -tulpne | systemd-cat")
    namenode.succeed("cat /etc/hadoop*/hdfs-site.xml | systemd-cat")
    namenode.wait_for_open_port(9870)

    datanode.wait_for_unit("hdfs-datanode")
    datanode.wait_for_unit("network.target")
  '' + ( if versionAtLeast package.version "3" then ''
    datanode.wait_for_open_port(9864)
    datanode.wait_for_open_port(9866)
    datanode.wait_for_open_port(9867)

    namenode.succeed("curl -f http://namenode:9870")
    datanode.succeed("curl -f http://datanode:9864")
  '' else ''
    datanode.wait_for_open_port(50075)
    datanode.wait_for_open_port(50010)
    datanode.wait_for_open_port(50020)

    datanode.succeed("curl -f http://datanode:50075")
  '' ) + ''
    namenode.succeed("curl -f http://namenode:9870")

    datanode.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
    datanode.succeed("echo testfilecontents | sudo -u hdfs hdfs dfs -put - /testfile")
    assert "testfilecontents" in datanode.succeed("sudo -u hdfs hdfs dfs -cat /testfile")

  '' + optionalString ( versionAtLeast package.version "3.3" ) ''
    namenode.wait_for_unit("hdfs-httpfs")
    namenode.wait_for_open_port(14000)
    assert "testfilecontents" in datanode.succeed("curl -f \"http://namenode:14000/webhdfs/v1/testfile?user.name=hdfs&op=OPEN\" 2>&1")
@ -1,22 +1,30 @@
# This only tests if YARN is able to start its services
import ../make-test-python.nix ({...}: {
  nodes = {
    resourcemanager = {pkgs, ...}: {
      services.hadoop.package = pkgs.hadoop;
      services.hadoop.yarn.resourcemanager.enable = true;
      services.hadoop.yarnSite = {
        "yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";
      };
    };
    nodemanager = {pkgs, ...}: {
      services.hadoop.package = pkgs.hadoop;
      services.hadoop.yarn.nodemanager.enable = true;
      services.hadoop.yarnSite = {
        "yarn.resourcemanager.hostname" = "resourcemanager";
        "yarn.nodemanager.log-dirs" = "/tmp/userlogs";
      };
    };
import ../make-test-python.nix ({ package, ... }: {
  name = "hadoop-yarn";

  nodes = {
    resourcemanager = { ... }: {
      services.hadoop = {
        inherit package;
        yarn.resourcemanager = {
          enable = true;
          openFirewall = true;
        };
      };
    };
    nodemanager = { options, lib, ... }: {
      services.hadoop = {
        inherit package;
        yarn.nodemanager = {
          enable = true;
          openFirewall = true;
        };
        yarnSite = options.services.hadoop.yarnSite.default // {
          "yarn.resourcemanager.hostname" = "resourcemanager";
          "yarn.nodemanager.log-dirs" = "/tmp/userlogs";
        };
      };
    };
  };

  testScript = ''

@ -18,4 +18,4 @@ foldl
  };
})
{ }
[ 21 22 23 ]
[ 22 23 ]

@ -2,24 +2,33 @@

python3Packages.buildPythonApplication rec {
  pname = "mopidy-ytmusic";
  version = "0.3.2";
  version = "0.3.5";

  src = python3Packages.fetchPypi {
    inherit version;
    pname = "Mopidy-YTMusic";
    sha256 = "sha256-BZtW+qHsTnOMj+jdAFI8ZMwGxJc9lNosgPJZGbt4JgU=";
    sha256 = "0pncyxfqxvznb9y4ksndbny1yf5mxh4089ak0yz86dp2qi5j99iv";
  };

  postPatch = ''
    substituteInPlace setup.py \
      --replace 'ytmusicapi>=0.20.0,<0.21.0' 'ytmusicapi>=0.20.0'
  '';

  propagatedBuildInputs = [
    mopidy
    python3Packages.ytmusicapi
    python3Packages.pytube
  ];

  pythonImportsCheck = [ "mopidy_ytmusic" ];

  # has no tests
  doCheck = false;

  meta = with lib; {
    description = "Mopidy extension for playing music from YouTube Music";
    homepage = "https://github.com/OzymandiasTheGreat/mopidy-ytmusic";
    license = licenses.asl20;
    maintainers = [ maintainers.nickhu ];
  };
83
pkgs/applications/editors/lapce/default.nix
Normal file
@ -0,0 +1,83 @@
{ lib
, stdenv
, fetchFromGitHub
, rustPlatform
, cmake
, pkg-config
, python3
, perl
, freetype
, fontconfig
, libxkbcommon
, xcbutil
, libX11
, libXcursor
, libXrandr
, libXi
, vulkan-loader
, copyDesktopItems
, makeDesktopItem
}:

rustPlatform.buildRustPackage rec {
  pname = "lapce";
  version = "0.0.10";

  src = fetchFromGitHub {
    owner = "lapce";
    repo = pname;
    rev = "v${version}";
    sha256 = "tOVFm4DFQurFU4DtpPwxXQLbTGCZnrV1FfYKtvkRxRE=";
  };

  cargoPatches = [ ./fix-version.patch ];

  cargoSha256 = "BwB3KgmI5XnZ5uHv6f+kGKBzpyxPWcoKvF7qw90eorI=";

  nativeBuildInputs = [
    cmake
    pkg-config
    python3
    perl
    copyDesktopItems
  ];

  buildInputs = [
    freetype
    fontconfig
    libxkbcommon
    xcbutil
    libX11
    libXcursor
    libXrandr
    libXi
    vulkan-loader
  ];

  # Add missing vulkan dependency to rpath
  preFixup = ''
    patchelf --add-needed ${vulkan-loader}/lib/libvulkan.so.1 $out/bin/lapce
  '';

  postInstall = ''
    install -Dm0644 $src/extra/images/logo.svg $out/share/icons/hicolor/scalable/apps/lapce.svg
  '';

  desktopItems = [ (makeDesktopItem {
    name = "lapce";
    exec = "lapce %F";
    icon = "lapce";
    desktopName = "Lapce";
    comment = meta.description;
    genericName = "Code Editor";
    categories = [ "Development" "Utility" "TextEditor" ];
  }) ];

  meta = with lib; {
    description = "Lightning-fast and Powerful Code Editor written in Rust";
    homepage = "https://github.com/lapce/lapce";
    license = with licenses; [ asl20 ];
    maintainers = with maintainers; [ elliot ];
    broken = stdenv.isDarwin;
  };
}
31
pkgs/applications/editors/lapce/fix-version.patch
Normal file
@ -0,0 +1,31 @@
diff --git a/Cargo.lock b/Cargo.lock
index bc9a0f8..45a74ad 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@ -2165,7 +2165,7 @@ dependencies = [

 [[package]]
 name = "lapce"
-version = "0.0.9"
+version = "0.0.10"
 dependencies = [
  "lapce-core",
  "lapce-proxy",
@ -2173,7 +2173,7 @@ dependencies = [

 [[package]]
 name = "lapce-core"
-version = "0.0.9"
+version = "0.0.10"
 dependencies = [
  "Inflector",
  "alacritty_terminal 0.15.0",
@ -2233,7 +2233,7 @@ dependencies = [

 [[package]]
 name = "lapce-proxy"
-version = "0.0.9"
+version = "0.0.10"
 dependencies = [
  "alacritty_terminal 0.16.0-rc2",
  "anyhow",
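
The patch above appears to be needed because the upstream v0.0.10 tag still records version 0.0.9 for the lapce workspace crates in Cargo.lock; applying it via cargoPatches keeps the lockfile consistent with the bumped version before vendoring.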
@ -1374,6 +1374,18 @@ let
    };
  };

  marp-team.marp-vscode = buildVscodeMarketplaceExtension {
    mktplcRef = {
      name = "marp-vscode";
      publisher = "marp-team";
      version = "1.5.0";
      sha256 = "0wqsj8rp58vl3nafkjvyw394h5j4jd7d24ra6hkvfpnlzrgv4yhs";
    };
    meta = {
      license = lib.licenses.mit;
    };
  };

  mikestead.dotenv = buildVscodeMarketplaceExtension {
    mktplcRef = {
      name = "dotenv";
@ -1679,6 +1691,18 @@ let
    };
  };

  richie5um2.snake-trail = buildVscodeMarketplaceExtension {
    mktplcRef = {
      name = "snake-trail";
      publisher = "richie5um2";
      version = "0.6.0";
      sha256 = "0wkpq9f48hplrgabb0v1ij6fc4sb8h4a93dagw4biprhnnm3qx49";
    };
    meta = with lib; {
      license = licenses.mit;
    };
  };

  ritwickdey.liveserver = buildVscodeMarketplaceExtension {
    mktplcRef = {
      name = "liveserver";

@ -2,16 +2,16 @@

buildGoModule rec {
  pname = "kratos";
  version = "0.8.0-alpha.3";
  version = "0.8.3-alpha.1.pre.0";

  src = fetchFromGitHub {
    owner = "ory";
    repo = "kratos";
    rev = "v${version}";
    sha256 = "0ihq2kxjackicxg0hrpmx6bsgz056xbaq3j8py37z2w6mwszarcg";
    sha256 = "1225paf0x6lb6cb3q5f4lyz0r426ifx4x8145q7nsc6v64srck2y";
  };

  vendorSha256 = "175pckj30cm5xkbvsdvwzarvwapsylyjgj4ss8v5r1sa0fjpj008";
  vendorSha256 = "10zhxbccjsp6hbmk2lnvbag6c92hz703mcaigaj4wvlf7glpldm6";

  subPackages = [ "." ];

@ -25,7 +25,7 @@ buildGoModule rec {
      test/e2e/run.sh
      script/testenv.sh
      script/test-envs.sh
      persistence/sql/migratest/update_fixtures.sh
      script/debug-entrypoint.sh
    )
    patchShebangs "''${files[@]}"

@ -1,8 +1,8 @@
{ lib
, buildGoPackage
, fetchFromGitHub

, withSpeech ? true
, stdenv
, withSpeech ? !stdenv.isDarwin
, makeWrapper
, espeak-ng
}:

@ -9,16 +9,16 @@

rustPlatform.buildRustPackage rec {
  pname = "stork";
  version = "1.4.0";
  version = "1.4.1";

  src = fetchFromGitHub {
    owner = "jameslittle230";
    repo = "stork";
    rev = "v${version}";
    sha256 = "sha256-9fylJcUuModemkBRnXeFfB1b+CD9IvTxW+CnlqaUb60=";
    sha256 = "sha256-aBsxRLUufVUauySCxZKk/ZfcU/5KR7jOHmnx6mHmsFs=";
  };

  cargoSha256 = "sha256-j7OXl66xuTuP6hWJs+xHrwtaBGAYt02OESCN6FH3KX0=";
  cargoSha256 = "sha256-oNoWGdXYfp47IpqU1twbORPOYrHjArNf43Zyeyat4Xs=";

  nativeBuildInputs = [ pkg-config ];

@ -91,11 +91,11 @@ in

stdenv.mkDerivation rec {
  pname = "brave";
  version = "1.36.111";
  version = "1.36.116";

  src = fetchurl {
    url = "https://github.com/brave/brave-browser/releases/download/v${version}/brave-browser_${version}_amd64.deb";
    sha256 = "bXZsUqLaP43wJV3Cehgblw1G179HgGhToSL36v5QseA=";
    sha256 = "whGV0VgCm6JSyrcFQTKbM35b/qLQdBmChTrYuyC+OlI=";
  };

  dontConfigure = true;

@ -1,8 +1,8 @@
{
  "stable": {
    "version": "99.0.4844.51",
    "sha256": "1qxsn8zvvvsnn0k7nn606rhaial8ikrlfh175msqpp50xibjxicp",
    "sha256bin64": "04kqfppa88g2q54vp53avyyhqzrxljz49p4wqk76kq7fz2rm94x1",
    "version": "99.0.4844.74",
    "sha256": "165vzxv3xi4r9ia3qnqsr4p9ai0344w1pnq03c6jdq7x613lcprd",
    "sha256bin64": "1xzr7qv4rcardl3apr8w22dn81lzqkklhp26qqlbdcylacqqji04",
    "deps": {
      "gn": {
        "version": "2022-01-10",
@ -12,10 +12,10 @@
      }
    },
    "chromedriver": {
      "version": "99.0.4844.35",
      "sha256_linux": "1q10mn34s03zy0nqcgrjd7ry53g4paxpwcki1bgicpcrwnjlzc3y",
      "sha256_darwin": "0mcfry8vqqc8n1sgyn2azr8pc4lgjnkpnhz0ggjqm12njq0lfjfx",
      "sha256_darwin_aarch64": "19wpqd5mq2vrgma899vbbdqhg660x47v4ppbz1r8dcg5r5y93x3s"
      "version": "99.0.4844.51",
      "sha256_linux": "1r5wbcfbj9s216jyjasmiscsrsix9ap3pplp12rznrwn4898p51y",
      "sha256_darwin": "1nak8p5hdrw94lx73m9c110zrwag4qr6487dhplm3qfrnrkdh8wp",
      "sha256_darwin_aarch64": "0hkcx6a8bcjlbmp6z3ld23mi1kpyjn2g7m3ns9qw6ns4x3rn5i3r"
    }
  },
  "beta": {

@ -1,9 +1,9 @@
{ lib, buildGoModule, fetchFromGitHub, fetchzip, installShellFiles }:

let
  version = "0.27.3";
  sha256 = "08ax1033456hfm5qz0r671xm5ig0047nqp7xffyn9za498bm4i5q";
  manifestsSha256 = "165kspq10nvlihcb1460qmbw5r1mlzs5gliw01qa4mymvzmlggk7";
  version = "0.27.4";
  sha256 = "06951i332gr17nsbns8mh4kcjilqfw5w95shaznpaksx93f554g0";
  manifestsSha256 = "0fvzh7j3vi5hw8jbw2gisjnn53bffwnp7zm3dwcbv3svwpw7823d";

  manifests = fetchzip {
    url =
@ -23,7 +23,7 @@ in buildGoModule rec {
    inherit sha256;
  };

  vendorSha256 = "sha256-ENSfec7iSKOkILgVCVnORpAia4D+vBjQAUXDA7EIvVQ=";
  vendorSha256 = "sha256-7sHLXjyYMWSFckDPeVGJYK+nwhbRpD76tV334PCVYwA=";

  postUnpack = ''
    cp -r ${manifests} source/cmd/flux/manifests
@ -15,6 +15,8 @@
, zlib
, zstd
, openssl
, glibc
, nixosTests
}:

with lib;
@ -22,7 +24,7 @@ with lib;
assert elem stdenv.system [ "x86_64-linux" "x86_64-darwin" "aarch64-linux" "aarch64-darwin" ];

let
  common = { pname, version, untarDir ? "${pname}-${version}", sha256, jdk, openssl ? null, nativeLibs ? [ ], libPatches ? "" }:
  common = { pname, version, untarDir ? "${pname}-${version}", sha256, jdk, openssl ? null, nativeLibs ? [ ], libPatches ? "", tests }:
    stdenv.mkDerivation rec {
      inherit pname version jdk libPatches untarDir openssl;
      src = fetchurl {
@ -38,7 +40,10 @@ let
      installPhase = ''
        mkdir -p $out/{lib/${untarDir}/conf,bin,lib}
        mv * $out/lib/${untarDir}

      '' + optionalString stdenv.isLinux ''
        # All versions need container-executor, but some versions can't use autoPatchelf because of broken SSL versions
        patchelf --set-interpreter ${glibc.out}/lib64/ld-linux-x86-64.so.2 $out/lib/${untarDir}/bin/container-executor
      '' + ''
        for n in $(find $out/lib/${untarDir}/bin -type f ! -name "*.*"); do
          makeWrapper "$n" "$out/bin/$(basename $n)"\
            --set-default JAVA_HOME ${jdk.home}\
@ -49,6 +54,8 @@ let
        done
      '' + libPatches;

      passthru = { inherit tests; };

      meta = {
        homepage = "https://hadoop.apache.org/";
        description = "Framework for distributed processing of large data sets across clusters of computers";
@ -73,30 +80,34 @@ in
{
  # Different versions of hadoop support different Java runtime versions
  # https://cwiki.apache.org/confluence/display/HADOOP/Hadoop+Java+Versions
  hadoop_3_3 =
    common
      (rec {
        pname = "hadoop";
        version = "3.3.1";
        untarDir = "${pname}-${version}";
        sha256 = rec {
          x86_64-linux = "1b3v16ihysqaxw8za1r5jlnphy8dwhivdx2d0z64309w57ihlxxd";
          x86_64-darwin = x86_64-linux;
          aarch64-linux = "00ln18vpi07jq2slk3kplyhcj8ad41n0yl880q5cihilk7daclxz";
          aarch64-darwin = aarch64-linux;
        };

        inherit openssl;
        nativeLibs = [ stdenv.cc.cc.lib protobuf3_7 zlib snappy ];
        libPatches = ''
          ln -s ${getLib cyrus_sasl}/lib/libsasl2.so $out/lib/${untarDir}/lib/native/libsasl2.so.2
          ln -s ${getLib openssl}/lib/libcrypto.so $out/lib/${untarDir}/lib/native/
          ln -s ${getLib zlib}/lib/libz.so.1 $out/lib/${untarDir}/lib/native/
          ln -s ${getLib zstd}/lib/libzstd.so.1 $out/lib/${untarDir}/lib/native/
          ln -s ${getLib bzip2}/lib/libbz2.so.1 $out/lib/${untarDir}/lib/native/
        '' + optionalString stdenv.isLinux "patchelf --add-rpath ${jdk.home}/lib/server $out/lib/${untarDir}/lib/native/libnativetask.so.1.0.0";
        jdk = jdk11_headless;
      });
  hadoop_3_3 = common rec {
    pname = "hadoop";
    version = "3.3.1";
    untarDir = "${pname}-${version}";
    sha256 = rec {
      x86_64-linux = "1b3v16ihysqaxw8za1r5jlnphy8dwhivdx2d0z64309w57ihlxxd";
      x86_64-darwin = x86_64-linux;
      aarch64-linux = "00ln18vpi07jq2slk3kplyhcj8ad41n0yl880q5cihilk7daclxz";
      aarch64-darwin = aarch64-linux;
    };
    jdk = jdk11_headless;
    inherit openssl;
    # TODO: Package and add Intel Storage Acceleration Library
    nativeLibs = [ stdenv.cc.cc.lib protobuf3_7 zlib snappy ];
    libPatches = ''
      ln -s ${getLib cyrus_sasl}/lib/libsasl2.so $out/lib/${untarDir}/lib/native/libsasl2.so.2
      ln -s ${getLib openssl}/lib/libcrypto.so $out/lib/${untarDir}/lib/native/
      ln -s ${getLib zlib}/lib/libz.so.1 $out/lib/${untarDir}/lib/native/
      ln -s ${getLib zstd}/lib/libzstd.so.1 $out/lib/${untarDir}/lib/native/
      ln -s ${getLib bzip2}/lib/libbz2.so.1 $out/lib/${untarDir}/lib/native/
    '' + optionalString stdenv.isLinux ''
      # libjvm.so for Java >=11
      patchelf --add-rpath ${jdk.home}/lib/server $out/lib/${untarDir}/lib/native/libnativetask.so.1.0.0
      # Java 8 has libjvm.so at a different path
      patchelf --add-rpath ${jdk.home}/jre/lib/amd64/server $out/lib/${untarDir}/lib/native/libnativetask.so.1.0.0
    '';
    tests = nixosTests.hadoop;
  };
  hadoop_3_2 = common rec {
    pname = "hadoop";
    version = "3.2.2";
@ -104,11 +115,13 @@ in
    jdk = jdk8_headless;
    # not using native libs because of broken openssl_1_0_2 dependency
    # can be manually overridden
    tests = nixosTests.hadoop_3_2;
  };
  hadoop2 = common rec {
    pname = "hadoop";
    version = "2.10.1";
    sha256.x86_64-linux = "1w31x4bk9f2swnx8qxx0cgwfg8vbpm6cy5lvfnbbpl3rsjhmyg97";
    jdk = jdk8_headless;
    tests = nixosTests.hadoop2;
  };
}
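
With tests wired into passthru, the NixOS tests for a given Hadoop release become reachable from the package itself; assuming the attribute layout above, something like `nix-build -A hadoop_3_2.tests.all` should build the full test for the 3.2 line.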
@ -2,16 +2,16 @@

buildGoModule rec {
  pname = "qbec";
  version = "0.15.1";
  version = "0.15.2";

  src = fetchFromGitHub {
    owner = "splunk";
    repo = "qbec";
    rev = "v${version}";
    sha256 = "sha256-cXU+LnOCsGg+iwH5c7cKVi2Htw45AGxyjJFKXKbTkUo=";
    sha256 = "sha256-js/UjnNYRW7s3b4TeprhmBe4cDLDYDrMeLtpASI9aN4=";
  };

  vendorSha256 = "sha256-CiVAzFN/ygIiyhZKYtJ197TZO3ppL/emWSj4hAlIanc=";
  vendorSha256 = "sha256-oEbKk9cMbI0ZWXrfM8Y19OF/A75mwHl0C/PJx0oTOBo=";

  doCheck = false;

@ -1,13 +1,26 @@
{ lib, stdenv, fetchzip, makeWrapper, jdk8, python3Packages, extraPythonPackages ? [], coreutils, hadoop
, RSupport? true, R
{ lib
, stdenv
, fetchzip
, makeWrapper
, jdk8
, python3Packages
, extraPythonPackages ? [ ]
, coreutils
, hadoop
, RSupport ? true
, R
}:

with lib;

let
  spark = { pname, version, src }:
  spark = { pname, version, sha256 }:
    stdenv.mkDerivation rec {
      inherit pname version src;
      inherit pname version;
      src = fetchzip {
        url = "mirror://apache/spark/${pname}-${version}/${pname}-${version}-bin-without-hadoop.tgz";
        sha256 = sha256;
      };
      nativeBuildInputs = [ makeWrapper ];
      buildInputs = [ jdk8 python3Packages.python ]
        ++ extraPythonPackages
@ -45,31 +58,29 @@ let
      '';

      meta = {
        description = "Apache Spark is a fast and general engine for large-scale data processing";
        homepage = "https://spark.apache.org/";
        license = lib.licenses.asl20;
        platforms = lib.platforms.all;
        maintainers = with maintainers; [ thoughtpolice offline kamilchm illustris ];
        description = "Apache Spark is a fast and general engine for large-scale data processing";
        homepage = "https://spark.apache.org/";
        license = lib.licenses.asl20;
        platforms = lib.platforms.all;
        maintainers = with maintainers; [ thoughtpolice offline kamilchm illustris ];
        repositories.git = "git://git.apache.org/spark.git";
      };
    };
in {
  spark3 = spark rec {
in
{
  spark_3_2 = spark rec {
    pname = "spark";
    version = "3.2.1";
    sha256 = "0kxdqczwmj6pray0h8h1qhygni9m82jzznw5fbv9hrxrkq1v182d";
  };
  spark_3_1 = spark rec {
    pname = "spark";
    version = "3.1.2";

    src = fetchzip {
      url = "mirror://apache/spark/${pname}-${version}/${pname}-${version}-bin-without-hadoop.tgz";
      sha256 = "1bgh2y6jm7wqy6yc40rx68xkki31i3jiri2yixb1bm0i9pvsj9yf";
    };
    sha256 = "1bgh2y6jm7wqy6yc40rx68xkki31i3jiri2yixb1bm0i9pvsj9yf";
  };
  spark2 = spark rec {
  spark_2_4 = spark rec {
    pname = "spark";
    version = "2.4.8";

    src = fetchzip {
      url = "mirror://apache/spark/${pname}-${version}/${pname}-${version}-bin-without-hadoop.tgz";
      sha256 = "1mkyq0gz9fiav25vr0dba5ivp0wh0mh7kswwnx8pvsmb6wbwyfxv";
    };
    sha256 = "1mkyq0gz9fiav25vr0dba5ivp0wh0mh7kswwnx8pvsmb6wbwyfxv";
  };
}
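
Since the fetchzip call now lives inside the spark helper, adding another release is a matter of supplying a pname/version/sha256 triple; a sketch with a hypothetical version and a placeholder hash:

  spark_x_y = spark rec {
    pname = "spark";
    version = "x.y.z";        # hypothetical release
    sha256 = lib.fakeSha256;  # placeholder; replace with the real hash
  };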
@ -4,11 +4,11 @@ let
  configOverrides = writeText "cinny-config-overrides.json" (builtins.toJSON conf);
in stdenv.mkDerivation rec {
  pname = "cinny";
  version = "1.8.0";
  version = "1.8.1";

  src = fetchurl {
    url = "https://github.com/ajbura/cinny/releases/download/v${version}/cinny-v${version}.tar.gz";
    sha256 = "0pbapzl3pfx87ns4vp7088kkhl34c0ihbq90r3d0iz6sa16mcs79";
    sha256 = "13jd7hihkw3nlcj0m157z6qix61v6zjs52h5zmw2agm47qmv0w6z";
  };

  installPhase = ''

@ -12,16 +12,16 @@

rustPlatform.buildRustPackage rec {
  pname = "himalaya";
  version = "0.5.8";
  version = "0.5.9";

  src = fetchFromGitHub {
    owner = "soywod";
    repo = pname;
    rev = "v${version}";
    sha256 = "sha256-Ejaspj0YpkGmfO1omOhx8ZDg77J7NqC32mw5Cd3K1FM=";
    sha256 = "sha256-g+ySsHnJ4FpmJLEjlutuiJmMkKI3Jb+HkWi1WBIo1aw=";
  };

  cargoSha256 = "sha256-xce2iHrqTxIirrut4dN7526pjE4T+ruaDS44jr+KeGs=";
  cargoSha256 = "sha256-NkkONl57zSilElVAOXUBxWnims4+EIVkkTdExbeBAaQ=";

  nativeBuildInputs = lib.optionals enableCompletions [ installShellFiles ]
    ++ lib.optionals (!stdenv.hostPlatform.isDarwin) [ pkg-config ];
@ -34,6 +34,8 @@ rustPlatform.buildRustPackage rec {
    openssl
  ];

  # flag added because otherwise the end-to-end tests are run, which
  # require additional tooling and test servers
  cargoTestFlags = [ "--lib" ];

  postInstall = lib.optionalString enableCompletions ''
@ -45,10 +47,10 @@ rustPlatform.buildRustPackage rec {
  '';

  meta = with lib; {
    description = "CLI email client written in Rust";
    description = "Command-line interface for email management";
    homepage = "https://github.com/soywod/himalaya";
    changelog = "https://github.com/soywod/himalaya/blob/v${version}/CHANGELOG.md";
    license = licenses.bsdOriginal;
    maintainers = with maintainers; [ yanganto ];
    maintainers = with maintainers; [ toastal yanganto ];
  };
}

@ -2,13 +2,13 @@

stdenv.mkDerivation rec {
  pname = "QtRVSim";
  version = "0.9.1";
  version = "0.9.2";

  src = fetchFromGitHub {
    owner = "cvut";
    repo = "qtrvsim";
    rev = "refs/tags/v${version}";
    sha256 = "AOksVS0drIBnK4RCxZw40yVxf4E8GjG9kU0rIZsY9gA=";
    sha256 = "B1l+ysrodeDbxYfdLLMF8yk4/uPXTcDrTaMtYm89HuU=";
  };

  nativeBuildInputs = [ cmake wrapQtAppsHook ];

@ -23,11 +23,11 @@ stdenv.mkDerivation rec {
  installPhase = ''
    runHook preInstall

    mkdir -p $out/bin $out/share/${pname} $out/share/applications
    mkdir -p $out/bin $out/share/${pname} $out/share/applications $out/share/icons/hicolor/scalable/apps

    cp -a ${appimageContents}/{locales,resources} $out/share/${pname}
    cp -a ${appimageContents}/freetube.desktop $out/share/applications/${pname}.desktop
    cp -a ${appimageContents}/usr/share/icons $out/share
    cp -a ${appimageContents}/usr/share/icons/hicolor/scalable/freetube.svg $out/share/icons/hicolor/scalable/apps

    substituteInPlace $out/share/applications/${pname}.desktop \
      --replace 'Exec=AppRun' 'Exec=${pname}'

@ -8,13 +8,13 @@

stdenv.mkDerivation rec {
  pname = "smplayer";
  version = "21.10.0";
  version = "22.2.0";

  src = fetchFromGitHub {
    owner = "smplayer-dev";
    repo = pname;
    rev = "v${version}";
    hash = "sha256-p6036c8KX3GCINmkjHZlDLgHhLKri+t2WNWzP4KsSI8=";
    hash = "sha256-7DMvIqW3vzjVzJPyjbXuHHcf1T6EFcf/a/mVYqa3XS8=";
  };

  nativeBuildInputs = [
@ -22,7 +22,9 @@ stdenv.mkDerivation rec {
    wrapQtAppsHook
  ];

  buildInputs = [ qtscript ];
  buildInputs = [
    qtscript
  ];

  dontUseQmakeConfigure = true;

26
pkgs/data/fonts/borg-sans-mono/default.nix
Normal file
@ -0,0 +1,26 @@
{ lib, fetchzip }:

let
  pname = "borg-sans-mono";
  version = "0.2.0";
in
fetchzip {
  name = "${pname}-${version}";

  # https://github.com/marnen/borg-sans-mono/issues/19
  url = "https://github.com/marnen/borg-sans-mono/files/107663/BorgSansMono.ttf.zip";
  sha256 = "1gz4ab0smw76ih5cs2l3n92c77nv7ld5zghq42avjsfhxrc2n5ri";

  postFetch = ''
    mkdir -p $out/share/fonts
    unzip -j $downloadedFile \*.ttf -d $out/share/fonts/truetype
  '';

  meta = with lib; {
    description = "Droid Sans Mono Slashed + Hasklig-style ligatures";
    homepage = "https://github.com/marnen/borg-sans-mono";
    license = licenses.asl20;
    platforms = platforms.all;
    maintainers = with maintainers; [ atila ];
  };
}
@ -2,7 +2,7 @@
|
||||
|
||||
let
|
||||
pname = "vazir-fonts";
|
||||
version = "30.1.0";
|
||||
version = "32.0.0";
|
||||
in fetchFromGitHub {
|
||||
name = "${pname}-${version}";
|
||||
|
||||
@ -14,7 +14,7 @@ in fetchFromGitHub {
|
||||
tar xf $downloadedFile --strip=1
|
||||
find . -name '*.ttf' -exec install -m444 -Dt $out/share/fonts/truetype {} \;
|
||||
'';
|
||||
sha256 = "sha256-J1l6rBFgaXFtGnK0pH7GbaYTt5TI/OevjZrXmaEgkB4=";
|
||||
sha256 = "sha256-Uy8hgBtCcTLwXu9FkLN1WavUfP74Jf53ChxVGS3UBVM=";
|
||||
|
||||
meta = with lib; {
|
||||
homepage = "https://github.com/rastikerdar/vazir-font";
|
||||
|
@ -2,13 +2,13 @@

stdenv.mkDerivation rec {
  pname = "kora-icon-theme";
  version = "1.5.0";
  version = "1.5.1";

  src = fetchFromGitHub {
    owner = "bikass";
    repo = "kora";
    rev = "v${version}";
    sha256 = "sha256-kUgNj7KuxsQ/BvQ0ORl3xzEm9gv69+2PS0Bgv8i/S9U=";
    sha256 = "sha256-3TKjd2Lblb+/zFq7rkdgnD1dJU3kis7QZi7Ui74IWzA=";
  };

  nativeBuildInputs = [
@ -33,6 +33,7 @@ let
    i686-linux = "linux-i686";
    x86_64-darwin = "darwin-x86_64";
    aarch64-darwin = "darwin-universal";
    aarch64-linux = "linux-aarch64";
  };

  arch = archs.${stdenv.system} or (throw "system ${stdenv.system} not supported");
@ -40,13 +41,19 @@ let

  checkInputs = [ git gmp openssl readline libxml2 libyaml ];

  binaryUrl = version: rel:
    if arch == archs.aarch64-linux then
      "https://dev.alpinelinux.org/archive/crystal/crystal-${version}-aarch64-alpine-linux-musl.tar.gz"
    else
      "https://github.com/crystal-lang/crystal/releases/download/${version}/crystal-${version}-${toString rel}-${arch}.tar.gz";

  genericBinary = { version, sha256s, rel ? 1 }:
    stdenv.mkDerivation rec {
      pname = "crystal-binary";
      inherit version;

      src = fetchurl {
        url = "https://github.com/crystal-lang/crystal/releases/download/${version}/crystal-${version}-${toString rel}-${arch}.tar.gz";
        url = binaryUrl version rel;
        sha256 = sha256s.${stdenv.system};
      };

@ -225,6 +232,7 @@ rec {
      x86_64-linux = "1949argajiyqyq09824yj3wjyv88gd8wbf20xh895saqfykiq880";
      i686-linux = "0w0f4fwr2ijhx59i7ppicbh05hfmq7vffmgl7lal6im945m29vch";
      x86_64-darwin = "01n0rf8zh551vv8wq3h0ifnsai0fz9a77yq87xx81y9dscl9h099";
      aarch64-linux = "0sns7l4q3z82qi3dc2r4p63f4s8hvifqzgq56ykwyrvawynjhd53";
    };
  };
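Note: the new binaryUrl helper keeps the aarch64-linux special case (which uses Alpine's musl build) out of the fetchurl call. The same pattern in isolation, as a standalone sketch (arch value and arguments hypothetical):

  let
    arch = "linux-aarch64";  # hypothetical; in the real file this is derived from stdenv.system
    binaryUrl = version: rel:
      if arch == "linux-aarch64"
      then "https://dev.alpinelinux.org/archive/crystal/crystal-${version}-aarch64-alpine-linux-musl.tar.gz"
      else "https://github.com/crystal-lang/crystal/releases/download/${version}/crystal-${version}-${toString rel}-${arch}.tar.gz";
  in binaryUrl "1.3.2" 1  # hypothetical version and release number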
@ -12,14 +12,14 @@

stdenv.mkDerivation rec {
  pname = "open-watcom-v2";
  version = "unstable-2022-02-22";
  version = "unstable-2022-03-14";
  name = "${pname}-unwrapped-${version}";

  src = fetchFromGitHub {
    owner = "open-watcom";
    repo = "open-watcom-v2";
    rev = "9e25b3d6b8066f09b4f7131a31de1cf2af691e9a";
    sha256 = "1w336070kmhc6cmn2aqr8vm0fmw3yza2n0w4asvs2kqxjgmbn6i2";
    rev = "22627ccc1bd3de70aff9ac056e0dc9ecf7f7b6ec";
    sha256 = "khy/fhmQjTGKfx6iOUBt+ySwpEx0df/7meyNvBnJAPY=";
  };

  postPatch = ''
@ -10,13 +10,13 @@

buildDotnetModule rec {
  pname = "python-language-server";
  version = "2021-09-08";
  version = "2022-02-18";

  src = fetchFromGitHub {
    owner = "microsoft";
    repo = "python-language-server";
    rev = "26ea18997f45f7d7bc5a3c5a9efc723a8dbb02fa";
    sha256 = "1m8pf9k20wy4fzv27v3bswvc8s01ag6ka2qm9nn6bgq0s0lq78mh";
    rev = "52c1afd34b5acb0b44597bb8681232876fe94084";
    sha256 = "05s8mwi3dqzjghgpr1mfs1b7cgrq818bbj1v7aly6axc8c2n4gny";
  };

  projectFile = "src/LanguageServer/Impl/Microsoft.Python.LanguageServer.csproj";
@ -24,8 +24,6 @@ self: super: {

  nix-linter = self.callPackage ../../development/tools/analysis/nix-linter { };

  nix-output-monitor = self.callPackage ../../tools/nix/nix-output-monitor { };

  # hasura graphql-engine is not released to hackage.
  # https://github.com/hasura/graphql-engine/issues/7391
  ci-info = self.callPackage ../misc/haskell/hasura/ci-info.nix {};
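Note: entries like ci-info wire packages that are not released to Hackage into the Haskell package set via callPackage. A minimal sketch of an override of this shape (name and path hypothetical):

  self: super: {
    # hypothetical out-of-Hackage package, built from a local Nix expression
    my-pkg = self.callPackage ./my-pkg.nix { };
  }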
@ -201,9 +201,9 @@ in {
    major = "3";
    minor = "11";
    patch = "0";
    suffix = "a4";
    suffix = "a6";
  };
  sha256 = "sha256-Q3/nN2w2Pa+vNM6A8ERrQfyaQsDiqMflGdPwoLfPs+0=";
  sha256 = "sha256-HFOi/3WHljPjDKwp0qpregEONVuV8L+axpG+zPX50So=";
  inherit (darwin) configd;
  inherit passthruFun;
};
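Note: the sourceVersion fields are assembled into the full CPython version string elsewhere; roughly, assuming the usual nixpkgs convention:

  let
    sourceVersion = { major = "3"; minor = "11"; patch = "0"; suffix = "a6"; };
  in
  with sourceVersion; "${major}.${minor}.${patch}${suffix}"  # evaluates to "3.11.0a6"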
@ -1,44 +0,0 @@
{ lib, stdenv, fetchurl, fetchpatch, python2 }:

stdenv.mkDerivation rec {
  pname = "clearsilver";
  version = "0.10.5";

  src = fetchurl {
    url = "http://www.clearsilver.net/downloads/clearsilver-${version}.tar.gz";
    sha256 = "1046m1dpq3nkgxbis2dr2x7hynmy51n64465q78d7pdgvqwa178y";
  };

  PYTHON_SITE = "${placeholder "out"}/${python2.sitePackages}";

  configureFlags = [
    "--with-python=${python2.interpreter}"
    "--disable-apache"
    "--disable-perl"
    "--disable-ruby"
    "--disable-java"
    "--disable-csharp"
  ];

  preInstall = ''
    mkdir -p $out
    mkdir -p $out/${python2.sitePackages}
  '';

  patches = [
    (fetchpatch {
      url = "https://sources.debian.net/data/main/c/clearsilver/0.10.5-1.6/debian/patches/clang-gcc5.patch";
      sha256 = "0d44v9jx0b6k8nvrhknd958i9rs59kdh73z0lb4f1mzi8if16c38";
    })
    (fetchpatch {
      url = "https://sources.debian.net/data/main/c/clearsilver/0.10.5-1.6/debian/patches/CVE-2011-4357.diff";
      sha256 = "1lfncavxdqckrz03gv97lcliygbpi9lnih944vmdbn9zw6fwcipi";
    })
  ];

  meta = with lib; {
    description = "Fast, powerful, and language-neutral HTML template system";
    homepage = "http://www.clearsilver.net/";
    license = licenses.free;
  };
}
@ -6,13 +6,13 @@

stdenv.mkDerivation rec {
  pname = "intel-gmmlib";
  version = "22.0.3";
  version = "22.1.0";

  src = fetchFromGitHub {
    owner = "intel";
    repo = "gmmlib";
    rev = "intel-gmmlib-${version}";
    sha256 = "sha256-cXolz4hKLSTs8K9tCxaKnC2Pr0lQ0M+pPeF2w6bOAR8=";
    sha256 = "sha256-4LFBokMEhhobKIMzZYlt3Nn88lX60l+IZZ0gi+o7Tds=";
  };

  nativeBuildInputs = [ cmake ];
@ -1,19 +1,20 @@
{ mkDerivation, lib, fetchFromGitLab, libarchive, xz, zlib, bzip2, cmake, ninja }:
{ mkDerivation, lib, fetchFromGitLab, libarchive, xz, zlib, bzip2, meson, pkg-config, ninja }:

mkDerivation rec {
  pname = "libarchive-qt";
  version = "2.0.6";
  version = "2.0.7";

  src = fetchFromGitLab {
    owner = "marcusbritanicus";
    repo = pname;
    rev = "v${version}";
    sha256 = "sha256-Z+2zjQolV1Ncr6v9r7fGrc/fEMt0iMtGwv9eZ2Tu2cA=";
    sha256 = "sha256-KRywB+Op44N00q9tgO2WNCliRgUDRvrCms1O8JYt62o=";
  };

  nativeBuildInputs = [
    cmake
    meson
    ninja
    pkg-config
  ];

  buildInputs = [
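Note: the switch from CMake to Meson only needs the build tools swapped in nativeBuildInputs; stdenv's meson and ninja setup hooks then take over configure, build, and install. The shape of the pattern, as a sketch (package values hypothetical):

  stdenv.mkDerivation {
    pname = "example";  # hypothetical package
    version = "1.0";
    src = ./.;
    # the meson hook provides the configure phase, the ninja hook the build/install phases
    nativeBuildInputs = [ meson ninja pkg-config ];
  }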
31 pkgs/development/libraries/libctl/default.nix Normal file
@ -0,0 +1,31 @@
{ lib
, stdenv
, fetchFromGitHub
, autoreconfHook
, gfortran
, guile
, pkg-config
}:

stdenv.mkDerivation rec {
  pname = "libctl";
  version = "4.5.1";

  src = fetchFromGitHub {
    owner = "NanoComp";
    repo = pname;
    rev = "v${version}";
    sha256 = "uOydBWYPXSBUi+4MM6FNx6B5l2to7Ny9Uc1MMTV9bGA=";
  };

  nativeBuildInputs = [ autoreconfHook gfortran guile pkg-config ];

  configureFlags = [ "--enable-shared" ];

  meta = with lib; {
    description = "Guile-based library for supporting flexible control files in scientific simulations";
    homepage = "https://github.com/NanoComp/libctl";
    license = licenses.gpl2Only;
    maintainers = with maintainers; [ carpinchomug ];
  };
}
@ -6,16 +6,17 @@
, perl
, libxml2
, fuse
, fuse3
, gnutls
}:

stdenv.mkDerivation rec {
  pname = "libnbd";
  version = "1.9.5";
  version = "1.12.2";

  src = fetchurl {
    url = "https://download.libguestfs.org/libnbd/${lib.versions.majorMinor version}-development/${pname}-${version}.tar.gz";
    hash = "sha256-BnMoxIiuwhqcwVr3AwAIFgZPcFsIg55N66ZwWMTUnCw=";
    url = "https://download.libguestfs.org/libnbd/${lib.versions.majorMinor version}-stable/${pname}-${version}.tar.gz";
    hash = "sha256-57veJapt72LkP02wO4c1nDdHmnodqfT+rKPNDeTGQPM=";
  };

  nativeBuildInputs = [
@ -26,6 +27,7 @@ stdenv.mkDerivation rec {

  buildInputs = [
    fuse
    fuse3
    gnutls
    libxml2
  ];
@ -55,7 +57,6 @@ stdenv.mkDerivation rec {
    platforms = with platforms; linux;
  };
}
# TODO: NBD URI support apparently is not enabled
# TODO: package the 1.6-stable version too
# TODO: git version needs ocaml
# TODO: bindings for go, ocaml and python
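Note: lib.versions.majorMinor derives the release series in the URL from the full version, so a bump only has to touch version and hash. For example:

  lib.versions.majorMinor "1.12.2"  # => "1.12", giving .../libnbd/1.12-stable/libnbd-1.12.2.tar.gz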
@ -21,13 +21,13 @@

stdenv.mkDerivation rec {
  pname = "opendht";
  version = "2.3.2";
  version = "2.3.5";

  src = fetchFromGitHub {
    owner = "savoirfairelinux";
    repo = "opendht";
    rev = version;
    sha256 = "sha256-LevS9euBAFkI1ll79uqmVaRR/6FH6Z4cypHqvCIWxgU=";
    sha256 = "sha256-GGaq8ziOCUDMxILq2QYUkSP4usBjbufbHwQF4Pr6hHw=";
  };

  nativeBuildInputs = [
@ -2,16 +2,16 @@

buildGoModule rec {
  pname = "protolock";
  version = "0.15.2";
  version = "0.16.0";

  src = fetchFromGitHub {
    owner = "nilslice";
    repo = "protolock";
    rev = "v${version}";
    sha256 = "sha256-cKrG8f8cabuGDN1gmBYleXcBqeJksdREiEy63UK/6J0=";
    sha256 = "sha256-vWwRZVArmlTIGwD4zV3dEHN2kkoeCZuNIvjCBVAviPo=";
  };

  vendorSha256 = "sha256-2XbBiiiPvZCnlKUzGDLFnxA34N/LmHoPbvRKZckmhx4=";
  vendorSha256 = "sha256-kgSJUSjY8kgrGCNDPgw1WA8KwAqI5koJQ0IcE+tC5nk=";

  doCheck = false;
@ -359,7 +359,7 @@ let

  src = fetchurl {
    url = "https://registry.npmjs.org/prisma/-/prisma-${version}.tgz";
    sha512 = "sha512-dAld12vtwdz9Rz01nOjmnXe+vHana5PSog8t0XGgLemKsUVsaupYpr74AHaS3s78SaTS5s2HOghnJF+jn91ZrA==";
    sha512 = "sha512-8SdsLPhKR3mOfoo2o73h9mNn3v5kA/RqGA26Sv6qDS78Eh2uepPqt5e8/nwj5EOblYm5HEGuitaXQrOCLb6uTw==";
  };
  postInstall = with pkgs; ''
    wrapProgram "$out/bin/prisma" \
@ -6,13 +6,13 @@

buildPythonPackage rec {
  pname = "adafruit-platformdetect";
  version = "3.21.0";
  version = "3.21.1";
  format = "setuptools";

  src = fetchPypi {
    pname = "Adafruit-PlatformDetect";
    inherit version;
    sha256 = "sha256-H65Ar/+9AwhKFNRK/SZyU8XzrMt3myjBo+YNJYtQ0b4=";
    sha256 = "sha256-gVJUjxsl1rxvboL53186r63yp0k4FtTSgKJuqPzE2Q0=";
  };

  nativeBuildInputs = [
@ -11,7 +11,7 @@

buildPythonPackage rec {
  pname = "androidtv";
  version = "0.0.64";
  version = "0.0.65";
  format = "setuptools";

  disabled = pythonOlder "3.7";
@ -20,7 +20,7 @@ buildPythonPackage rec {
    owner = "JeffLIrion";
    repo = "python-androidtv";
    rev = "v${version}";
    hash = "sha256-CJJ+mWAX9XG1/E2PljUZ8oz/la3hYXF1tMfuKt0Zvjw=";
    hash = "sha256-bhXmPplRT9gzeD/GdD2HxN+Z4vvaiaxBwkqSml9SJUs=";
  };

  propagatedBuildInputs = [
@ -12,14 +12,14 @@

buildPythonPackage rec {
  pname = "awscrt";
  version = "0.13.3";
  version = "0.13.5";
  format = "setuptools";

  disabled = pythonOlder "3.6";

  src = fetchPypi {
    inherit pname version;
    hash = "sha256-1GaKDpOGX/YbM4rByTw0nYgwHYFvOLHZ0GRvanX3vAU=";
    hash = "sha256-dUNljMKsbl6eByhEYivWgRJczTBw3N1RVl8r3e898mg=";
  };

  buildInputs = lib.optionals stdenv.isDarwin [
@ -1,22 +1,24 @@
{ lib
, buildPythonPackage
, fetchPypi
, isPy3k
, pythonOlder
, msrest
, msrestazure
, azure-common
, azure-mgmt-core
, azure-mgmt-nspkg
}:

buildPythonPackage rec {
  pname = "azure-mgmt-monitor";
  version = "3.0.0";
  version = "3.1.0";
  format = "setuptools";

  disabled = pythonOlder "3.6";

  src = fetchPypi {
    inherit pname version;
    extension = "zip";
    sha256 = "91ddb7333bf2b9541a53864cc8d2501e3694a03a9c0e41cbfae3348558675ce6";
    hash = "sha256-ROcUAm0KgIjO2A2XBpS00IeEPgd8x4cjoMfn6X9C+Gw=";
  };

  propagatedBuildInputs = [
@ -24,13 +26,13 @@ buildPythonPackage rec {
    msrestazure
    azure-common
    azure-mgmt-core
  ] ++ lib.optionals (!isPy3k) [
    azure-mgmt-nspkg
  ];

  pythonNamespaces = [ "azure.mgmt" ];
  pythonNamespaces = [
    "azure.mgmt"
  ];

  # has no tests
  # Module has no tests
  doCheck = false;

  meta = with lib; {
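Note: fetchPypi fetches the .tar.gz sdist by default; extension = "zip" is needed here because this package publishes a zip. The hash also moves from a bare hex sha256 to the SRI-style hash attribute; both encode the same digest. The shape of the call, as a sketch (values hypothetical):

  src = fetchPypi {
    pname = "example-package";  # hypothetical PyPI name
    version = "1.0.0";
    extension = "zip";  # sdist published as .zip rather than .tar.gz
    hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";  # placeholder, not a real hash
  };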
@ -1,24 +1,37 @@
{ lib, buildPythonPackage, fetchPypi, six}:
{ lib
, buildPythonPackage
, fetchPypi
, pythonOlder
, six
}:

buildPythonPackage rec {
  pname = "bumps";
  version = "0.8.1";
  version = "0.9.0";
  format = "setuptools";

  propagatedBuildInputs = [six];

  # Bumps does not provide its own tests.py, so the test
  # always fails
  doCheck = false;
  disabled = pythonOlder "3.7";

  src = fetchPypi {
    inherit pname version;
    sha256 = "f4f2ee712a1e468a2ce5c0a32f67739a83331f0cb7b9c50b9e7510daefc12169";
    hash = "sha256-BY9kg0ksKfrpQgsl1aDDJJ+zKJmURqwTtKxlITxse+o=";
  };

  propagatedBuildInputs = [
    six
  ];

  # Module has no tests
  doCheck = false;

  pythonImportsCheck = [
    "bumps"
  ];

  meta = with lib; {
    homepage = "https://www.reflectometry.org/danse/software.html";
    description = "Data fitting with bayesian uncertainty analysis";
    maintainers = with maintainers; [ rprospero ];
    homepage = "https://bumps.readthedocs.io/";
    license = licenses.publicDomain;
    maintainers = with maintainers; [ rprospero ];
  };
}
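Note: with doCheck = false, pythonImportsCheck still provides a cheap smoke test: after installation it imports each listed module and fails the build if the import fails. A minimal sketch (package values hypothetical):

  buildPythonPackage rec {
    pname = "example";  # hypothetical
    version = "1.0";
    src = fetchPypi {
      inherit pname version;
      hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";  # placeholder
    };
    doCheck = false;  # upstream ships no test suite
    pythonImportsCheck = [ "example" ];  # fails the build if `import example` fails
  }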
@ -1,6 +1,6 @@
{ lib
, buildPythonPackage
, isPy3k
, pythonOlder
, fetchPypi
, requests
, requests-toolbelt
@ -9,12 +9,14 @@

buildPythonPackage rec {
  pname = "cloudscraper";
  version = "1.2.58";
  disabled = !isPy3k;
  version = "1.2.60";
  format = "setuptools";

  disabled = pythonOlder "3.7";

  src = fetchPypi {
    inherit pname version;
    sha256 = "1wnzv2k8cm8q1x18r4zg8pcnpm4gsdp82hywwjimp2v2qll918nx";
    hash = "sha256-DTQTsv/59895UTsMmqxYtSfFosUWPRx8wMT4zKHQ9Oc=";
  };

  propagatedBuildInputs = [
@ -27,10 +29,12 @@ buildPythonPackage rec {
  # nixpkgs yet, and also aren't included in the PyPI bundle. TODO.
  doCheck = false;

  pythonImportsCheck = [ "cloudscraper" ];
  pythonImportsCheck = [
    "cloudscraper"
  ];

  meta = with lib; {
    description = "A Python module to bypass Cloudflare's anti-bot page";
    description = "Python module to bypass Cloudflare's anti-bot page";
    homepage = "https://github.com/venomous/cloudscraper";
    license = licenses.mit;
    maintainers = with maintainers; [ kini ];
26 pkgs/development/python-modules/docx2txt/default.nix Normal file
@ -0,0 +1,26 @@
{ lib
, buildPythonPackage
, fetchPypi
}:

buildPythonPackage rec {
  pname = "docx2txt";
  version = "0.8";
  format = "setuptools";

  src = fetchPypi {
    inherit pname version;
    hash = "sha256-LAbZjXz+LTlH5XYKV9kk4/8HdFs3nIc3cjki5wCSNuU=";
  };

  pythonImportsCheck = [
    "docx2txt"
  ];

  meta = with lib; {
    description = "A pure python-based utility to extract text and images from docx files";
    homepage = "https://github.com/ankushshah89/python-docx2txt";
    license = licenses.mit;
    maintainers = with maintainers; [ ilkecan ];
  };
}
@ -14,7 +14,7 @@

buildPythonPackage rec {
  pname = "env-canada";
  version = "0.5.20";
  version = "0.5.21";
  format = "setuptools";

  disabled = pythonOlder "3.8";
@ -23,7 +23,7 @@ buildPythonPackage rec {
    owner = "michaeldavie";
    repo = "env_canada";
    rev = "v${version}";
    sha256 = "sha256-gYl5+rtOzci3nhgP74VM37tNk9pPWgcNBfcSSG1fSJs=";
    sha256 = "sha256-jildWpYWll5j7siYhNECMBjz9bF41xFA6NyydWNdgQE=";
  };

  propagatedBuildInputs = [
@ -12,14 +12,14 @@

buildPythonPackage rec {
  pname = "google-cloud-redis";
  version = "2.7.1";
  version = "2.8.0";
  format = "setuptools";

  disabled = pythonOlder "3.6";

  src = fetchPypi {
    inherit pname version;
    hash = "sha256-tz2upcRjgE6/4cB0riARwot3Vhw4QSKqqHTlJS3i7is=";
    hash = "sha256-7L3SjViQmzTp//5LWWG9VG+TQuPay70KZdUuzhy7HS0=";
  };

  propagatedBuildInputs = [
@ -9,15 +9,19 @@
, google-cloud-testutils
, google-resumable-media
, mock
, pythonOlder
}:

buildPythonPackage rec {
  pname = "google-cloud-storage";
  version = "2.2.0";
  version = "2.2.1";
  format = "setuptools";

  disabled = pythonOlder "3.7";

  src = fetchPypi {
    inherit pname version;
    sha256 = "sha256-01mWgBE11R20m7j3p+Kc7cwlqotDXu0MTA7y+e5W0dk=";
    hash = "sha256-AkT0YScQy17ERfxndDh1ZOI/mCM2P7QIsock4hAkAbc=";
  };

  propagatedBuildInputs = [
@ -11,7 +11,7 @@

buildPythonPackage rec {
  pname = "intellifire4py";
  version = "1.0.0";
  version = "1.0.1";
  format = "setuptools";

  disabled = pythonOlder "3.7";
@ -20,7 +20,7 @@ buildPythonPackage rec {
    owner = "jeeftor";
    repo = pname;
    rev = version;
    hash = "sha256-lQV5KpASbrz+wCi9x/0rNYrQE+dLCZzsNBFhYAQvPH4=";
    hash = "sha256-hKe9sDn5t2qQ0THqFQypAGgr7cJXaZs8562NpPR/iJU=";
  };

  propagatedBuildInputs = [
@ -12,7 +12,7 @@

buildPythonPackage rec {
  pname = "mdformat";
  version = "0.7.13";
  version = "0.7.14";
  format = "pyproject";

  disabled = pythonOlder "3.7";
@ -21,7 +21,7 @@ buildPythonPackage rec {
    owner = "executablebooks";
    repo = pname;
    rev = version;
    sha256 = "sha256-9ssDe7Wjuwuq2j7xwRyLqKouqeIt6NCUbEXjPdu2VZ8=";
    sha256 = "sha256-bImBW6r8g/4MQ9yNrBBhk7AGqKRXFyAew6HHEmqelxw=";
  };

  nativeBuildInputs = [
@ -18,7 +18,7 @@

buildPythonPackage rec {
  pname = "meshtastic";
  version = "1.2.90";
  version = "1.2.92";
  format = "setuptools";

  disabled = pythonOlder "3.6";
@ -27,7 +27,7 @@ buildPythonPackage rec {
    owner = "meshtastic";
    repo = "Meshtastic-python";
    rev = version;
    sha256 = "sha256-n/M1Q6YS3EkUcn45ffiTy0wuj9yKf6qBLLfD2XJkhHU=";
    sha256 = "sha256-tK711Lewr5Zc6dy/cDe9UEnq9zOEvuJg4mZyO3zBLR0=";
  };

  propagatedBuildInputs = [
@ -9,6 +9,7 @@
, tls-parser
, cacert
, pytestCheckHook
, pythonAtLeast
, pythonOlder
}:

@ -50,9 +51,11 @@ let
    "enable-tls1_3"
    "no-async"
  ];
  patches = builtins.filter (
    p: (builtins.baseNameOf (toString p)) != "macos-yosemite-compat.patch"
  ) oldAttrs.patches;
  patches = builtins.filter
    (
      p: (builtins.baseNameOf (toString p)) != "macos-yosemite-compat.patch"
    )
    oldAttrs.patches;
  buildInputs = oldAttrs.buildInputs ++ [ zlibStatic cacert ];
  meta = oldAttrs.meta // {
    knownVulnerabilities = [
@ -76,9 +79,11 @@ let
    sha256 = "1zqb1rff1wikc62a7vj5qxd1k191m8qif5d05mwdxz2wnzywlg72";
  };
  configureFlags = oldAttrs.configureFlags ++ nasslOpensslFlagsCommon;
  patches = builtins.filter (
    p: (builtins.baseNameOf (toString p)) == "darwin64-arm64.patch"
  ) oldAttrs.patches;
  patches = builtins.filter
    (
      p: (builtins.baseNameOf (toString p)) == "darwin64-arm64.patch"
    )
    oldAttrs.patches;
  buildInputs = oldAttrs.buildInputs ++ [ zlibStatic ];
  # openssl_1_0_2 needs `withDocs = false`
  outputs = lib.remove "doc" oldAttrs.outputs;
@ -87,42 +92,54 @@ let
in
buildPythonPackage rec {
  pname = "nassl";
  version = "4.0.1";
  version = "4.0.2";
  format = "setuptools";

  disabled = pythonOlder "3.7";

  src = fetchFromGitHub {
    owner = "nabla-c0d3";
    repo = pname;
    rev = version;
    hash = "sha256-QzO7ABh2weBO6NVFIj7kZpS8ashbDGompuvdKteJeUc=";
    hash = "sha256-lLyHXLmBVvT+LgsKBU8DcUXd0qaLSrwvXxFnIB9CHcU=";
  };

  postPatch = let
    legacyOpenSSLVersion = lib.replaceStrings ["."] ["_"] opensslLegacyStatic.version;
    modernOpenSSLVersion = lib.replaceStrings ["."] ["_"] opensslStatic.version;
    zlibVersion = zlibStatic.version;
  in ''
    mkdir -p deps/openssl-OpenSSL_${legacyOpenSSLVersion}/
    cp ${opensslLegacyStatic.out}/lib/libssl.a \
      ${opensslLegacyStatic.out}/lib/libcrypto.a \
      deps/openssl-OpenSSL_${legacyOpenSSLVersion}/
    ln -s ${opensslLegacyStatic.out.dev}/include deps/openssl-OpenSSL_${legacyOpenSSLVersion}/include
    ln -s ${opensslLegacyStatic.bin}/bin deps/openssl-OpenSSL_${legacyOpenSSLVersion}/apps
  postPatch =
    let
      legacyOpenSSLVersion = lib.replaceStrings [ "." ] [ "_" ] opensslLegacyStatic.version;
      modernOpenSSLVersion = lib.replaceStrings [ "." ] [ "_" ] opensslStatic.version;
      zlibVersion = zlibStatic.version;
    in
    ''
      mkdir -p deps/openssl-OpenSSL_${legacyOpenSSLVersion}/
      cp ${opensslLegacyStatic.out}/lib/libssl.a \
        ${opensslLegacyStatic.out}/lib/libcrypto.a \
        deps/openssl-OpenSSL_${legacyOpenSSLVersion}/
      ln -s ${opensslLegacyStatic.out.dev}/include deps/openssl-OpenSSL_${legacyOpenSSLVersion}/include
      ln -s ${opensslLegacyStatic.bin}/bin deps/openssl-OpenSSL_${legacyOpenSSLVersion}/apps

    mkdir -p deps/openssl-OpenSSL_${modernOpenSSLVersion}/
    cp ${opensslStatic.out}/lib/libssl.a \
      ${opensslStatic.out}/lib/libcrypto.a \
      deps/openssl-OpenSSL_${modernOpenSSLVersion}/
    ln -s ${opensslStatic.out.dev}/include deps/openssl-OpenSSL_${modernOpenSSLVersion}/include
    ln -s ${opensslStatic.bin}/bin deps/openssl-OpenSSL_${modernOpenSSLVersion}/apps
      mkdir -p deps/openssl-OpenSSL_${modernOpenSSLVersion}/
      cp ${opensslStatic.out}/lib/libssl.a \
        ${opensslStatic.out}/lib/libcrypto.a \
        deps/openssl-OpenSSL_${modernOpenSSLVersion}/
      ln -s ${opensslStatic.out.dev}/include deps/openssl-OpenSSL_${modernOpenSSLVersion}/include
      ln -s ${opensslStatic.bin}/bin deps/openssl-OpenSSL_${modernOpenSSLVersion}/apps

    mkdir -p deps/zlib-${zlibVersion}/
    cp ${zlibStatic.out}/lib/libz.a deps/zlib-${zlibVersion}/
  '';
      mkdir -p deps/zlib-${zlibVersion}/
      cp ${zlibStatic.out}/lib/libz.a deps/zlib-${zlibVersion}/
    '';

  propagatedBuildInputs = [ tls-parser ];
  nativeBuildInputs = [
    invoke
  ];

  nativeBuildInputs = [ invoke ];
  propagatedBuildInputs = [
    tls-parser
  ];

  checkInputs = [
    pytestCheckHook
  ];

  buildPhase = ''
    invoke build.nassl
@ -131,19 +148,23 @@ buildPythonPackage rec {

  doCheck = true;

  pythonImportsCheck = [ "nassl" ];

  checkInputs = [ pytestCheckHook ];
  pythonImportsCheck = [
    "nassl"
  ];

  disabledTests = [
    "Online"
  ] ++ lib.optionals (pythonAtLeast "3.10") [
    "test_write_bad"
    "test_client_authentication_no_certificate_supplied"
    "test_client_authentication_succeeds"
  ];

  meta = with lib; {
    description = "Low-level OpenSSL wrapper for Python";
    homepage = "https://github.com/nabla-c0d3/nassl";
    description = "Low-level OpenSSL wrapper for Python 3.7+";
    platforms = with platforms; linux ++ darwin;
    license = licenses.agpl3Only;
    maintainers = with maintainers; [ veehaitch ];
    platforms = with platforms; linux ++ darwin;
  };
}
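Note: both static openssl overrides use the same idiom: filter the inherited patch list by file name. As a standalone sketch (paths hypothetical):

  let
    patches = [ ./keep-me.patch ./macos-yosemite-compat.patch ];  # hypothetical patch paths
  in
  builtins.filter
    (p: (builtins.baseNameOf (toString p)) != "macos-yosemite-compat.patch")
    patches  # => [ ./keep-me.patch ]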
@ -7,7 +7,7 @@

buildPythonPackage rec {
  pname = "neo4j-driver";
  version = "4.4.1";
  version = "4.4.2";
  format = "setuptools";

  disabled = pythonOlder "3.7";
@ -16,7 +16,7 @@ buildPythonPackage rec {
    owner = "neo4j";
    repo = "neo4j-python-driver";
    rev = version;
    sha256 = "sha256-aGOqD6mmd3dulQ/SdaDPDZhkCwXdYCucHw+CrkJf1M0=";
    sha256 = "sha256-rYedmxQvT+RjVdbDckLv00J4YuEQtMuIc8Q5FGWr3Rw=";
  };

  propagatedBuildInputs = [
@ -11,14 +11,14 @@

buildPythonPackage rec {
  pname = "proxmoxer";
  version = "1.2.0";
  version = "1.3.0";
  disabled = pythonOlder "3.6";

  src = fetchFromGitHub {
    owner = pname;
    repo = pname;
    rev = version;
    sha256 = "sha256-ElHocXrazwK+b5vdjYSJAYB4ajs2n+V8koj4QKkdDMQ=";
    sha256 = "sha256-3EpId20WVVjXA/wxwy1peyHPcXdiT3fprABkcNBpZtE=";
  };

  propagatedBuildInputs = [
@ -3,6 +3,7 @@
, buildPythonPackage
, fetchFromGitHub
, loguru
, pydantic
, poetry-core
, pythonOlder
, requests
@ -10,7 +11,7 @@

buildPythonPackage rec {
  pname = "pyaussiebb";
  version = "0.0.11";
  version = "0.0.12";
  format = "pyproject";

  disabled = pythonOlder "3.9";
@ -19,7 +20,7 @@ buildPythonPackage rec {
    owner = "yaleman";
    repo = "aussiebb";
    rev = "v${version}";
    hash = "sha256-aL+n2ut7n6UUyymMEHoFMhRvK9iFRRunYE9ZirKFXhc=";
    hash = "sha256-4B+eq863G+iVl8UnxDumPVpkj9W8kX5LK0wo4QIYo4w=";
  };

  nativeBuildInputs = [
@ -30,6 +31,7 @@ buildPythonPackage rec {
    aiohttp
    requests
    loguru
    pydantic
  ];

  postPatch = ''
@ -12,7 +12,7 @@

buildPythonPackage rec {
  pname = "pycep-parser";
  version = "0.3.1";
  version = "0.3.2";
  format = "pyproject";

  disabled = pythonOlder "3.7";
@ -21,7 +21,7 @@ buildPythonPackage rec {
    owner = "gruebel";
    repo = "pycep";
    rev = version;
    hash = "sha256-S4jBqMgyreWrEp1SuR8J5RVFc+i1O0xbfgux1UvFP5k=";
    hash = "sha256-ud26xJQWdu7wtv75/K16HSSw0MvaSr3H1hDZBPjSzYE=";
  };

  nativeBuildInputs = [
@ -41,7 +41,7 @@ buildPythonPackage rec {

  postPatch = ''
    substituteInPlace pyproject.toml \
      --replace 'version = "0.3.1-alpha.1"' 'version = "${version}"' \
      --replace 'version = "0.3.2-alpha.4"' 'version = "${version}"' \
      --replace 'regex = "^2022.3.2"' 'regex = "*"'
  '';
@ -7,13 +7,13 @@

buildPythonPackage rec {
  pname = "pykrakenapi";
  version = "0.2.4";
  version = "0.3.0";

  src = fetchFromGitHub {
    owner = "dominiktraxl";
    repo = "pykrakenapi";
    rev = "v${version}";
    hash = "sha256-i2r6t+JcL6INI8Y26gvVvNjv6XxMj4G+pF9Xf/hsx1A=";
    hash = "sha256-ZhP4TEWFEGIqI/nk2It1IVFKrX4HKP+dWxu+gLJNIeg=";
  };

  propagatedBuildInputs = [
@ -9,7 +9,7 @@

buildPythonPackage rec {
  pname = "pysigma-backend-splunk";
  version = "0.1.1";
  version = "0.1.2";
  format = "pyproject";

  disabled = pythonOlder "3.8";
@ -18,7 +18,7 @@ buildPythonPackage rec {
    owner = "SigmaHQ";
    repo = "pySigma-backend-splunk";
    rev = "v${version}";
    hash = "sha256-AGT+7BKtINe2ukmomYyoUa5PHYAH1N0tUTtbyjMD+kw=";
    hash = "sha256-jKvGBUO55DtF6bpgEL82XB5Ba+kmqJsCqUdzftcpSJ0=";
  };

  nativeBuildInputs = [
@ -9,7 +9,7 @@

buildPythonPackage rec {
  pname = "pysigma-pipeline-crowdstrike";
  version = "0.1.3";
  version = "0.1.4";
  format = "pyproject";

  disabled = pythonOlder "3.8";
@ -18,7 +18,7 @@ buildPythonPackage rec {
    owner = "SigmaHQ";
    repo = "pySigma-pipeline-crowdstrike";
    rev = "v${version}";
    hash = "sha256-JNJHKydMzKreN+6liLlGMT1CFBUr/IX8Ah+exddKR3g=";
    hash = "sha256-Riu2u1IouS1BMtXauXrNMIl06TU11pHdC0jjlOiR71s=";
  };

  nativeBuildInputs = [
@ -9,7 +9,7 @@

buildPythonPackage rec {
  pname = "pysigma-pipeline-sysmon";
  version = "0.1.1";
  version = "0.1.2";
  format = "pyproject";

  disabled = pythonOlder "3.8";
@ -18,7 +18,7 @@ buildPythonPackage rec {
    owner = "SigmaHQ";
    repo = "pySigma-pipeline-sysmon";
    rev = "v${version}";
    hash = "sha256-BBJt2SAbnPEzIwJ+tXW4NmA4Nrb/glIaPlnmYHLoMD0=";
    hash = "sha256-Y9X9/ynrfs4gVTLl7pOvK3TH2Eh2vNF1S6Cnt3tByJM=";
  };

  nativeBuildInputs = [
@ -11,7 +11,7 @@

buildPythonPackage rec {
  pname = "pysigma";
  version = "0.3.2";
  version = "0.4.1";
  format = "pyproject";

  disabled = pythonOlder "3.8";
@ -20,7 +20,7 @@ buildPythonPackage rec {
    owner = "SigmaHQ";
    repo = "pySigma";
    rev = "v${version}";
    hash = "sha256-V/E2rZqVrk0kIvk+hPhNcAifhMM/rN3mk3pB+CGd43w=";
    hash = "sha256-egyzeniid2PZZQ6hsd44W+YURI8uGaXvDMuhNIXUqO0=";
  };

  nativeBuildInputs = [
@ -36,21 +36,9 @@ buildPythonPackage rec {
    pytestCheckHook
  ];

  patches = [
    # Switch to poetry-core, https://github.com/SigmaHQ/pySigma/pull/31
    (fetchpatch {
      name = "switch-to-poetry-core.patch";
      url = "https://github.com/SigmaHQ/pySigma/commit/b7a852d18852007da90c2ec35bff347c97b36f07.patch";
      sha256 = "sha256-zgg8Bsc37W2uuQluFpIZT4jHCQaitY2ZgS93Wk6Hxt0=";
    })
  ];

  postPatch = ''
    # https://github.com/SigmaHQ/pySigma/issues/32
    # https://github.com/SigmaHQ/pySigma/issues/33
    substituteInPlace pyproject.toml \
      --replace 'pyparsing = "^2.4.7"' 'pyparsing = "*"' \
      --replace 'pyyaml = "^5.3.1"' 'pyyaml = "*"'
      --replace 'pyparsing = "^3.0.7"' 'pyparsing = "*"' \
  '';

  pythonImportsCheck = [
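Note: the postPatch here loosens upstream's exact dependency pins so the package builds against the versions nixpkgs provides. The pattern in isolation, as a sketch (pin values hypothetical):

  postPatch = ''
    # relax a hypothetical overly strict pin to accept whatever nixpkgs provides
    substituteInPlace pyproject.toml \
      --replace 'pyparsing = "^3.0.7"' 'pyparsing = "*"'
  '';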
@ -5,17 +5,21 @@
, pytest-metadata
, pytest-xdist
, pytestCheckHook
, pythonOlder
}:

buildPythonPackage rec {
  pname = "pytest-json-report";
  version = "1.4.1";
  version = "1.5.0";
  format = "setuptools";

  disabled = pythonOlder "3.7";

  src = fetchFromGitHub {
    owner = "numirias";
    repo = pname;
    rev = "v${version}";
    sha256 = "sha256-OS9ASUp9iJ12Ovr931RQU/DHEAXqbgcRMCBP4h+GAhk=";
    hash = "sha256-hMB/atDuo7CjwhHFUOxVfgJ7Qp4AA9J428iv7hyQFcs=";
  };

  buildInputs = [
@ -11,14 +11,16 @@

buildPythonPackage rec {
  pname = "python-box";
  version = "5.4.1";
  version = "6.0.0";
  format = "setuptools";

  disabled = pythonOlder "3.6";

  src = fetchFromGitHub {
    owner = "cdgriffith";
    repo = "Box";
    rev = version;
    sha256 = "sha256-SkteajcWG7rBFMm6Xp6QCfkZfwthRituGL/RtICbtYk=";
    hash = "sha256-YOYcI+OAuTumNtTylUc6dSY9shOE6eTr8M3rVbcy5hs=";
  };

  propagatedBuildInputs = [
@ -32,7 +34,9 @@ buildPythonPackage rec {
    pytestCheckHook
  ];

  pythonImportsCheck = [ "box" ];
  pythonImportsCheck = [
    "box"
  ];

  meta = with lib; {
    description = "Python dictionaries with advanced dot notation access";
@ -8,7 +8,7 @@

buildPythonPackage rec {
  pname = "python_http_client";
  version = "3.3.6";
  version = "3.3.7";
  format = "setuptools";

  disabled = pythonOlder "3.8";
@ -17,7 +17,7 @@ buildPythonPackage rec {
    owner = "sendgrid";
    repo = "python-http-client";
    rev = version;
    sha256 = "sha256-Xchf/jVkQ7SYOzI9f81iS/G72k//6wkl2bMvHprOP9Y=";
    sha256 = "sha256-8Qs5Jw0LMV2UucLnlFKJQ2PUhYaQx6uJdIV/4gaPH3w=";
  };

  checkInputs = [
@ -11,7 +11,7 @@

buildPythonPackage rec {
  pname = "qcengine";
  version = "0.22.0";
  version = "0.23.0";

  checkInputs = [ pytestCheckHook ];

@ -25,7 +25,7 @@ buildPythonPackage rec {

  src = fetchPypi {
    inherit pname version;
    sha256 = "685a08247b561ed1c7a7b42e68293f90b412e83556626304a3f826a15be51308";
    sha256 = "sha256-gDn0Nu6ALTr3KyZnYDSA6RE3S5JQj562FP2RI9U3Gxs=";
  };

  doCheck = true;
@ -12,7 +12,7 @@

buildPythonPackage rec {
  pname = "readme-renderer";
  version = "33.0";
  version = "34.0";
  format = "setuptools";

  disabled = pythonOlder "3.6";
@ -20,7 +20,7 @@ buildPythonPackage rec {
  src = fetchPypi {
    pname = "readme_renderer";
    inherit version;
    sha256 = "sha256-47U7yEvWrwVOTMH+NWfcGuGfVUE0IhBDo/jGdOIiCds=";
    sha256 = "sha256-37TRfyFwbRRfdHPgthyiRbpY6BDPmyIJpII5Z3+C5bA=";
  };

  propagatedBuildInputs = [
Some files were not shown because too many files have changed in this diff.