cudaPackages: move config expressions to cuda-modules

Connor Baker 2023-11-07 14:33:17 +00:00
parent 4a25023c2e
commit 397d95d07f
10 changed files with 1199 additions and 594 deletions

View File

@@ -1,170 +0,0 @@
{ config
, lib
, cudaVersion
}:
# Type aliases
# Gpu :: AttrSet
# - See the documentation in ./gpus.nix.
let
inherit (lib) attrsets lists strings trivial versions;
# Flags are determined based on your CUDA toolkit by default. You may benefit
# from improved performance, reduced file size, or greater hardware support by
# passing a configuration based on your specific GPU environment.
#
# config.cudaCapabilities :: List Capability
# List of hardware generations to build.
# E.g. [ "8.0" ]
# Currently, the last item is considered the optional forward-compatibility arch,
# but this may change in the future.
#
# config.cudaForwardCompat :: Bool
# Whether to include the forward compatibility gencode (+PTX)
# to support future GPU generations.
# E.g. true
#
# Please see the accompanying documentation or https://github.com/NixOS/nixpkgs/pull/205351
# gpus :: List Gpu
gpus = builtins.import ./gpus.nix;
# isSupported :: Gpu -> Bool
isSupported = gpu:
let
inherit (gpu) minCudaVersion maxCudaVersion;
lowerBoundSatisfied = strings.versionAtLeast cudaVersion minCudaVersion;
upperBoundSatisfied = (maxCudaVersion == null)
|| !(strings.versionOlder maxCudaVersion cudaVersion);
in
lowerBoundSatisfied && upperBoundSatisfied;
# isDefault :: Gpu -> Bool
isDefault = gpu:
let
inherit (gpu) dontDefaultAfter;
newGpu = dontDefaultAfter == null;
recentGpu = newGpu || strings.versionAtLeast dontDefaultAfter cudaVersion;
in
recentGpu;
# supportedGpus :: List Gpu
# GPUs which are supported by the provided CUDA version.
supportedGpus = builtins.filter isSupported gpus;
# defaultGpus :: List Gpu
# GPUs which are supported by the provided CUDA version and we want to build for by default.
defaultGpus = builtins.filter isDefault supportedGpus;
# supportedCapabilities :: List Capability
supportedCapabilities = lists.map (gpu: gpu.computeCapability) supportedGpus;
# defaultCapabilities :: List Capability
# The default capabilities to target, if not overridden by the user.
defaultCapabilities = lists.map (gpu: gpu.computeCapability) defaultGpus;
# cudaArchNameToVersions :: AttrSet String (List String)
# Maps the name of a GPU architecture to different versions of that architecture.
# For example, "Ampere" maps to [ "8.0" "8.6" "8.7" ].
cudaArchNameToVersions =
lists.groupBy'
(versions: gpu: versions ++ [ gpu.computeCapability ])
[ ]
(gpu: gpu.archName)
supportedGpus;
# cudaComputeCapabilityToName :: AttrSet String String
# Maps the version of a GPU architecture to the name of that architecture.
# For example, "8.0" maps to "Ampere".
cudaComputeCapabilityToName = builtins.listToAttrs (
lists.map
(gpu: {
name = gpu.computeCapability;
value = gpu.archName;
})
supportedGpus
);
# dropDot :: String -> String
dropDot = ver: builtins.replaceStrings [ "." ] [ "" ] ver;
# archMapper :: String -> List String -> List String
# Maps a feature across a list of architecture versions to produce a list of architectures.
# For example, "sm" and [ "8.0" "8.6" "8.7" ] produces [ "sm_80" "sm_86" "sm_87" ].
archMapper = feat: lists.map (computeCapability: "${feat}_${dropDot computeCapability}");
# gencodeMapper :: String -> List String -> List String
# Maps a feature across a list of architecture versions to produce a list of gencode arguments.
# For example, "sm" and [ "8.0" "8.6" "8.7" ] produces [ "-gencode=arch=compute_80,code=sm_80"
# "-gencode=arch=compute_86,code=sm_86" "-gencode=arch=compute_87,code=sm_87" ].
gencodeMapper = feat: lists.map (
computeCapability:
"-gencode=arch=compute_${dropDot computeCapability},code=${feat}_${dropDot computeCapability}"
);
formatCapabilities = { cudaCapabilities, enableForwardCompat ? true }: rec {
inherit cudaCapabilities enableForwardCompat;
# archNames :: List String
# E.g. [ "Turing" "Ampere" ]
archNames = lists.unique (builtins.map (cap: cudaComputeCapabilityToName.${cap} or (throw "missing cuda compute capability")) cudaCapabilities);
# realArches :: List String
# The real architectures are physical architectures supported by the CUDA version.
# E.g. [ "sm_75" "sm_86" ]
realArches = archMapper "sm" cudaCapabilities;
# virtualArches :: List String
# The virtual architectures are typically used for forward compatibility, when trying to support
# an architecture newer than the CUDA version allows.
# E.g. [ "compute_75" "compute_86" ]
virtualArches = archMapper "compute" cudaCapabilities;
# arches :: List String
# By default, build for all supported architectures and forward compatibility via a virtual
# architecture for the newest supported architecture.
# E.g. [ "sm_75" "sm_86" "compute_86" ]
arches = realArches ++
lists.optional enableForwardCompat (lists.last virtualArches);
# gencode :: List String
# A list of CUDA gencode arguments to pass to NVCC.
# E.g. [ "-gencode=arch=compute_75,code=sm_75" ... "-gencode=arch=compute_86,code=compute_86" ]
gencode =
let
base = gencodeMapper "sm" cudaCapabilities;
forward = gencodeMapper "compute" [ (lists.last cudaCapabilities) ];
in
base ++ lib.optionals enableForwardCompat forward;
};
in
# When changing names or formats: pause, validate, and update the assert
assert (formatCapabilities { cudaCapabilities = [ "7.5" "8.6" ]; }) == {
cudaCapabilities = [ "7.5" "8.6" ];
enableForwardCompat = true;
archNames = [ "Turing" "Ampere" ];
realArches = [ "sm_75" "sm_86" ];
virtualArches = [ "compute_75" "compute_86" ];
arches = [ "sm_75" "sm_86" "compute_86" ];
gencode = [ "-gencode=arch=compute_75,code=sm_75" "-gencode=arch=compute_86,code=sm_86" "-gencode=arch=compute_86,code=compute_86" ];
};
{
# formatCapabilities :: { cudaCapabilities: List Capability, enableForwardCompat: Boolean } -> { ... }
inherit formatCapabilities;
# cudaArchNameToVersions :: AttrSet String (List String)
inherit cudaArchNameToVersions;
# cudaComputeCapabilityToName :: AttrSet String String
inherit cudaComputeCapabilityToName;
# dropDot :: String -> String
inherit dropDot;
} // formatCapabilities {
cudaCapabilities = config.cudaCapabilities or defaultCapabilities;
enableForwardCompat = config.cudaForwardCompat or true;
}

View File

@@ -1,148 +0,0 @@
[
# Type alias
# Gpu = {
# archName: String
# - The name of the microarchitecture.
# computeCapability: String
# - The compute capability of the GPU.
# minCudaVersion: String
# - The minimum (inclusive) CUDA version that supports this GPU.
# dontDefaultAfter: null | String
# - The CUDA version after which to exclude this GPU from the list of default capabilities
# we build. null means we always include this GPU in the default capabilities if it is
# supported.
# maxCudaVersion: null | String
# - The maximum (exclusive) CUDA version that supports this GPU. null means there is no
# maximum.
# }
{
archName = "Kepler";
computeCapability = "3.0";
minCudaVersion = "10.0";
dontDefaultAfter = "10.2";
maxCudaVersion = "10.2";
}
{
archName = "Kepler";
computeCapability = "3.2";
minCudaVersion = "10.0";
dontDefaultAfter = "10.2";
maxCudaVersion = "10.2";
}
{
archName = "Kepler";
computeCapability = "3.5";
minCudaVersion = "10.0";
dontDefaultAfter = "11.0";
maxCudaVersion = "11.8";
}
{
archName = "Kepler";
computeCapability = "3.7";
minCudaVersion = "10.0";
dontDefaultAfter = "11.0";
maxCudaVersion = "11.8";
}
{
archName = "Maxwell";
computeCapability = "5.0";
minCudaVersion = "10.0";
dontDefaultAfter = "11.0";
maxCudaVersion = null;
}
{
archName = "Maxwell";
computeCapability = "5.2";
minCudaVersion = "10.0";
dontDefaultAfter = "11.0";
maxCudaVersion = null;
}
{
archName = "Maxwell";
computeCapability = "5.3";
minCudaVersion = "10.0";
dontDefaultAfter = "11.0";
maxCudaVersion = null;
}
{
archName = "Pascal";
computeCapability = "6.0";
minCudaVersion = "10.0";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
archName = "Pascal";
computeCapability = "6.1";
minCudaVersion = "10.0";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
archName = "Pascal";
computeCapability = "6.2";
minCudaVersion = "10.0";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
archName = "Volta";
computeCapability = "7.0";
minCudaVersion = "10.0";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
archName = "Volta";
computeCapability = "7.2";
minCudaVersion = "10.0";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
archName = "Turing";
computeCapability = "7.5";
minCudaVersion = "10.0";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
archName = "Ampere";
computeCapability = "8.0";
minCudaVersion = "11.2";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
archName = "Ampere";
computeCapability = "8.6";
minCudaVersion = "11.2";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
archName = "Ampere";
computeCapability = "8.7";
minCudaVersion = "11.5";
# NOTE: This is purposefully before 11.5 to ensure it is never a capability we target by
# default. 8.7 is the Jetson Orin series of devices, which is a very specific platform.
# We keep this entry here in case we ever want to target it explicitly, but we don't
# want to target it by default.
dontDefaultAfter = "11.4";
maxCudaVersion = null;
}
{
archName = "Ada";
computeCapability = "8.9";
minCudaVersion = "11.8";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
archName = "Hopper";
computeCapability = "9.0";
minCudaVersion = "11.8";
dontDefaultAfter = null;
maxCudaVersion = null;
}
]

View File

@@ -1,91 +0,0 @@
["10.0"]
version = "10.0.130"
url = "https://developer.nvidia.com/compute/cuda/10.0/Prod/local_installers/cuda_10.0.130_410.48_linux"
sha256 = "16p3bv1lwmyqpxil8r951h385sy9asc578afrc7lssa68c71ydcj"
gcc = "gcc7"
["10.1"]
version = "10.1.243"
url = "https://developer.download.nvidia.com/compute/cuda/10.1/Prod/local_installers/cuda_10.1.243_418.87.00_linux.run"
sha256 = "0caxhlv2bdq863dfp6wj7nad66ml81vasq2ayf11psvq2b12vhp7"
gcc = "gcc7"
["10.2"]
version = "10.2.89"
url = "http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_440.33.01_linux.run"
sha256 = "04fasl9sjkb1jvchvqgaqxprnprcz7a8r52249zp2ijarzyhf3an"
gcc = "gcc7"
["11.0"]
version = "11.0.3"
url = "https://developer.download.nvidia.com/compute/cuda/11.0.3/local_installers/cuda_11.0.3_450.51.06_linux.run"
sha256 = "1h4c69nfrgm09jzv8xjnjcvpq8n4gnlii17v3wzqry5d13jc8ydh"
gcc = "gcc9"
["11.1"]
version = "11.1.1"
url = "https://developer.download.nvidia.com/compute/cuda/11.1.1/local_installers/cuda_11.1.1_455.32.00_linux.run"
sha256 = "13yxv2fgvdnqqbwh1zb80x4xhyfkbajfkwyfpdg9493010kngbiy"
gcc = "gcc9"
["11.2"]
version = "11.2.1"
url = "https://developer.download.nvidia.com/compute/cuda/11.2.1/local_installers/cuda_11.2.1_460.32.03_linux.run"
sha256 = "sha256-HamMuJfMX1inRFpKZspPaSaGdwbLOvWKZpzc2Nw9F8g="
gcc = "gcc9"
["11.3"]
version = "11.3.1"
url = "https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.19.01_linux.run"
sha256 = "0d19pwcqin76scbw1s5kgj8n0z1p4v1hyfldqmamilyfxycfm4xd"
gcc = "gcc9"
["11.4"]
version = "11.4.2"
url = "https://developer.download.nvidia.com/compute/cuda/11.4.2/local_installers/cuda_11.4.2_470.57.02_linux.run"
sha256 = "sha256-u9h8oOkT+DdFSnljZ0c1E83e9VUILk2G7Zo4ZZzIHwo="
gcc = "gcc10"
["11.5"]
version = "11.5.0"
url = "https://developer.download.nvidia.com/compute/cuda/11.5.0/local_installers/cuda_11.5.0_495.29.05_linux.run"
sha256 = "sha256-rgoWk9lJfPPYHmlIlD43lGNpANtxyY1Y7v2sr38aHkw="
# cuda 11.5 has problems with glibc 2.34 -> keeping gcc10
# cf. https://forums.developer.nvidia.com/t/cuda-11-5-samples-throw-multiple-error-attribute-malloc-does-not-take-arguments/192750/15
gcc = "gcc10"
["11.6"]
version = "11.6.1"
url = "https://developer.download.nvidia.com/compute/cuda/11.6.1/local_installers/cuda_11.6.1_510.47.03_linux.run"
sha256 = "sha256-qyGa/OALdCABEyaYZvv/derQN7z8I1UagzjCaEyYTX4="
gcc = "gcc11"
["11.7"]
version = "11.7.0"
url = "https://developer.download.nvidia.com/compute/cuda/11.7.0/local_installers/cuda_11.7.0_515.43.04_linux.run"
sha256 = "sha256-CH/fy7ofeVQ7H3jkOo39rF9tskLQQt3oIOFtwYWJLyY="
gcc = "gcc11"
["11.8"]
version = "11.8.0"
url = "https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run"
sha256 = "sha256-kiPErzrr5Ke77Zq9mxY7A6GzS4VfvCtKDRtwasCaWhY="
gcc = "gcc11"
["12.0"]
version = "12.0.1"
url = "https://developer.download.nvidia.com/compute/cuda/12.0.1/local_installers/cuda_12.0.1_525.85.12_linux.run"
sha256 = "sha256-GyBaBicvFGP0dydv2rkD8/ZmkXwGjlIHOAAeacehh1s="
gcc = "gcc12"
["12.1"]
version = "12.1.1"
url = "https://developer.download.nvidia.com/compute/cuda/12.1.1/local_installers/cuda_12.1.1_530.30.02_linux.run"
sha256 = "sha256-10Ai1B2AEFMZ36Ib7qObd6W5kZU5wEh6BcqvJEbWpw4="
gcc = "gcc12"
["12.2"]
version = "12.2.0"
url = "https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda_12.2.0_535.54.03_linux.run"
sha256 = "sha256-7PPSr63LrAKfD0UFeFgQ1S0AbkuHunn/P5hDNqK79Rg="
gcc = "gcc12"

View File

@@ -0,0 +1,102 @@
# Type Aliases
# CudaVersion = String (two-component version, e.g. "10.0")
# Release = {
# version: String
# - The version of CUDA.
# url: String
# - The URL to download the CUDA installer from.
# sha256: String
# - The SHA256 checksum of the CUDA installer.
# }
# Releases = AttrSet CudaVersion Release
{
"10.0" = {
version = "10.0.130";
url = "https://developer.nvidia.com/compute/cuda/10.0/Prod/local_installers/cuda_10.0.130_410.48_linux";
sha256 = "16p3bv1lwmyqpxil8r951h385sy9asc578afrc7lssa68c71ydcj";
};
"10.1" = {
version = "10.1.243";
url = "https://developer.download.nvidia.com/compute/cuda/10.1/Prod/local_installers/cuda_10.1.243_418.87.00_linux.run";
sha256 = "0caxhlv2bdq863dfp6wj7nad66ml81vasq2ayf11psvq2b12vhp7";
};
"10.2" = {
version = "10.2.89";
url = "http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_440.33.01_linux.run";
sha256 = "04fasl9sjkb1jvchvqgaqxprnprcz7a8r52249zp2ijarzyhf3an";
};
"11.0" = {
version = "11.0.3";
url = "https://developer.download.nvidia.com/compute/cuda/11.0.3/local_installers/cuda_11.0.3_450.51.06_linux.run";
sha256 = "1h4c69nfrgm09jzv8xjnjcvpq8n4gnlii17v3wzqry5d13jc8ydh";
};
"11.1" = {
version = "11.1.1";
url = "https://developer.download.nvidia.com/compute/cuda/11.1.1/local_installers/cuda_11.1.1_455.32.00_linux.run";
sha256 = "13yxv2fgvdnqqbwh1zb80x4xhyfkbajfkwyfpdg9493010kngbiy";
};
"11.2" = {
version = "11.2.1";
url = "https://developer.download.nvidia.com/compute/cuda/11.2.1/local_installers/cuda_11.2.1_460.32.03_linux.run";
sha256 = "sha256-HamMuJfMX1inRFpKZspPaSaGdwbLOvWKZpzc2Nw9F8g=";
};
"11.3" = {
version = "11.3.1";
url = "https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.19.01_linux.run";
sha256 = "0d19pwcqin76scbw1s5kgj8n0z1p4v1hyfldqmamilyfxycfm4xd";
};
"11.4" = {
version = "11.4.2";
url = "https://developer.download.nvidia.com/compute/cuda/11.4.2/local_installers/cuda_11.4.2_470.57.02_linux.run";
sha256 = "sha256-u9h8oOkT+DdFSnljZ0c1E83e9VUILk2G7Zo4ZZzIHwo=";
};
"11.5" = {
version = "11.5.0";
url = "https://developer.download.nvidia.com/compute/cuda/11.5.0/local_installers/cuda_11.5.0_495.29.05_linux.run";
sha256 = "sha256-rgoWk9lJfPPYHmlIlD43lGNpANtxyY1Y7v2sr38aHkw=";
};
"11.6" = {
version = "11.6.1";
url = "https://developer.download.nvidia.com/compute/cuda/11.6.1/local_installers/cuda_11.6.1_510.47.03_linux.run";
sha256 = "sha256-qyGa/OALdCABEyaYZvv/derQN7z8I1UagzjCaEyYTX4=";
};
"11.7" = {
version = "11.7.0";
url = "https://developer.download.nvidia.com/compute/cuda/11.7.0/local_installers/cuda_11.7.0_515.43.04_linux.run";
sha256 = "sha256-CH/fy7ofeVQ7H3jkOo39rF9tskLQQt3oIOFtwYWJLyY=";
};
"11.8" = {
version = "11.8.0";
url = "https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run";
sha256 = "sha256-kiPErzrr5Ke77Zq9mxY7A6GzS4VfvCtKDRtwasCaWhY=";
};
"12.0" = {
version = "12.0.1";
url = "https://developer.download.nvidia.com/compute/cuda/12.0.1/local_installers/cuda_12.0.1_525.85.12_linux.run";
sha256 = "sha256-GyBaBicvFGP0dydv2rkD8/ZmkXwGjlIHOAAeacehh1s=";
};
"12.1" = {
version = "12.1.1";
url = "https://developer.download.nvidia.com/compute/cuda/12.1.1/local_installers/cuda_12.1.1_530.30.02_linux.run";
sha256 = "sha256-10Ai1B2AEFMZ36Ib7qObd6W5kZU5wEh6BcqvJEbWpw4=";
};
"12.2" = {
version = "12.2.0";
url = "https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda_12.2.0_535.54.03_linux.run";
sha256 = "sha256-7PPSr63LrAKfD0UFeFgQ1S0AbkuHunn/P5hDNqK79Rg=";
};
}

View File

@@ -0,0 +1,262 @@
# NOTE: Check https://docs.nvidia.com/deeplearning/cudnn/archives/index.html for support matrices.
# Version policy is to keep the latest minor release for each major release.
{
cudnn.releases = {
# jetson
linux-aarch64 = [
{
version = "8.9.5.30";
minCudaVersion = "12.0";
maxCudaVersion = "12.2";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-aarch64/cudnn-linux-aarch64-8.9.5.30_cuda12-archive.tar.xz";
hash = "sha256-BJH3sC9VwiB362eL8xTB+RdSS9UHz1tlgjm/mKRyM6E=";
}
];
# powerpc
linux-ppc64le = [];
# server-grade arm
linux-sbsa = [
{
version = "8.4.1.50";
minCudaVersion = "11.0";
maxCudaVersion = "11.7";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-sbsa/cudnn-linux-sbsa-8.4.1.50_cuda11.6-archive.tar.xz";
hash = "sha256-CxufrFt4l04v2qp0hD2xj2Ns6PPZmdYv8qYVuZePw2A=";
}
{
version = "8.5.0.96";
minCudaVersion = "11.0";
maxCudaVersion = "11.7";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-sbsa/cudnn-linux-sbsa-8.5.0.96_cuda11-archive.tar.xz";
hash = "sha256-hngKu+zUY05zY/rR0ACuI7eQWl+Dg73b9zMsaTR5Hd4=";
}
{
version = "8.6.0.163";
minCudaVersion = "11.0";
maxCudaVersion = "11.8";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-sbsa/cudnn-linux-sbsa-8.6.0.163_cuda11-archive.tar.xz";
hash = "sha256-oCAieNPL1POtw/eBa/9gcWIcsEKwkDaYtHesrIkorAY=";
}
{
version = "8.7.0.84";
minCudaVersion = "11.0";
maxCudaVersion = "11.8";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-sbsa/cudnn-linux-sbsa-8.7.0.84_cuda11-archive.tar.xz";
hash = "sha256-z5Z/eNv2wHUkPMg6oYdZ43DbN1SqFbEqChTov2ejqdQ=";
}
{
version = "8.8.1.3";
minCudaVersion = "11.0";
maxCudaVersion = "11.8";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-sbsa/cudnn-linux-sbsa-8.8.1.3_cuda11-archive.tar.xz";
hash = "sha256-OzWq+aQkmIbZONmWSYyFoZzem3RldoXyJy7GVT6GM1k=";
}
{
version = "8.8.1.3";
minCudaVersion = "12.0";
maxCudaVersion = "12.0";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-sbsa/cudnn-linux-sbsa-8.8.1.3_cuda12-archive.tar.xz";
hash = "sha256-njl3qhudBuuGC1gqyJM2MGdaAkMCnCWb/sW7VpmGfSA=";
}
{
version = "8.9.6.50";
minCudaVersion = "11.0";
maxCudaVersion = "11.8";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-sbsa/cudnn-linux-sbsa-8.9.6.50_cuda11-archive.tar.xz";
hash = "sha256-nlQWYSOJWci7o3wFGIuxrkoo8d3ddg4F2hU/qJySvBE=";
}
{
version = "8.9.6.50";
minCudaVersion = "12.0";
maxCudaVersion = "12.2";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-sbsa/cudnn-linux-sbsa-8.9.6.50_cuda12-archive.tar.xz";
hash = "sha256-L20O26RelmeynVfjohEADW3Vaj3VbFS2dTUadTKlXdg=";
}
];
# x86_64
linux-x86_64 = [
{
version = "7.4.2.24";
minCudaVersion = "10.0";
maxCudaVersion = "10.0";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v7.4.2/cudnn-10.0-linux-x64-v7.4.2.24.tgz";
hash = "sha256-Lt/IagK1DRfojEeJVaMy5qHoF05+U6NFi06lH68C2qM=";
}
{
version = "7.6.5.32";
minCudaVersion = "10.0";
maxCudaVersion = "10.0";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v7.6.5/cudnn-10.0-linux-x64-v7.6.5.32.tgz";
hash = "sha256-KDVeOV8LK5OsLIO2E2CzW6bNA3fkTni+GXtrYbS0kro=";
}
{
version = "7.6.5.32";
minCudaVersion = "10.1";
maxCudaVersion = "10.1";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v7.6.5/cudnn-10.1-linux-x64-v7.6.5.32.tgz";
hash = "sha256-fq7IA5osMKsLx1jTA1iHZ2k972v0myJIWiwAvy4TbLM=";
}
{
version = "7.6.5.32";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v7.6.5/cudnn-10.2-linux-x64-v7.6.5.32.tgz";
hash = "sha256-fq7IA5osMKsLx1jTA1iHZ2k972v0myJIWiwAvy4TbLN=";
}
{
version = "8.0.5.39";
minCudaVersion = "10.1";
maxCudaVersion = "10.1";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.0.5/cudnn-10.1-linux-x64-v8.0.5.39.tgz";
hash = "sha256-kJCElSmIlrM6qVBjo0cfk8NmJ9esAcF9w211xl7qSgA=";
}
{
version = "8.0.5.39";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.0.5/cudnn-10.2-linux-x64-v8.0.5.39.tgz";
hash = "sha256-IfhMBcZ78eyFnnfDjM1b8VSWT6HDCPRJlZvkw1bjgvM=";
}
{
version = "8.0.5.39";
minCudaVersion = "11.0";
maxCudaVersion = "11.0";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.0.5/cudnn-11.0-linux-x64-v8.0.5.39.tgz";
hash = "sha256-ThbueJXetKixwZS4ErpJWG730mkCBRQB03F1EYmKm3M=";
}
{
version = "8.0.5.39";
minCudaVersion = "11.1";
maxCudaVersion = "11.1";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.0.5/cudnn-11.1-linux-x64-v8.0.5.39.tgz";
hash = "sha256-HQRr+nk5navMb2yxUHkYdUQ5RC6gyp4Pvs3URvmwDM4=";
}
{
version = "8.1.1.33";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.1.1/cudnn-10.2-linux-x64-v8.1.1.33.tgz";
hash = "sha256-Kkp7mabpv6aQ6xm7QeSVU/KnpJGls6v8rpAOFmxbbr0=";
}
{
version = "8.1.1.33";
minCudaVersion = "11.0";
maxCudaVersion = "11.2";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.1.1/cudnn-11.2-linux-x64-v8.1.1.33.tgz";
hash = "sha256-mKh4TpKGLyABjSDCgbMNSgzZUfk2lPZDPM9K6cUCumo=";
}
{
version = "8.2.4.15";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.2.4/cudnn-10.2-linux-x64-v8.2.4.15.tgz";
hash = "sha256-0jyUoxFaHHcRamwSfZF1+/WfcjNkN08mo0aZB18yIvE=";
}
{
version = "8.2.4.15";
minCudaVersion = "11.0";
maxCudaVersion = "11.4";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.2.4/cudnn-11.4-linux-x64-v8.2.4.15.tgz";
hash = "sha256-Dl0t+JC5ln76ZhnaQhMQ2XMjVlp58FoajLm3Fluq0Nc=";
}
{
version = "8.3.3.40";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.3.3/local_installers/10.2/cudnn-linux-x86_64-8.3.3.40_cuda10.2-archive.tar.xz";
hash = "sha256-2FVPKzLmKV1fyPOsJeaPlAWLAYyAHaucFD42gS+JJqs=";
}
{
version = "8.3.3.40";
minCudaVersion = "11.0";
maxCudaVersion = "11.6";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.3.3/local_installers/11.5/cudnn-linux-x86_64-8.3.3.40_cuda11.5-archive.tar.xz";
hash = "sha256-6r6Wx1zwPqT1N5iU2RTx+K4UzqsSGYnoSwg22Sf7dzE=";
}
{
version = "8.4.1.50";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.4.1.50_cuda10.2-archive.tar.xz";
hash = "sha256-I88qMmU6lIiLVmaPuX7TTbisgTav839mssxUo3lQNjg=";
}
{
version = "8.4.1.50";
minCudaVersion = "11.0";
maxCudaVersion = "11.7";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.4.1.50_cuda11.6-archive.tar.xz";
hash = "sha256-7JbSN22B/KQr3T1MPXBambKaBlurV/kgVhx2PinGfQE=";
}
{
version = "8.5.0.96";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.5.0.96_cuda10-archive.tar.xz";
hash = "sha256-1mzhbbzR40WKkHnQLtJHhg0vYgf7G8a0OBcCwIOkJjM=";
}
{
version = "8.5.0.96";
minCudaVersion = "11.0";
maxCudaVersion = "11.7";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.5.0.96_cuda11-archive.tar.xz";
hash = "sha256-VFSm/ZTwCHKMqumtrZk8ToXvNjAuJrzkO+p9RYpee20=";
}
{
version = "8.6.0.163";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.6.0.163_cuda10-archive.tar.xz";
hash = "sha256-t4sr/GrFqqdxu2VhaJQk5K1Xm/0lU4chXG8hVL09R9k=";
}
{
version = "8.6.0.163";
minCudaVersion = "11.0";
maxCudaVersion = "11.8";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.6.0.163_cuda11-archive.tar.xz";
hash = "sha256-u8OW30cpTGV+3AnGAGdNYIyxv8gLgtz0VHBgwhcRFZ4=";
}
{
version = "8.7.0.84";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.7.0.84_cuda10-archive.tar.xz";
hash = "sha256-bZhaqc8+GbPV2FQvvbbufd8VnEJgvfkICc2N3/gitRg=";
}
{
version = "8.7.0.84";
minCudaVersion = "11.0";
maxCudaVersion = "11.8";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.7.0.84_cuda11-archive.tar.xz";
hash = "sha256-l2xMunIzyXrnQAavq1Fyl2MAukD1slCiH4z3H1nJ920=";
}
{
version = "8.8.1.3";
minCudaVersion = "11.0";
maxCudaVersion = "11.8";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.8.1.3_cuda11-archive.tar.xz";
hash = "sha256-r3WEyuDMVSS1kT7wjCm6YVQRPGDrCjegWQqRtRWoqPk=";
}
{
version = "8.8.1.3";
minCudaVersion = "12.0";
maxCudaVersion = "12.0";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.8.1.3_cuda12-archive.tar.xz";
hash = "sha256-edd6dpx+cXWrx7XC7VxJQUjAYYqGQThyLIh/lcYjd3w=";
}
{
version = "8.9.6.50";
minCudaVersion = "11.0";
maxCudaVersion = "11.8";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.9.6.50_cuda11-archive.tar.xz";
hash = "sha256-oOLvVemfTNZH99HaqlqkUE/6M1ujAYbVwyiPL0ffBX4=";
}
{
version = "8.9.6.50";
minCudaVersion = "12.0";
maxCudaVersion = "12.2";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.9.6.50_cuda12-archive.tar.xz";
hash = "sha256-FyIlnblSZbs4E0OKWhxuzZed6JrkU2YDkEBC4STTAtU=";
}
];
};
}

View File

@@ -0,0 +1,387 @@
# Type aliases
# Gpu :: AttrSet
# - See the documentation in ./gpus.nix.
{
config,
cudaCapabilities ? (config.cudaCapabilities or []),
cudaForwardCompat ? (config.cudaForwardCompat or true),
lib,
cudaVersion,
hostPlatform,
# gpus :: List Gpu
gpus,
}:
let
inherit (lib)
asserts
attrsets
lists
strings
trivial
;
# Flags are determined based on your CUDA toolkit by default. You may benefit
# from improved performance, reduced file size, or greater hardware support by
# passing a configuration based on your specific GPU environment.
#
# cudaCapabilities :: List Capability
# List of hardware generations to build.
# E.g. [ "8.0" ]
# Currently, the last item is considered the optional forward-compatibility arch,
# but this may change in the future.
#
# cudaForwardCompat :: Bool
# Whether to include the forward compatibility gencode (+PTX)
# to support future GPU generations.
# E.g. true
#
# Please see the accompanying documentation or https://github.com/NixOS/nixpkgs/pull/205351
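#
# A rough sketch of how these might be set from the nixpkgs configuration; the exact
# call-site plumbing is not shown in this file and is assumed here for illustration:
#
#   import <nixpkgs> {
#     config = {
#       allowUnfree = true;
#       cudaSupport = true;
#       cudaCapabilities = [ "8.6" ];
#       cudaForwardCompat = false;
#     };
#   }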
# isSupported :: Gpu -> Bool
isSupported =
gpu:
let
inherit (gpu) minCudaVersion maxCudaVersion;
lowerBoundSatisfied = strings.versionAtLeast cudaVersion minCudaVersion;
upperBoundSatisfied =
(maxCudaVersion == null) || !(strings.versionOlder maxCudaVersion cudaVersion);
in
lowerBoundSatisfied && upperBoundSatisfied;
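# For example, with cudaVersion = "11.4" (illustrative GPU attrsets, not entries from gpus.nix):
#   isSupported { minCudaVersion = "11.2"; maxCudaVersion = null; }    =>  true
#   isSupported { minCudaVersion = "11.8"; maxCudaVersion = null; }    =>  false
#   isSupported { minCudaVersion = "10.0"; maxCudaVersion = "10.2"; }  =>  false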
# NOTE: Jetson is never built by default.
# isDefault :: Gpu -> Bool
isDefault =
gpu:
let
inherit (gpu) dontDefaultAfter;
newGpu = dontDefaultAfter == null;
recentGpu = newGpu || strings.versionAtLeast dontDefaultAfter cudaVersion;
in
recentGpu;
# supportedGpus :: List Gpu
# GPUs which are supported by the provided CUDA version.
supportedGpus = builtins.filter isSupported gpus;
# defaultGpus :: List Gpu
# GPUs which are supported by the provided CUDA version and we want to build for by default.
defaultGpus = builtins.filter isDefault supportedGpus;
# supportedCapabilities :: List Capability
supportedCapabilities = lists.map (gpu: gpu.computeCapability) supportedGpus;
# defaultCapabilities :: List Capability
# The default capabilities to target, if not overridden by the user.
defaultCapabilities = lists.map (gpu: gpu.computeCapability) defaultGpus;
# cudaArchNameToVersions :: AttrSet String (List String)
# Maps the name of a GPU architecture to different versions of that architecture.
# For example, "Ampere" maps to [ "8.0" "8.6" "8.7" ].
cudaArchNameToVersions =
lists.groupBy' (versions: gpu: versions ++ [gpu.computeCapability]) [] (gpu: gpu.archName)
supportedGpus;
# cudaComputeCapabilityToName :: AttrSet String String
# Maps the version of a GPU architecture to the name of that architecture.
# For example, "8.0" maps to "Ampere".
cudaComputeCapabilityToName = builtins.listToAttrs (
lists.map (gpu: attrsets.nameValuePair gpu.computeCapability gpu.archName) supportedGpus
);
# cudaComputeCapabilityToIsJetson :: AttrSet String Boolean
cudaComputeCapabilityToIsJetson = builtins.listToAttrs (
lists.map (attrs: attrsets.nameValuePair attrs.computeCapability attrs.isJetson) supportedGpus
);
# jetsonComputeCapabilities :: List String
jetsonComputeCapabilities = trivial.pipe cudaComputeCapabilityToIsJetson [
(attrsets.filterAttrs (_: isJetson: isJetson))
builtins.attrNames
];
# Find the intersection with the user-specified list of cudaCapabilities.
# NOTE: Jetson devices are never built by default because they cannot be targeted alongside
# non-Jetson devices and require an aarch64 host platform. As such, if they're present anywhere,
# they must be in the user-specified cudaCapabilities.
# NOTE: We don't need to worry about mixes of Jetson and non-Jetson devices here -- there's
# sanity-checking for all that in cudaFlags.
jetsonTargets = lists.intersectLists jetsonComputeCapabilities cudaCapabilities;
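# For example, given that gpus.nix marks 5.3, 6.2, 7.2, and 8.7 as Jetson capabilities:
#   cudaCapabilities = [ "7.2" ]        =>  jetsonTargets = [ "7.2" ]
#   cudaCapabilities = [ "7.5" "8.6" ]  =>  jetsonTargets = [ ]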
# dropDot :: String -> String
dropDot = ver: builtins.replaceStrings ["."] [""] ver;
# archMapper :: String -> List String -> List String
# Maps a feature across a list of architecture versions to produce a list of architectures.
# For example, "sm" and [ "8.0" "8.6" "8.7" ] produces [ "sm_80" "sm_86" "sm_87" ].
archMapper = feat: lists.map (computeCapability: "${feat}_${dropDot computeCapability}");
# gencodeMapper :: String -> List String -> List String
# Maps a feature across a list of architecture versions to produce a list of gencode arguments.
# For example, "sm" and [ "8.0" "8.6" "8.7" ] produces [ "-gencode=arch=compute_80,code=sm_80"
# "-gencode=arch=compute_86,code=sm_86" "-gencode=arch=compute_87,code=sm_87" ].
gencodeMapper =
feat:
lists.map (
computeCapability:
"-gencode=arch=compute_${dropDot computeCapability},code=${feat}_${dropDot computeCapability}"
);
# Maps Nix system to NVIDIA redist arch.
# NOTE: We swap out the default `linux-sbsa` redist (for server-grade ARM chips) with the
# `linux-aarch64` redist (which is for Jetson devices) if we're building any Jetson devices.
# Since both are based on aarch64, we can only have one or the other, otherwise there's an
# ambiguity as to which should be used.
# getRedistArch :: String -> String
getRedistArch =
nixSystem:
if nixSystem == "aarch64-linux" then
if jetsonTargets != [] then "linux-aarch64" else "linux-sbsa"
else if nixSystem == "x86_64-linux" then
"linux-x86_64"
else if nixSystem == "powerpc64le-linux" then
"linux-ppc64le"
else if nixSystem == "x86_64-windows" then
"windows-x86_64"
else
builtins.throw "Unsupported Nix system: ${nixSystem}";
# Maps NVIDIA redist arch to Nix system.
getNixSystem =
redistArch:
if
lists.elem redistArch [
"linux-aarch64"
"linux-sbsa"
]
then
"aarch64-linux"
else if redistArch == "linux-x86_64" then
"x86_64-linux"
else if redistArch == "linux-ppc64le" then
"powerpc64le-linux"
else if redistArch == "windows-x86_64" then
"x86_64-windows"
else
builtins.throw "Unsupported NVIDIA redist arch: ${redistArch}";
formatCapabilities =
{
cudaCapabilities,
enableForwardCompat ? true,
}:
rec {
inherit cudaCapabilities enableForwardCompat;
# archNames :: List String
# E.g. [ "Turing" "Ampere" ]
archNames = lists.unique (
lists.map (cap: cudaComputeCapabilityToName.${cap} or (throw "missing cuda compute capability"))
cudaCapabilities
);
# realArches :: List String
# The real architectures are physical architectures supported by the CUDA version.
# E.g. [ "sm_75" "sm_86" ]
realArches = archMapper "sm" cudaCapabilities;
# virtualArches :: List String
# The virtual architectures are typically used for forward compatibility, when trying to support
# an architecture newer than the CUDA version allows.
# E.g. [ "compute_75" "compute_86" ]
virtualArches = archMapper "compute" cudaCapabilities;
# arches :: List String
# By default, build for all supported architectures and forward compatibility via a virtual
# architecture for the newest supported architecture.
# E.g. [ "sm_75" "sm_86" "compute_86" ]
arches = realArches ++ lists.optional enableForwardCompat (lists.last virtualArches);
# gencode :: List String
# A list of CUDA gencode arguments to pass to NVCC.
# E.g. [ "-gencode=arch=compute_75,code=sm_75" ... "-gencode=arch=compute_86,code=compute_86" ]
gencode =
let
base = gencodeMapper "sm" cudaCapabilities;
forward = gencodeMapper "compute" [(lists.last cudaCapabilities)];
in
base ++ lib.optionals enableForwardCompat forward;
# gencodeString :: String
# A space-separated string of CUDA gencode arguments to pass to NVCC.
# E.g. "-gencode=arch=compute_75,code=sm_75 ... -gencode=arch=compute_86,code=compute_86"
gencodeString = strings.concatStringsSep " " gencode;
# Jetson devices cannot be targeted by the same binaries which target non-Jetson devices. While
# NVIDIA provides both `linux-aarch64` and `linux-sbsa` packages, which both target `aarch64`,
# they are built with different settings and cannot be mixed.
# isJetsonBuild :: Boolean
isJetsonBuild =
let
# List of booleans representing whether any of the currently targeted capabilities are
# Jetson devices.
# isJetsons :: List Boolean
isJetsons =
lists.map (trivial.flip builtins.getAttr cudaComputeCapabilityToIsJetson)
cudaCapabilities;
anyJetsons = lists.any (trivial.id) isJetsons;
allJetsons = lists.all (trivial.id) isJetsons;
hostIsAarch64 = hostPlatform.isAarch64;
in
trivial.throwIfNot (anyJetsons -> (allJetsons && hostIsAarch64))
''
Jetson devices cannot be targeted with non-Jetson devices. Additionally, they require hostPlatform to be aarch64.
You requested ${builtins.toJSON cudaCapabilities} for host platform ${hostPlatform.system}.
Exactly one of the following must be true:
- All CUDA capabilities belong to Jetson devices (${trivial.boolToString allJetsons}) and the hostPlatform is aarch64 (${trivial.boolToString hostIsAarch64}).
- No CUDA capabilities belong to Jetson devices (${trivial.boolToString (!anyJetsons)}).
See ${./gpus.nix} for a list of architectures supported by this version of Nixpkgs.
''
allJetsons;
};
in
# When changing names or formats: pause, validate, and update the assert
assert let
expected = {
cudaCapabilities = [
"7.5"
"8.6"
];
enableForwardCompat = true;
archNames = [
"Turing"
"Ampere"
];
realArches = [
"sm_75"
"sm_86"
];
virtualArches = [
"compute_75"
"compute_86"
];
arches = [
"sm_75"
"sm_86"
"compute_86"
];
gencode = [
"-gencode=arch=compute_75,code=sm_75"
"-gencode=arch=compute_86,code=sm_86"
"-gencode=arch=compute_86,code=compute_86"
];
gencodeString = "-gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_86,code=compute_86";
isJetsonBuild = false;
};
actual = formatCapabilities {
cudaCapabilities = [
"7.5"
"8.6"
];
};
actualWrapped = (builtins.tryEval (builtins.deepSeq actual actual)).value;
in
asserts.assertMsg (expected == actualWrapped) ''
This test should only fail when using a version of CUDA older than 11.2, the first to support
8.6.
Expected: ${builtins.toJSON expected}
Actual: ${builtins.toJSON actualWrapped}
'';
# Check mixed Jetson and non-Jetson devices
assert let
expected = false;
actual = formatCapabilities {
cudaCapabilities = [
"7.2"
"7.5"
];
};
actualWrapped = (builtins.tryEval (builtins.deepSeq actual actual)).value;
in
asserts.assertMsg (expected == actualWrapped) ''
Jetson device capabilities cannot be mixed with those of non-Jetson devices.
Capability 7.5 is non-Jetson and should not be allowed with Jetson 7.2.
Expected: ${builtins.toJSON expected}
Actual: ${builtins.toJSON actualWrapped}
'';
# Check Jetson-only
assert let
expected = {
cudaCapabilities = [
"6.2"
"7.2"
];
enableForwardCompat = true;
archNames = [
"Pascal"
"Volta"
];
realArches = [
"sm_62"
"sm_72"
];
virtualArches = [
"compute_62"
"compute_72"
];
arches = [
"sm_62"
"sm_72"
"compute_72"
];
gencode = [
"-gencode=arch=compute_62,code=sm_62"
"-gencode=arch=compute_72,code=sm_72"
"-gencode=arch=compute_72,code=compute_72"
];
gencodeString = "-gencode=arch=compute_62,code=sm_62 -gencode=arch=compute_72,code=sm_72 -gencode=arch=compute_72,code=compute_72";
isJetsonBuild = true;
};
actual = formatCapabilities {
cudaCapabilities = [
"6.2"
"7.2"
];
};
actualWrapped = (builtins.tryEval (builtins.deepSeq actual actual)).value;
in
asserts.assertMsg
# We can't do this test unless we're targeting aarch64
(hostPlatform.isAarch64 -> (expected == actualWrapped))
''
Jetson devices can only be built with other Jetson devices.
Both 6.2 and 7.2 are Jetson devices.
Expected: ${builtins.toJSON expected}
Actual: ${builtins.toJSON actualWrapped}
'';
{
# formatCapabilities :: { cudaCapabilities: List Capability, enableForwardCompat: Boolean } -> { ... }
inherit formatCapabilities;
# cudaArchNameToVersions :: AttrSet String (List String)
inherit cudaArchNameToVersions;
# cudaComputeCapabilityToName :: AttrSet String String
inherit cudaComputeCapabilityToName;
# dropDot :: String -> String
inherit dropDot;
inherit
defaultCapabilities
supportedCapabilities
jetsonComputeCapabilities
jetsonTargets
getNixSystem
getRedistArch
;
}
// formatCapabilities {
cudaCapabilities = if cudaCapabilities == [] then defaultCapabilities else cudaCapabilities;
enableForwardCompat = cudaForwardCompat;
}
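# A sketch of how a consumer might evaluate this file and use the merged attribute set.
# The filename (flags.nix), the surrounding wiring, and the NVCC_APPEND_FLAGS hand-off are
# assumptions for illustration only:
#
#   let
#     flags = import ./flags.nix {
#       inherit config lib cudaVersion hostPlatform gpus;
#     };
#   in
#   {
#     # e.g. handed to NVCC via the NVCC_APPEND_FLAGS environment variable
#     NVCC_APPEND_FLAGS = flags.gencodeString;
#     # e.g. [ "sm_75" "sm_86" "compute_86" ] with the default forward-compat setting
#     inherit (flags) arches realArches virtualArches;
#   }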

View File

@@ -0,0 +1,204 @@
# Type aliases
#
# Gpu = {
# archName: String
# - The name of the microarchitecture.
# computeCapability: String
# - The compute capability of the GPU.
# isJetson: Boolean
# - Whether a GPU is part of NVIDIA's line of Jetson embedded computers. This field is
# notable because it tells us what architecture to build for (as Jetson devices are
# aarch64).
# More on Jetson devices here:
# https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/
# NOTE: These architectures are only built upon request.
# minCudaVersion: String
# - The minimum (inclusive) CUDA version that supports this GPU.
# dontDefaultAfter: null | String
# - The CUDA version after which to exclude this GPU from the list of default capabilities
# we build. null means we always include this GPU in the default capabilities if it is
# supported.
# maxCudaVersion: null | String
# - The maximum (exclusive) CUDA version that supports this GPU. null means there is no
# maximum.
# }
#
# Many thanks to Arnon Shimoni for maintaining a list of these architectures and capabilities.
# Without your work, this would have been much more difficult.
# https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/
[
{
# GeForce 700, GT-730
archName = "Kepler";
computeCapability = "3.0";
isJetson = false;
minCudaVersion = "10.0";
dontDefaultAfter = "10.2";
maxCudaVersion = "10.2";
}
{
archName = "Kepler";
computeCapability = "3.2";
isJetson = false;
minCudaVersion = "10.0";
dontDefaultAfter = "10.2";
maxCudaVersion = "10.2";
}
{
# Tesla K40
archName = "Kepler";
computeCapability = "3.5";
isJetson = false;
minCudaVersion = "10.0";
dontDefaultAfter = "11.0";
maxCudaVersion = "11.8";
}
{
# Tesla K80
archName = "Kepler";
computeCapability = "3.7";
isJetson = false;
minCudaVersion = "10.0";
dontDefaultAfter = "11.0";
maxCudaVersion = "11.8";
}
{
# Tesla/Quadro M series
archName = "Maxwell";
computeCapability = "5.0";
isJetson = false;
minCudaVersion = "10.0";
dontDefaultAfter = "11.0";
maxCudaVersion = null;
}
{
# Quadro M6000 , GeForce 900, GTX-970, GTX-980, GTX Titan X
archName = "Maxwell";
computeCapability = "5.2";
isJetson = false;
minCudaVersion = "10.0";
dontDefaultAfter = "11.0";
maxCudaVersion = null;
}
{
# Tegra (Jetson) TX1 / Tegra X1, Drive CX, Drive PX, Jetson Nano
archName = "Maxwell";
computeCapability = "5.3";
isJetson = true;
minCudaVersion = "10.0";
dontDefaultAfter = "11.0";
maxCudaVersion = null;
}
{
# Quadro GP100, Tesla P100, DGX-1 (Generic Pascal)
archName = "Pascal";
computeCapability = "6.0";
isJetson = false;
minCudaVersion = "10.0";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
# GTX 1080, GTX 1070, GTX 1060, GTX 1050, GTX 1030 (GP108), GT 1010 (GP108) Titan Xp, Tesla
# P40, Tesla P4, Discrete GPU on the NVIDIA Drive PX2
archName = "Pascal";
computeCapability = "6.1";
isJetson = false;
minCudaVersion = "10.0";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
# Integrated GPU on the NVIDIA Drive PX2, Tegra (Jetson) TX2
archName = "Pascal";
computeCapability = "6.2";
isJetson = true;
minCudaVersion = "10.0";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
# DGX-1 with Volta, Tesla V100, GTX 1180 (GV104), Titan V, Quadro GV100
archName = "Volta";
computeCapability = "7.0";
isJetson = false;
minCudaVersion = "10.0";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
# Jetson AGX Xavier, Drive AGX Pegasus, Xavier NX
archName = "Volta";
computeCapability = "7.2";
isJetson = true;
minCudaVersion = "10.0";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
# GTX/RTX Turing GTX 1660 Ti, RTX 2060, RTX 2070, RTX 2080, Titan RTX, Quadro RTX 4000,
# Quadro RTX 5000, Quadro RTX 6000, Quadro RTX 8000, Quadro T1000/T2000, Tesla T4
archName = "Turing";
computeCapability = "7.5";
isJetson = false;
minCudaVersion = "10.0";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
# NVIDIA A100 (the name “Tesla” has been dropped; GA100), NVIDIA DGX-A100
archName = "Ampere";
computeCapability = "8.0";
isJetson = false;
minCudaVersion = "11.2";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
# Tesla GA10x cards, RTX Ampere RTX 3080, GA102 RTX 3090, RTX A2000, A3000, RTX A4000,
# A5000, A6000, NVIDIA A40, GA106 RTX 3060, GA104 RTX 3070, GA107 RTX 3050, RTX A10, RTX
# A16, RTX A40, A2 Tensor Core GPU
archName = "Ampere";
computeCapability = "8.6";
isJetson = false;
minCudaVersion = "11.2";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
# Jetson AGX Orin and Drive AGX Orin only
archName = "Ampere";
computeCapability = "8.7";
isJetson = true;
minCudaVersion = "11.5";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
# NVIDIA GeForce RTX 4090, RTX 4080, RTX 6000, Tesla L40
archName = "Ada";
computeCapability = "8.9";
isJetson = false;
minCudaVersion = "11.8";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
# NVIDIA H100 (GH100)
archName = "Hopper";
computeCapability = "9.0";
isJetson = false;
minCudaVersion = "11.8";
dontDefaultAfter = null;
maxCudaVersion = null;
}
{
# NVIDIA H100 (GH100) (Thor)
archName = "Hopper";
computeCapability = "9.0a";
isJetson = false;
minCudaVersion = "12.0";
dontDefaultAfter = null;
maxCudaVersion = null;
}
]

View File

@@ -0,0 +1,115 @@
# Taken from
# https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#host-compiler-support-policy
#
# NVCC performs a version check on the host compiler's major version, so newer minor versions
# of the compilers listed below will be supported, but major versions falling outside the range
# will not be supported.
#
# NOTE: These constraints don't apply to Jetson, which follows its own host-compiler support matrix.
# NOTE: NVIDIA can and will add support for newer compilers even during patch releases.
# E.g.: CUDA 12.2.1 maxed out with support for Clang 15.0; 12.2.2 added support for Clang 16.0.
# NOTE: Because all platforms NVIDIA supports use GCC and Clang, we omit the architectures here.
# Type Aliases
# CudaVersion = String (two-part version number, e.g. "11.2")
# Platform = String (e.g. "x86_64-linux")
# CompilerCompatibilities = {
# clangMaxMajorVersion = String (e.g. "15")
# clangMinMajorVersion = String (e.g. "7")
# gccMaxMajorVersion = String (e.g. "11")
# gccMinMajorVersion = String (e.g. "6")
# }
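#
# A minimal sketch of a lookup against this table; the filename and the lib/cudaVersion
# bindings are assumptions, not part of this file:
#
#   let
#     compats = import ./nvcc-compatibilities.nix;
#     compat = compats.${lib.versions.majorMinor cudaVersion};
#   in
#   # e.g. compats."11.8".gccMaxMajorVersion == "11" and compats."11.8".clangMaxMajorVersion == "14"
#   compat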
let
# attrs :: AttrSet CudaVersion CompilerCompatibilities
attrs = {
# Our baseline
# https://docs.nvidia.com/cuda/archive/10.0/cuda-toolkit-release-notes/index.html#cuda-compiler-new-features
"10.0" = {
clangMaxMajorVersion = "6";
clangMinMajorVersion = "6";
gccMaxMajorVersion = "7";
gccMinMajorVersion = "5";
};
# Added support for Clang 7 and GCC 8
# https://docs.nvidia.com/cuda/archive/10.1/cuda-toolkit-release-notes/index.html#cuda-compiler-new-features
"10.1" = attrs."10.0" // {
clangMaxMajorVersion = "7";
gccMaxMajorVersion = "8";
};
# Added clang 8
# https://docs.nvidia.com/cuda/archive/10.2/cuda-toolkit-release-notes/index.html#cuda-compiler-new-features
"10.2" = attrs."10.1" // {
clangMaxMajorVersion = "8";
};
# Added support for Clang 9 and GCC 9
# https://docs.nvidia.com/cuda/archive/11.0/cuda-toolkit-release-notes/index.html#cuda-compiler-new-features
"11.0" = {
clangMaxMajorVersion = "9";
clangMinMajorVersion = "7";
gccMaxMajorVersion = "9";
gccMinMajorVersion = "6";
};
# Added support for Clang 10 and GCC 10
# https://docs.nvidia.com/cuda/archive/11.1.1/cuda-toolkit-release-notes/index.html#cuda-compiler-new-features
"11.1" = attrs."11.0" // {
clangMaxMajorVersion = "10";
gccMaxMajorVersion = "10";
};
# Added support for Clang 11
# https://docs.nvidia.com/cuda/archive/11.2.2/cuda-installation-guide-linux/index.html#system-requirements
"11.2" = attrs."11.1" // {
clangMaxMajorVersion = "11";
};
# No changes from 11.2 to 11.3
"11.3" = attrs."11.2";
# Added support for Clang 12 and GCC 11
# https://docs.nvidia.com/cuda/archive/11.4.4/cuda-toolkit-release-notes/index.html#cuda-general-new-features
"11.4" = {
clangMaxMajorVersion = "12";
gccMaxMajorVersion = "11";
};
# No changes from 11.4 to 11.5
"11.5" = attrs."11.4";
# No changes from 11.5 to 11.6
"11.6" = attrs."11.5";
# Added support for Clang 13
# https://docs.nvidia.com/cuda/archive/11.7.1/cuda-toolkit-release-notes/index.html#cuda-compiler-new-features
"11.7" = attrs."11.6" // {
clangMaxMajorVersion = "13";
};
# Added support for Clang 14
# https://docs.nvidia.com/cuda/archive/11.8.0/cuda-installation-guide-linux/index.html#system-requirements
"11.8" = attrs."11.7" // {
clangMaxMajorVersion = "14";
};
# Added support for GCC 12
# https://docs.nvidia.com/cuda/archive/12.0.1/cuda-installation-guide-linux/index.html#system-requirements
"12.0" = attrs."11.8" // {
gccMaxMajorVersion = "12";
};
# Added support for Clang 15
# https://docs.nvidia.com/cuda/archive/12.1.1/cuda-toolkit-release-notes/index.html#cuda-compilers-new-features
"12.1" = attrs."12.0" // {
clangMaxMajorVersion = "15";
};
# Added support for Clang 16
# https://docs.nvidia.com/cuda/archive/12.2.2/cuda-installation-guide-linux/index.html#host-compiler-support-policy
"12.2" = attrs."12.1" // {
clangMaxMajorVersion = "16";
};
};
in
attrs

View File

@@ -0,0 +1,129 @@
# NOTE: Check https://developer.nvidia.com/nvidia-tensorrt-8x-download.
{
tensorrt.releases = {
# jetson
linux-aarch64 = [];
# powerpc
linux-ppc64le = [];
# server-grade arm
linux-sbsa = [
{
version = "8.2.5.1";
minCudaVersion = "11.4";
maxCudaVersion = "11.4";
cudnnVersion = "8.2";
filename = "TensorRT-8.2.5.1.Ubuntu-20.04.aarch64-gnu.cuda-11.4.cudnn8.2.tar.gz";
hash = "sha256-oWfQ3lq2aoMPv65THeotnMilTzP+QWqKeToLU8eO+qo=";
}
{
version = "8.4.3.1";
minCudaVersion = "11.6";
maxCudaVersion = "11.6";
cudnnVersion = "8.4";
filename = "TensorRT-8.4.3.1.Ubuntu-20.04.aarch64-gnu.cuda-11.6.cudnn8.4.tar.gz";
hash = "sha256-9tLlrB8cKYFvN2xF0Pol5CZs06iuuI5mq+6jpzD8wWI=";
}
{
version = "8.5.3.1";
minCudaVersion = "11.8";
maxCudaVersion = "11.8";
cudnnVersion = "8.6";
filename = "TensorRT-8.5.3.1.Ubuntu-20.04.aarch64-gnu.cuda-11.8.cudnn8.6.tar.gz";
hash = "sha256-GW//mX0brvN/waHo9Wd07xerOEz3X/H/HAW2ZehYtTA=";
}
{
version = "8.6.1.6";
minCudaVersion = "12.0";
maxCudaVersion = "12.0";
cudnnVersion = null;
filename = "TensorRT-8.6.1.6.Ubuntu-20.04.aarch64-gnu.cuda-12.0.tar.gz";
hash = "sha256-Lc4+v/yBr17VlecCSFMLUDlXMTYV68MGExwnUjGme5E=";
}
];
# x86_64
linux-x86_64 = [
{
version = "8.0.3.4";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
cudnnVersion = "8.2";
filename = "TensorRT-8.0.3.4.Linux.x86_64-gnu.cuda-10.2.cudnn8.2.tar.gz";
hash = "sha256-LxcXgwe1OCRfwDsEsNLIkeNsOcx3KuF5Sj+g2dY6WD0=";
}
{
version = "8.0.3.4";
minCudaVersion = "11.0";
maxCudaVersion = "11.3";
cudnnVersion = "8.2";
filename = "TensorRT-8.0.3.4.Linux.x86_64-gnu.cuda-11.3.cudnn8.2.tar.gz";
hash = "sha256-MXdDUCT/SqWm26jB7QarEcwOG/O7cS36Y6Q0IvQTE/M=";
}
{
version = "8.2.5.1";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
cudnnVersion = "8.2";
filename = "TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-10.2.cudnn8.2.tar.gz";
hash = "sha256-XV2Bf2LH8OM2GEMjV80MDweb1hSVF/wFUcaW3KP2m8Q=";
}
{
# The docs claim this supports through 11.5 despite the file name indicating 11.4.
version = "8.2.5.1";
minCudaVersion = "11.0";
maxCudaVersion = "11.5";
cudnnVersion = "8.2";
filename = "TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz";
hash = "sha256-LcNpYvDiT7AavqzK1MRlijo2qDN7jznigeS77US713E=";
}
{
version = "8.4.3.1";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
cudnnVersion = "8.4";
filename = "TensorRT-8.4.3.1.Linux.x86_64-gnu.cuda-10.2.cudnn8.4.tar.gz";
hash = "sha256-2c3Zzt93FBWWQtrSIvpbzzS6BT9s0NzALzdwXGLOZEU=";
}
{
# The docs claim this supports through 11.7 despite the file name indicating 11.6.
version = "8.4.3.1";
minCudaVersion = "11.0";
maxCudaVersion = "11.7";
cudnnVersion = "8.4";
filename = "TensorRT-8.4.3.1.Linux.x86_64-gnu.cuda-11.6.cudnn8.4.tar.gz";
hash = "sha256-jXwghcFjncxzh1BIwjWYqFJs4wiRNoduMdkCWOSeT2E=";
}
{
version = "8.5.3.1";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
cudnnVersion = "8.6";
filename = "TensorRT-8.5.3.1.Linux.x86_64-gnu.cuda-10.2.cudnn8.6.tar.gz";
hash = "sha256-WCt6yfOmFbrjqdYCj6AE2+s2uFpISwk6urP+2I0BnGQ=";
}
{
version = "8.5.3.1";
minCudaVersion = "11.0";
maxCudaVersion = "11.8";
cudnnVersion = "8.6";
filename = "TensorRT-8.5.3.1.Linux.x86_64-gnu.cuda-11.8.cudnn8.6.tar.gz";
hash = "sha256-BNeuOYvPTUAfGxI0DVsNrX6Z/FAB28+SE0ptuGu7YDY=";
}
{
version = "8.6.1.6";
minCudaVersion = "11.0";
maxCudaVersion = "11.8";
cudnnVersion = null;
filename = "TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz";
hash = "sha256-Fb/mBT1F/uxF7McSOpEGB2sLQ/oENfJC2J3KB3gzd1k=";
}
{
version = "8.6.1.6";
minCudaVersion = "12.0";
maxCudaVersion = "12.1";
cudnnVersion = null;
filename = "TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-12.0.tar.gz";
hash = "sha256-D4FXpfxTKZQ7M4uJNZE3M1CvqQyoEjnNrddYDNHrolQ=";
}
];
};
}

View File

@@ -1,185 +0,0 @@
# NOTE: Check https://docs.nvidia.com/deeplearning/cudnn/archives/index.html for support matrices.
[
{
version = "7.4.2.24";
minCudaVersion = "10.0";
maxCudaVersion = "10.0";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v7.4.2/cudnn-10.0-linux-x64-v7.4.2.24.tgz";
hash = "sha256-Lt/IagK1DRfojEeJVaMy5qHoF05+U6NFi06lH68C2qM=";
}
{
version = "7.6.5.32";
minCudaVersion = "10.0";
maxCudaVersion = "10.0";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v7.6.5/cudnn-10.0-linux-x64-v7.6.5.32.tgz";
hash = "sha256-KDVeOV8LK5OsLIO2E2CzW6bNA3fkTni+GXtrYbS0kro=";
}
{
version = "7.6.5.32";
minCudaVersion = "10.1";
maxCudaVersion = "10.1";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v7.6.5/cudnn-10.1-linux-x64-v7.6.5.32.tgz";
hash = "sha256-fq7IA5osMKsLx1jTA1iHZ2k972v0myJIWiwAvy4TbLM=";
}
{
version = "7.6.5.32";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v7.6.5/cudnn-10.2-linux-x64-v7.6.5.32.tgz";
hash = "sha256-fq7IA5osMKsLx1jTA1iHZ2k972v0myJIWiwAvy4TbLN=";
}
{
version = "8.0.5.39";
minCudaVersion = "10.1";
maxCudaVersion = "10.1";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.0.5/cudnn-10.1-linux-x64-v8.0.5.39.tgz";
hash = "sha256-kJCElSmIlrM6qVBjo0cfk8NmJ9esAcF9w211xl7qSgA=";
}
{
version = "8.0.5.39";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.0.5/cudnn-10.2-linux-x64-v8.0.5.39.tgz";
hash = "sha256-IfhMBcZ78eyFnnfDjM1b8VSWT6HDCPRJlZvkw1bjgvM=";
}
{
version = "8.0.5.39";
minCudaVersion = "11.0";
maxCudaVersion = "11.0";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.0.5/cudnn-11.0-linux-x64-v8.0.5.39.tgz";
hash = "sha256-ThbueJXetKixwZS4ErpJWG730mkCBRQB03F1EYmKm3M=";
}
{
version = "8.0.5.39";
minCudaVersion = "11.1";
maxCudaVersion = "11.1";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.0.5/cudnn-11.1-linux-x64-v8.0.5.39.tgz";
hash = "sha256-HQRr+nk5navMb2yxUHkYdUQ5RC6gyp4Pvs3URvmwDM4=";
}
{
version = "8.1.1.33";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.1.1/cudnn-10.2-linux-x64-v8.1.1.33.tgz";
hash = "sha256-Kkp7mabpv6aQ6xm7QeSVU/KnpJGls6v8rpAOFmxbbr0=";
}
{
version = "8.1.1.33";
minCudaVersion = "11.0";
maxCudaVersion = "11.2";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.1.1/cudnn-11.2-linux-x64-v8.1.1.33.tgz";
hash = "sha256-mKh4TpKGLyABjSDCgbMNSgzZUfk2lPZDPM9K6cUCumo=";
}
{
version = "8.2.4.15";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.2.4/cudnn-10.2-linux-x64-v8.2.4.15.tgz";
hash = "sha256-0jyUoxFaHHcRamwSfZF1+/WfcjNkN08mo0aZB18yIvE=";
}
{
version = "8.2.4.15";
minCudaVersion = "11.0";
maxCudaVersion = "11.4";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.2.4/cudnn-11.4-linux-x64-v8.2.4.15.tgz";
hash = "sha256-Dl0t+JC5ln76ZhnaQhMQ2XMjVlp58FoajLm3Fluq0Nc=";
}
{
version = "8.3.3.40";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.3.3/local_installers/10.2/cudnn-linux-x86_64-8.3.3.40_cuda10.2-archive.tar.xz";
hash = "sha256-2FVPKzLmKV1fyPOsJeaPlAWLAYyAHaucFD42gS+JJqs=";
}
{
version = "8.3.3.40";
minCudaVersion = "11.0";
maxCudaVersion = "11.6";
url = "https://developer.download.nvidia.com/compute/redist/cudnn/v8.3.3/local_installers/11.5/cudnn-linux-x86_64-8.3.3.40_cuda11.5-archive.tar.xz";
hash = "sha256-6r6Wx1zwPqT1N5iU2RTx+K4UzqsSGYnoSwg22Sf7dzE=";
}
{
version = "8.4.1.50";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.4.1.50_cuda10.2-archive.tar.xz";
hash = "sha256-I88qMmU6lIiLVmaPuX7TTbisgTav839mssxUo3lQNjg=";
}
{
version = "8.4.1.50";
minCudaVersion = "11.0";
maxCudaVersion = "11.7";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.4.1.50_cuda11.6-archive.tar.xz";
hash = "sha256-7JbSN22B/KQr3T1MPXBambKaBlurV/kgVhx2PinGfQE=";
}
{
version = "8.5.0.96";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.5.0.96_cuda10-archive.tar.xz";
hash = "sha256-1mzhbbzR40WKkHnQLtJHhg0vYgf7G8a0OBcCwIOkJjM=";
}
{
version = "8.5.0.96";
minCudaVersion = "11.0";
maxCudaVersion = "11.7";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.5.0.96_cuda11-archive.tar.xz";
hash = "sha256-VFSm/ZTwCHKMqumtrZk8ToXvNjAuJrzkO+p9RYpee20=";
}
{
version = "8.6.0.163";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.6.0.163_cuda10-archive.tar.xz";
hash = "sha256-t4sr/GrFqqdxu2VhaJQk5K1Xm/0lU4chXG8hVL09R9k=";
}
{
version = "8.6.0.163";
minCudaVersion = "11.0";
maxCudaVersion = "11.8";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.6.0.163_cuda11-archive.tar.xz";
hash = "sha256-u8OW30cpTGV+3AnGAGdNYIyxv8gLgtz0VHBgwhcRFZ4=";
}
{
version = "8.7.0.84";
minCudaVersion = "10.2";
maxCudaVersion = "10.2";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.7.0.84_cuda10-archive.tar.xz";
hash = "sha256-bZhaqc8+GbPV2FQvvbbufd8VnEJgvfkICc2N3/gitRg=";
}
{
version = "8.7.0.84";
minCudaVersion = "11.0";
maxCudaVersion = "11.8";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.7.0.84_cuda11-archive.tar.xz";
hash = "sha256-l2xMunIzyXrnQAavq1Fyl2MAukD1slCiH4z3H1nJ920=";
}
{
version = "8.8.1.3";
minCudaVersion = "11.0";
maxCudaVersion = "11.8";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.8.1.3_cuda11-archive.tar.xz";
hash = "sha256-r3WEyuDMVSS1kT7wjCm6YVQRPGDrCjegWQqRtRWoqPk=";
}
{
version = "8.8.1.3";
minCudaVersion = "12.0";
maxCudaVersion = "12.0";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.8.1.3_cuda12-archive.tar.xz";
hash = "sha256-edd6dpx+cXWrx7XC7VxJQUjAYYqGQThyLIh/lcYjd3w=";
}
{
version = "8.9.6.50";
minCudaVersion = "11.0";
maxCudaVersion = "11.8";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.9.6.50_cuda11-archive.tar.xz";
hash = "sha256-oOLvVemfTNZH99HaqlqkUE/6M1ujAYbVwyiPL0ffBX4=";
}
{
version = "8.9.6.50";
minCudaVersion = "12.0";
maxCudaVersion = "12.2";
url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.9.6.50_cuda12-archive.tar.xz";
hash = "sha256-FyIlnblSZbs4E0OKWhxuzZed6JrkU2YDkEBC4STTAtU=";
}
]