Merge pull request #336924 from natsukium/openllm/update

python312Packages.openllm: 0.5.7 -> 0.6.10; python312Packages.bentoml: 1.2.18 -> 1.3.3
OTABI Tomoya 2024-08-26 08:40:21 +09:00 committed by GitHub
commit c15b8367b6
7 changed files with 105 additions and 406 deletions
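
A quick way to try the bumped packages is to pin nixpkgs to this revision. A minimal sketch, not part of the commit, using the abbreviated hash from the header above (fetchTarball is used impurely here, so no output hash is supplied):

let
  pkgs = import (fetchTarball "https://github.com/NixOS/nixpkgs/archive/c15b8367b6.tar.gz") { };
in
{
  openllm = pkgs.openllm; # 0.6.10, now a top-level package (see the alias hunk below)
  bentoml = pkgs.python312Packages.bentoml; # 1.3.3
}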

@@ -0,0 +1,76 @@
{
  lib,
  fetchFromGitHub,
  python3,
}:

let
  python = python3.override {
    self = python;
    packageOverrides = _: super: {
      cattrs = super.cattrs.overridePythonAttrs (oldAttrs: rec {
        version = "23.1.2";
        build-system = [ super.poetry-core ];
        src = oldAttrs.src.override {
          rev = "refs/tags/v${version}";
          hash = "sha256-YO4Clbo5fmXbysxwwM2qCHJwO5KwDC05VctRVFruJcw=";
        };
      });
    };
  };
in
python.pkgs.buildPythonApplication rec {
  pname = "openllm";
  version = "0.6.10";
  pyproject = true;

  src = fetchFromGitHub {
    owner = "bentoml";
    repo = "openllm";
    rev = "refs/tags/v${version}";
    hash = "sha256-4KIpe6KjbBDDUj0IjzSccxjgZyBoaUVIQJYk1+W01Vo=";
  };

  pythonRemoveDeps = [
    "pathlib"
    "pip-requirements-parser"
  ];

  pythonRelaxDeps = [ "openai" ];

  build-system = with python.pkgs; [
    hatch-vcs
    hatchling
  ];

  dependencies = with python.pkgs; [
    accelerate
    bentoml
    dulwich
    nvidia-ml-py
    openai
    psutil
    pyaml
    questionary
    tabulate
    typer
    uv
  ];

  # no tests
  doCheck = false;

  pythonImportsCheck = [ "openllm" ];

  meta = with lib; {
    description = "Run any open-source LLMs, such as Llama 3.1 and Gemma, as an OpenAI-compatible API endpoint in the cloud";
    homepage = "https://github.com/bentoml/OpenLLM";
    changelog = "https://github.com/bentoml/OpenLLM/releases/tag/v${version}";
    license = licenses.asl20;
    maintainers = with maintainers; [
      happysalada
      natsukium
    ];
    mainProgram = "openllm";
  };
}
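
The notable pattern in the new file above is the scoped interpreter override: openllm needs the older cattrs 23.1.2, so the derivation builds against a private python3 whose package set carries the pin (self = python keeps the override set self-referential) instead of leaking it into the global python3Packages. Because the interpreter arrives as an ordinary argument, downstream users can still swap it; a minimal overlay sketch under that assumption:

final: prev: {
  # rebuild openllm against an explicit interpreter; the cattrs pin inside
  # the derivation is re-applied on top of whatever python3 is passed in
  openllm = prev.openllm.override { python3 = prev.python312; };
}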

@@ -7,6 +7,7 @@
  hatchling,
  hatch-vcs,
  aiohttp,
  aiosqlite,
  attrs,
  cattrs,
  circus,
@@ -15,25 +16,38 @@
  cloudpickle,
  deepmerge,
  fs,
  fs-s3fs,
  grpcio,
  grpcio-channelz,
  grpcio-health-checking,
  grpcio-reflection,
  httpx,
  httpx-ws,
  inflection,
  inquirerpy,
  jinja2,
  numpy,
  nvidia-ml-py,
  opentelemetry-api,
  opentelemetry-exporter-otlp,
  opentelemetry-exporter-otlp-proto-http,
  opentelemetry-instrumentation,
  opentelemetry-instrumentation-aiohttp-client,
  opentelemetry-instrumentation-asgi,
  opentelemetry-instrumentation-grpc,
  opentelemetry-sdk,
  opentelemetry-semantic-conventions,
  opentelemetry-util-http,
  packaging,
  pandas,
  pathspec,
  pillow,
  pip-requirements-parser,
  pip-tools,
  prometheus-client,
  protobuf,
  psutil,
  pyarrow,
  pydantic,
  python-dateutil,
  python-json-logger,
  python-multipart,
@@ -44,25 +58,10 @@
  starlette,
  tomli,
  tomli-w,
  tritonclient,
  uv,
  uvicorn,
  watchfiles,
  fs-s3fs,
  grpcio,
  grpcio-health-checking,
  opentelemetry-instrumentation-grpc,
  protobuf,
  grpcio-channelz,
  grpcio-reflection,
  pillow,
  pydantic,
  pandas,
  pyarrow,
  opentelemetry-exporter-otlp-proto-http,
  # https://pypi.org/project/opentelemetry-exporter-jaeger-proto-grpc/
  # , opentelemetry-exporter-jaeger # support for this exporter ends in july 2023
  opentelemetry-exporter-otlp,
  # , opentelemetry-exporter-zipkin
  tritonclient,
  # native check inputs
  pytestCheckHook,
  pytest-xdist,
@@ -75,7 +74,7 @@
}:

let
  version = "1.2.18";
  version = "1.3.3";
  aws = [ fs-s3fs ];
  grpc = [
    grpcio
@@ -90,7 +89,10 @@ let
  ];
  grpc-reflection = grpc ++ [ grpcio-reflection ];
  grpc-channelz = grpc ++ [ grpcio-channelz ];
  monitor-otlp = [ opentelemetry-exporter-otlp-proto-http ];
  monitor-otlp = [
    opentelemetry-exporter-otlp-proto-http
    opentelemetry-instrumentation-grpc
  ];
  # tracing-jaeger = [ opentelemetry-exporter-jaeger ];
  tracing-otlp = [ opentelemetry-exporter-otlp ];
  # tracing-zipkin = [ opentelemetry-exporter-zipkin ];
@@ -126,7 +128,7 @@ buildPythonPackage {
    owner = "bentoml";
    repo = "BentoML";
    rev = "refs/tags/v${version}";
    hash = "sha256-giZteSikwS9YEcVMPCC9h2khbBgvUPRW1biAyixO13Y=";
    hash = "sha256-PjmXPSPukLJ+iCpBdUynhcWCfFqplmdsgj0LYpodE/c=";
  };

  pythonRelaxDeps = [
@@ -148,6 +150,7 @@ buildPythonPackage {

  dependencies = [
    aiohttp
    aiosqlite
    attrs
    cattrs
    circus
@@ -159,6 +162,7 @@ buildPythonPackage {
    httpx
    httpx-ws
    inflection
    inquirerpy
    jinja2
    numpy
    nvidia-ml-py
@@ -172,7 +176,6 @@ buildPythonPackage {
    packaging
    pathspec
    pip-requirements-parser
    pip-tools
    prometheus-client
    psutil
    pydantic
@@ -185,6 +188,7 @@ buildPythonPackage {
    simple-di
    starlette
    tomli-w
    uv
    uvicorn
    watchfiles
  ] ++ lib.optionals (pythonOlder "3.11") [ tomli ];
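
The let-bound lists above (aws, grpc, monitor-otlp, and so on) mirror BentoML's Python extras and are exposed on the built package as optional-dependencies, which is how the openllm expressions in this commit splice in bentoml.optional-dependencies.grpc. A minimal consumer sketch, assuming the extras keep these names in 1.3.3 and evaluating in the pkgs scope:

python312.withPackages (
  ps:
  [ ps.bentoml ]
  # pull in the gRPC serving stack plus OTLP monitoring
  ++ ps.bentoml.optional-dependencies.grpc
  ++ ps.bentoml.optional-dependencies.monitor-otlp
)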

@@ -1,70 +0,0 @@
{
  lib,
  buildPythonPackage,
  pythonOlder,
  bentoml,
  hatch-fancy-pypi-readme,
  hatch-vcs,
  hatchling,
  anyio,
  distro,
  httpx,
  httpx-auth,
  openllm-core,
  soundfile,
  transformers,
}:

buildPythonPackage rec {
  inherit (openllm-core) src version;
  pname = "openllm-client";
  pyproject = true;

  disabled = pythonOlder "3.8";

  sourceRoot = "${src.name}/openllm-client";

  postPatch = ''
    substituteInPlace pyproject.toml \
      --replace-fail "hatchling==1.18.0" "hatchling" \
      --replace-fail "hatch-vcs==0.3.0" "hatch-vcs" \
      --replace-fail "hatch-fancy-pypi-readme==23.1.0" "hatch-fancy-pypi-readme"
  '';

  build-system = [
    hatch-fancy-pypi-readme
    hatch-vcs
    hatchling
  ];

  dependencies = [
    anyio
    distro
    httpx
    openllm-core
  ];

  optional-dependencies = {
    grpc = [ bentoml ] ++ bentoml.optional-dependencies.grpc;
    auth = [ httpx-auth ];
    agents = [
      transformers
      # diffusers
      soundfile
    ] ++ transformers.optional-dependencies.agents;
    full = optional-dependencies.grpc ++ optional-dependencies.agents;
  };

  # there are no tests
  doCheck = false;

  pythonImportsCheck = [ "openllm_client" ];

  meta = with lib; {
    description = "Interacting with OpenLLM HTTP/gRPC server, or any BentoML server";
    homepage = "https://github.com/bentoml/OpenLLM/tree/main/openllm-client";
    changelog = "https://github.com/bentoml/OpenLLM/blob/${src.rev}/CHANGELOG.md";
    license = licenses.asl20;
    maintainers = with maintainers; [ natsukium ];
  };
}

@@ -1,106 +0,0 @@
{
  lib,
  buildPythonPackage,
  fetchFromGitHub,
  pythonOlder,
  accelerate,
  attrs,
  bitsandbytes,
  bentoml,
  cattrs,
  click-option-group,
  datasets,
  deepmerge,
  hatch-fancy-pypi-readme,
  hatch-vcs,
  hatchling,
  inflection,
  mypy-extensions,
  orjson,
  peft,
  pydantic,
  transformers,
  typing-extensions,
}:

buildPythonPackage rec {
  pname = "openllm-core";
  version = "0.5.7";
  pyproject = true;

  disabled = pythonOlder "3.8";

  src = fetchFromGitHub {
    owner = "bentoml";
    repo = "OpenLLM";
    rev = "refs/tags/v${version}";
    hash = "sha256-sEZLszzoo39WUnziHGp7zWNO0YaqkXeXAoIxvyhw42w=";
  };

  sourceRoot = "${src.name}/openllm-core";

  postPatch = ''
    substituteInPlace pyproject.toml \
      --replace-fail "hatch-vcs==" "hatch-vcs>=" \
      --replace-fail "hatchling==" "hatchling>=" \
      --replace-fail "hatch-fancy-pypi-readme==" "hatch-fancy-pypi-readme>="
  '';

  pythonRelaxDeps = [ "cattrs" ];

  build-system = [
    hatch-fancy-pypi-readme
    hatch-vcs
    hatchling
  ];

  dependencies = [
    attrs
    cattrs
    pydantic

    # not listed in pyproject.toml, but required at runtime
    click-option-group
    deepmerge
    inflection
    mypy-extensions
    orjson
    typing-extensions
  ];

  optional-dependencies = {
    vllm = [
      # vllm
    ];
    bentoml = [ bentoml ];
    fine-tune = [
      accelerate
      bitsandbytes
      datasets
      peft
      transformers
      # trl
    ] ++ transformers.optional-dependencies.torch ++ transformers.optional-dependencies.tokenizers;
    full =
      with optional-dependencies;
      (
        vllm
        # use the full attribute path to disambiguate from the derivation argument
        ++ optional-dependencies.bentoml
        ++ fine-tune
      );
  };

  # there are no tests
  doCheck = false;

  pythonImportsCheck = [ "openllm_core" ];

  meta = with lib; {
    description = "Core components for OpenLLM";
    homepage = "https://github.com/bentoml/OpenLLM/tree/main/openllm-core";
    changelog = "https://github.com/bentoml/OpenLLM/blob/${src.rev}/CHANGELOG.md";
    license = licenses.asl20;
    maintainers = with maintainers; [ natsukium ];
  };
}
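
The disambiguation comment in the removed file points at a real Nix scoping rule: with never shadows an existing lexical binding, so inside with optional-dependencies; the bare name bentoml still resolves to the function argument rather than to the extra. A minimal repro with throwaway values:

let
  bentoml = "function argument";
  optional-dependencies.bentoml = "extra";
in
with optional-dependencies;
# evaluates to "function argument", hence the explicit
# optional-dependencies.bentoml in the file above
bentoml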

@@ -1,200 +0,0 @@
{
  lib,
  buildPythonPackage,
  hatch-fancy-pypi-readme,
  hatch-vcs,
  hatchling,
  pytestCheckHook,
  pythonOlder,
  accelerate,
  bentoml,
  bitsandbytes,
  build,
  click,
  ctranslate2,
  datasets,
  docker,
  einops,
  ghapi,
  huggingface-hub,
  hypothesis,
  ipython,
  jupyter,
  jupytext,
  nbformat,
  notebook,
  openai,
  openllm-client,
  openllm-core,
  optimum,
  peft,
  pytest-mock,
  pytest-randomly,
  pytest-rerunfailures,
  pytest-xdist,
  safetensors,
  scipy,
  sentencepiece,
  soundfile,
  syrupy,
  tabulate,
  tiktoken,
  transformers,
  triton,
  xformers,
}:

buildPythonPackage rec {
  inherit (openllm-core) src version;
  pname = "openllm";
  pyproject = true;

  disabled = pythonOlder "3.8";

  sourceRoot = "${src.name}/openllm-python";

  pythonRemoveDeps = [
    # remove cuda-python as it has an unfree license
    "cuda-python"
  ];

  build-system = [
    hatch-fancy-pypi-readme
    hatch-vcs
    hatchling
  ];

  dependencies =
    [
      accelerate
      bentoml
      bitsandbytes
      build
      click
      einops
      ghapi
      openllm-client
      openllm-core
      optimum
      safetensors
      scipy
      sentencepiece
      transformers
    ]
    ++ bentoml.optional-dependencies.io
    ++ tabulate.optional-dependencies.widechars
    ++ transformers.optional-dependencies.tokenizers
    ++ transformers.optional-dependencies.torch;

  optional-dependencies = {
    agents = [
      # diffusers
      soundfile
      transformers
    ] ++ transformers.optional-dependencies.agents;
    awq = [
      # autoawq
    ];
    baichuan = [
      # cpm-kernels
    ];
    chatglm = [
      # cpm-kernels
    ];
    ctranslate = [ ctranslate2 ];
    falcon = [ xformers ];
    fine-tune = [
      datasets
      huggingface-hub
      peft
      # trl
    ];
    ggml = [
      # ctransformers
    ];
    gptq = [
      # auto-gptq
    ]; # ++ autogptq.optional-dependencies.triton;
    grpc = [ bentoml ] ++ bentoml.optional-dependencies.grpc;
    mpt = [ triton ];
    openai = [
      openai
      tiktoken
    ] ++ openai.optional-dependencies.datalib;
    playground = [
      ipython
      jupyter
      jupytext
      nbformat
      notebook
    ];
    starcoder = [ bitsandbytes ];
    vllm = [
      # vllm
    ];
    full =
      with optional-dependencies;
      (
        agents
        ++ awq
        ++ baichuan
        ++ chatglm
        ++ ctranslate
        ++ falcon
        ++ fine-tune
        ++ ggml
        ++ gptq
        ++ mpt
        # disambiguate between derivation input and passthru field
        ++ optional-dependencies.openai
        ++ playground
        ++ starcoder
        ++ vllm
      );
    all = optional-dependencies.full;
  };

  nativeCheckInputs = [
    docker
    hypothesis
    pytest-mock
    pytest-randomly
    pytest-rerunfailures
    pytest-xdist
    pytestCheckHook
    syrupy
  ];

  preCheck = ''
    export HOME=$TMPDIR
    # skip GPU tests on CI
    export GITHUB_ACTIONS=1
    # disable hypothesis' deadline
    export CI=1
  '';

  disabledTestPaths = [
    # require network access
    "tests/models"
  ];

  disabledTests = [
    # incompatible with recent TypedDict
    # https://github.com/bentoml/OpenLLM/blob/f3fd32d596253ae34c68e2e9655f19f40e05f666/openllm-python/tests/configuration_test.py#L18-L21
    "test_missing_default"
  ];

  pythonImportsCheck = [ "openllm" ];

  meta = with lib; {
    description = "Operating LLMs in production";
    homepage = "https://github.com/bentoml/OpenLLM/tree/main/openllm-python";
    changelog = "https://github.com/bentoml/OpenLLM/blob/${src.rev}/CHANGELOG.md";
    license = licenses.asl20;
    maintainers = with maintainers; [
      happysalada
      natsukium
    ];
  };
}

@@ -354,6 +354,9 @@ mapAliases ({
  openai-triton-no-cuda = triton-no-cuda; # added 2024-07-18
  openapi-schema-pydantic = throw "openapi-schema-pydantic has been removed, since it is no longer maintained"; # added 2023-10-30
  opencv3 = throw "opencv3 has been removed as it is obsolete"; # added 2023-10-12
  openllm = throw "openllm has moved to pkgs.openllm"; # added 2024-08-24
  openllm-client = throw "openllm-client has been removed, since it is abandoned due to a change in philosophy"; # added 2024-08-24
  openllm-core = throw "openllm-core has been removed, since it is abandoned due to a change in philosophy"; # added 2024-08-24
  opsdroid_get_image_size = opsdroid-get-image-size; # added 2023-10-16
  ordereddict = throw "ordereddict has been removed because it is only useful on unsupported python versions."; # added 2022-05-28
  pafy = throw "pafy has been removed because it is unmaintained and only a dependency of mps-youtube, itself superseded by yewtube"; # Added 2023-01-19
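
These throw aliases turn the old attribute paths into evaluation-time errors that carry a migration hint. For openllm the user-side fix is a one-line move from the Python package set to the top level; a hypothetical NixOS configuration fragment:

{ pkgs, ... }:
{
  # before this commit (now fails with "openllm has moved to pkgs.openllm"):
  # environment.systemPackages = [ pkgs.python312Packages.openllm ];

  # after:
  environment.systemPackages = [ pkgs.openllm ];
}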

@@ -4651,14 +4651,6 @@ self: super: with self; {
  oelint-parser = callPackage ../development/python-modules/oelint-parser { };

  openllm = callPackage ../development/python-modules/openllm {
    triton = self.triton-cuda;
  };

  openllm-client = callPackage ../development/python-modules/openllm-client { };

  openllm-core = callPackage ../development/python-modules/openllm-core { };

  openstep-parser = callPackage ../development/python-modules/openstep-parser { };

  openstep-plist = callPackage ../development/python-modules/openstep-plist { };