Fix mypy issue vllm evidently #3169


Merged 4 commits on Nov 6, 2024
2 changes: 2 additions & 0 deletions pyproject.toml
@@ -476,5 +476,7 @@ module = [
     "prodigy.components.db.*",
     "transformers.*",
     "langchain_community.*",
+    "vllm.*",
+    "numba.*",
 ]
 ignore_missing_imports = true
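
The two new entries extend mypy's per-module override so that missing type stubs for these untyped third-party packages no longer produce errors. For context, a minimal sketch of the override table this hunk belongs to in pyproject.toml (the full module list above the added entries is abridged here):

```toml
# Sketch of the mypy per-module override extended by this hunk; the module
# entries other than the added "vllm.*" and "numba.*" are illustrative.
[[tool.mypy.overrides]]
module = [
    "langchain_community.*",
    "vllm.*",
    "numba.*",
]
ignore_missing_imports = true
```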
2 changes: 1 addition & 1 deletion src/zenml/integrations/evidently/__init__.py
@@ -33,7 +33,7 @@

 # Fix numba errors in Docker and suppress logs and deprecation warning spam
 try:
-    from numba.core.errors import ( # type: ignore[import-not-found]
+    from numba.core.errors import (
         NumbaDeprecationWarning,
         NumbaPendingDeprecationWarning,
     )
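
With "numba.*" now covered by the `ignore_missing_imports` override in pyproject.toml, the inline `# type: ignore[import-not-found]` on this import is redundant and can be dropped. The rest of the block is not shown in this hunk; a minimal sketch of how such an optional import is typically paired with warning suppression, using the standard-library `warnings` API (the exact suppression code in the module may differ):

```python
import warnings

try:
    from numba.core.errors import (
        NumbaDeprecationWarning,
        NumbaPendingDeprecationWarning,
    )

    # Silence the deprecation-warning spam mentioned in the comment above.
    warnings.filterwarnings("ignore", category=NumbaDeprecationWarning)
    warnings.filterwarnings("ignore", category=NumbaPendingDeprecationWarning)
except ImportError:
    # numba is optional; nothing to suppress if it is not installed.
    pass
```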
23 changes: 16 additions & 7 deletions src/zenml/integrations/vllm/services/vllm_deployment.py
@@ -13,6 +13,7 @@
 # permissions and limitations under the License.
 """Implementation of the vLLM Inference Server Service."""

+import argparse
 import os
 from typing import Any, List, Optional, Union

@@ -137,15 +138,23 @@ def run(self) -> None:
         self.endpoint.prepare_for_start()

         import uvloop
-        from vllm.entrypoints.openai.api_server import run_server
-        from vllm.entrypoints.openai.cli_args import make_arg_parser
-        from vllm.utils import FlexibleArgumentParser
+        from vllm.entrypoints.openai.api_server import (
+            run_server,
+        )
+        from vllm.entrypoints.openai.cli_args import (
+            make_arg_parser,
+        )
+        from vllm.utils import (
+            FlexibleArgumentParser,
+        )

         try:
-            parser = make_arg_parser(FlexibleArgumentParser())
-            args = parser.parse_args()
+            parser: argparse.ArgumentParser = make_arg_parser(
+                FlexibleArgumentParser()
+            )
+            args: argparse.Namespace = parser.parse_args()
             # Override port with the available port
-            self.config.port = self.endpoint.status.port
+            self.config.port = self.endpoint.status.port or self.config.port
             # Update the arguments in place
             args.__dict__.update(self.config.model_dump())
             uvloop.run(run_server(args=args))
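
The explicit `argparse.ArgumentParser` / `argparse.Namespace` annotations give mypy concrete types now that `vllm.*` is excluded from stub checking, and the `or self.config.port` fallback keeps a valid port when the endpoint has not allocated one yet. As a small self-contained illustration of the namespace-override technique used here (the `--port` / `--model` options and the config dict are hypothetical stand-ins for the real vLLM CLI arguments and service config):

```python
import argparse

# Stand-in for make_arg_parser(FlexibleArgumentParser()).
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument("--port", type=int, default=8000)
parser.add_argument("--model", type=str, default=None)

args: argparse.Namespace = parser.parse_args([])

# Override the parsed defaults in place, mirroring
# args.__dict__.update(self.config.model_dump()).
config = {"port": 8080, "model": "facebook/opt-125m"}
args.__dict__.update(config)

assert args.port == 8080  # run_server would now see the overridden port
```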
@@ -161,7 +170,7 @@ def prediction_url(self) -> Optional[str]:
         """
         if not self.is_running:
             return None
-        return self.endpoint.prediction_url_path
+        return self.endpoint.prediction_url

     def predict(self, data: "Any") -> "Any":
         """Make a prediction using the service.
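
Switching to `prediction_url` points at an attribute the endpoint actually provides (the old `prediction_url_path` is presumably what mypy flagged). A hedged usage sketch of calling the deployed server through that URL, assuming it fronts vLLM's OpenAI-compatible completions route (the helper name, model name, and exact path are illustrative):

```python
import requests


def query_vllm_service(base_url: str, prompt: str) -> dict:
    """POST a completion request to a deployed vLLM OpenAI-compatible server."""
    response = requests.post(
        f"{base_url.rstrip('/')}/v1/completions",
        json={"model": "facebook/opt-125m", "prompt": prompt, "max_tokens": 16},
        timeout=30,
    )
    response.raise_for_status()
    return response.json()


# Usage (illustrative), with `service` a running vLLM deployment service:
# result = query_vllm_service(service.prediction_url, "Hello")
```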