Fedir Zadniprovskyi committed on
Commit
5aa421e
1 Parent(s): 9c0d580

fix: models route not returning openai compatible response

Browse files
faster_whisper_server/gradio_app.py CHANGED
@@ -67,7 +67,7 @@ def create_gradio_demo(config: Config) -> gr.Blocks:
67
  def update_model_dropdown() -> gr.Dropdown:
68
  res = http_client.get("/v1/models")
69
  res_data = res.json()
70
- models: list[str] = [model["id"] for model in res_data]
71
  assert config.whisper.model in models
72
  recommended_models = {model for model in models if model.startswith("Systran")}
73
  other_models = [model for model in models if model not in recommended_models]
 
67
  def update_model_dropdown() -> gr.Dropdown:
68
  res = http_client.get("/v1/models")
69
  res_data = res.json()
70
+ models: list[str] = [model["id"] for model in res_data["data"]]
71
  assert config.whisper.model in models
72
  recommended_models = {model for model in models if model.startswith("Systran")}
73
  other_models = [model for model in models if model not in recommended_models]
faster_whisper_server/main.py CHANGED
@@ -38,6 +38,7 @@ from faster_whisper_server.config import (
38
  from faster_whisper_server.gradio_app import create_gradio_demo
39
  from faster_whisper_server.logger import logger
40
  from faster_whisper_server.server_models import (
 
41
  ModelObject,
42
  TranscriptionJsonResponse,
43
  TranscriptionVerboseJsonResponse,
@@ -85,7 +86,7 @@ def health() -> Response:
85
 
86
 
87
  @app.get("/v1/models")
88
- def get_models() -> list[ModelObject]:
89
  models = huggingface_hub.list_models(library="ctranslate2", tags="automatic-speech-recognition")
90
  models = [
91
  ModelObject(
@@ -97,7 +98,7 @@ def get_models() -> list[ModelObject]:
97
  for model in models
98
  if model.created_at is not None
99
  ]
100
- return models
101
 
102
 
103
  @app.get("/v1/models/{model_name:path}")
 
38
  from faster_whisper_server.gradio_app import create_gradio_demo
39
  from faster_whisper_server.logger import logger
40
  from faster_whisper_server.server_models import (
41
+ ModelListResponse,
42
  ModelObject,
43
  TranscriptionJsonResponse,
44
  TranscriptionVerboseJsonResponse,
 
86
 
87
 
88
  @app.get("/v1/models")
89
+ def get_models() -> ModelListResponse:
90
  models = huggingface_hub.list_models(library="ctranslate2", tags="automatic-speech-recognition")
91
  models = [
92
  ModelObject(
 
98
  for model in models
99
  if model.created_at is not None
100
  ]
101
+ return ModelListResponse(data=models)
102
 
103
 
104
  @app.get("/v1/models/{model_name:path}")
faster_whisper_server/server_models.py CHANGED
@@ -119,6 +119,11 @@ class TranscriptionVerboseJsonResponse(BaseModel):
119
  )
120
 
121
 
 
 
 
 
 
122
  class ModelObject(BaseModel):
123
  id: str
124
  """The model identifier, which can be referenced in the API endpoints."""
 
119
  )
120
 
121
 
122
+ class ModelListResponse(BaseModel):
123
+ data: list[ModelObject]
124
+ object: Literal["list"] = "list"
125
+
126
+
127
  class ModelObject(BaseModel):
128
  id: str
129
  """The model identifier, which can be referenced in the API endpoints."""
pyproject.toml CHANGED
@@ -17,8 +17,8 @@ dependencies = [
17
  ]
18
 
19
  [project.optional-dependencies]
20
- dev = ["ruff", "pytest", "pytest-xdist"]
21
- other = ["youtube-dl@git+https://github.com/ytdl-org/youtube-dl.git", "openai", "aider-chat"]
22
 
23
  # https://docs.astral.sh/ruff/configuration/
24
  [tool.ruff]
 
17
  ]
18
 
19
  [project.optional-dependencies]
20
+ dev = ["ruff", "pytest", "pytest-xdist", "openai"]
21
+ other = ["youtube-dl@git+https://github.com/ytdl-org/youtube-dl.git", "aider-chat"]
22
 
23
  # https://docs.astral.sh/ruff/configuration/
24
  [tool.ruff]
requirements-dev.txt CHANGED
@@ -9,6 +9,7 @@ annotated-types==0.7.0
9
  anyio==4.4.0
10
  # via
11
  # httpx
 
12
  # starlette
13
  # watchfiles
14
  attrs==23.2.0
@@ -38,6 +39,8 @@ ctranslate2==4.3.1
38
  # via faster-whisper
39
  cycler==0.12.1
40
  # via matplotlib
 
 
41
  dnspython==2.6.1
42
  # via email-validator
43
  email-validator==2.2.0
@@ -82,6 +85,7 @@ httpx==0.27.0
82
  # fastapi
83
  # gradio
84
  # gradio-client
 
85
  httpx-sse==0.4.0
86
  # via faster-whisper-server (pyproject.toml)
87
  huggingface-hub==0.23.4
@@ -138,6 +142,8 @@ numpy==2.0.0
138
  # pandas
139
  onnxruntime==1.18.0
140
  # via faster-whisper
 
 
141
  orjson==3.10.5
142
  # via
143
  # fastapi
@@ -170,6 +176,7 @@ pydantic==2.8.0
170
  # faster-whisper-server (pyproject.toml)
171
  # fastapi
172
  # gradio
 
173
  # pydantic-settings
174
  pydantic-core==2.20.0
175
  # via pydantic
@@ -236,6 +243,7 @@ sniffio==1.3.1
236
  # via
237
  # anyio
238
  # httpx
 
239
  soundfile==0.12.1
240
  # via faster-whisper-server (pyproject.toml)
241
  starlette==0.37.2
@@ -249,7 +257,9 @@ tomlkit==0.12.0
249
  toolz==0.12.1
250
  # via altair
251
  tqdm==4.66.4
252
- # via huggingface-hub
 
 
253
  typer==0.12.3
254
  # via
255
  # fastapi-cli
@@ -260,6 +270,7 @@ typing-extensions==4.12.2
260
  # gradio
261
  # gradio-client
262
  # huggingface-hub
 
263
  # pydantic
264
  # pydantic-core
265
  # typer
 
9
  anyio==4.4.0
10
  # via
11
  # httpx
12
+ # openai
13
  # starlette
14
  # watchfiles
15
  attrs==23.2.0
 
39
  # via faster-whisper
40
  cycler==0.12.1
41
  # via matplotlib
42
+ distro==1.9.0
43
+ # via openai
44
  dnspython==2.6.1
45
  # via email-validator
46
  email-validator==2.2.0
 
85
  # fastapi
86
  # gradio
87
  # gradio-client
88
+ # openai
89
  httpx-sse==0.4.0
90
  # via faster-whisper-server (pyproject.toml)
91
  huggingface-hub==0.23.4
 
142
  # pandas
143
  onnxruntime==1.18.0
144
  # via faster-whisper
145
+ openai==1.35.9
146
+ # via faster-whisper-server (pyproject.toml)
147
  orjson==3.10.5
148
  # via
149
  # fastapi
 
176
  # faster-whisper-server (pyproject.toml)
177
  # fastapi
178
  # gradio
179
+ # openai
180
  # pydantic-settings
181
  pydantic-core==2.20.0
182
  # via pydantic
 
243
  # via
244
  # anyio
245
  # httpx
246
+ # openai
247
  soundfile==0.12.1
248
  # via faster-whisper-server (pyproject.toml)
249
  starlette==0.37.2
 
257
  toolz==0.12.1
258
  # via altair
259
  tqdm==4.66.4
260
+ # via
261
+ # huggingface-hub
262
+ # openai
263
  typer==0.12.3
264
  # via
265
  # fastapi-cli
 
270
  # gradio
271
  # gradio-client
272
  # huggingface-hub
273
+ # openai
274
  # pydantic
275
  # pydantic-core
276
  # typer
tests/api_model_test.py CHANGED
@@ -1,4 +1,5 @@
1
  from fastapi.testclient import TestClient
 
2
 
3
  from faster_whisper_server.server_models import ModelObject
4
 
@@ -17,10 +18,8 @@ def model_dict_to_object(model_dict: dict) -> ModelObject:
17
  )
18
 
19
 
20
- def test_list_models(client: TestClient) -> None:
21
- response = client.get("/v1/models")
22
- data = response.json()
23
- models = [model_dict_to_object(model_dict) for model_dict in data]
24
  assert len(models) > MIN_EXPECTED_NUMBER_OF_MODELS
25
 
26
 
 
1
  from fastapi.testclient import TestClient
2
+ from openai import OpenAI
3
 
4
  from faster_whisper_server.server_models import ModelObject
5
 
 
18
  )
19
 
20
 
21
+ def test_list_models(openai_client: OpenAI) -> None:
22
+ models = openai_client.models.list().data
 
 
23
  assert len(models) > MIN_EXPECTED_NUMBER_OF_MODELS
24
 
25
 
tests/conftest.py CHANGED
@@ -2,6 +2,7 @@ from collections.abc import Generator
2
  import logging
3
 
4
  from fastapi.testclient import TestClient
 
5
  import pytest
6
 
7
  from faster_whisper_server.main import app
@@ -19,3 +20,8 @@ def pytest_configure() -> None:
19
  def client() -> Generator[TestClient, None, None]:
20
  with TestClient(app) as client:
21
  yield client
 
 
 
 
 
 
2
  import logging
3
 
4
  from fastapi.testclient import TestClient
5
+ from openai import OpenAI
6
  import pytest
7
 
8
  from faster_whisper_server.main import app
 
20
  def client() -> Generator[TestClient, None, None]:
21
  with TestClient(app) as client:
22
  yield client
23
+
24
+
25
+ @pytest.fixture()
26
+ def openai_client(client: TestClient) -> OpenAI:
27
+ return OpenAI(api_key="cant-be-empty", http_client=client)