Skip to content

Commit cf5d701

Browse files
ystory authored and copybara-github committed
fix: Remove display_name for non-Vertex file uploads
Merge #1211 ### Description When using the Google.GenAI backend (GEMINI_API), file uploads fail if the `file_data` or `inline_data` parts of the request contain a `display_name`. The Gemini API (non-Vertex) does not support this attribute, causing a `ValueError`. This commit updates the `_preprocess_request` method in the `Gemini` class to sanitize the request. It now iterates through all content parts and sets `display_name` to `None` if the determined backend is `GEMINI_API`. This ensures compatibility, similar to the existing handling of the `labels` attribute. Fixes #1182 ### Testing Plan **1. Unit Tests** - Added a new parameterized test `test_preprocess_request_handles_backend_specific_fields` to `tests/unittests/models/test_google_llm.py`. - This test verifies: - When the backend is `GEMINI_API`, `display_name` in `file_data` and `inline_data` is correctly set to `None`. - When the backend is `VERTEX_AI`, `display_name` remains unchanged. - All unit tests passed successfully. ```shell pytest ./tests/unittests/models/test_google_llm.py ============================================================================================ test session starts ============================================================================================ platform darwin -- Python 3.12.10, pytest-8.3.5, pluggy-1.6.0 rootdir: /Users/leo/PycharmProjects/adk-python configfile: pyproject.toml plugins: anyio-4.9.0, langsmith-0.3.42, asyncio-0.26.0, mock-3.14.0, xdist-3.6.1 asyncio: mode=Mode.AUTO, asyncio_default_fixture_loop_scope=function, asyncio_default_test_loop_scope=function collected 20 items tests/unittests/models/test_google_llm.py .................... [100%] ============================================================================================ 20 passed in 3.19s ============================================================================================= ``` **2. 
Manual End-to-End (E2E) Test** I manually verified the fix using `adk web`. The test was configured to use a **Google AI Studio API key**, which is the scenario where the bug occurs. - **Before the fix:** When uploading a file, the request failed with the error: `{"error": "display_name parameter is not supported in Gemini API."}`. This confirms the bug. <img width="968" alt="Screenshot 2025-06-06 at 21 22 35" src="https://github.com/user-attachments/assets/f1ab2db2-d5ec-40fc-a182-9932562b21e1" /> - **After the fix:** With the patch applied, the same file upload was processed successfully. The agent correctly analyzed the file and responded without errors. <img width="973" alt="Screenshot 2025-06-06 at 21 23 24" src="https://github.com/user-attachments/assets/e03228f6-0b7d-4bf9-955a-ac24efb4fb72" /> COPYBARA_INTEGRATE_REVIEW=#1211 from ystory:fix/display-name d3efebe PiperOrigin-RevId: 769278445
1 parent f38c08b commit cf5d701

File tree

2 files changed

+107
-2
lines changed

2 files changed

+107
-2
lines changed

src/google/adk/models/google_llm.py

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
from typing import AsyncGenerator
2424
from typing import cast
2525
from typing import TYPE_CHECKING
26+
from typing import Union
2627

2728
from google.genai import Client
2829
from google.genai import types
@@ -244,9 +245,18 @@ async def connect(self, llm_request: LlmRequest) -> BaseLlmConnection:
244245

245246
def _preprocess_request(self, llm_request: LlmRequest) -> None:
246247

247-
if llm_request.config and self._api_backend == GoogleLLMVariant.GEMINI_API:
248+
if self._api_backend == GoogleLLMVariant.GEMINI_API:
248249
# Using API key from Google AI Studio to call model doesn't support labels.
249-
llm_request.config.labels = None
250+
if llm_request.config:
251+
llm_request.config.labels = None
252+
253+
if llm_request.contents:
254+
for content in llm_request.contents:
255+
if not content.parts:
256+
continue
257+
for part in content.parts:
258+
_remove_display_name_if_present(part.inline_data)
259+
_remove_display_name_if_present(part.file_data)
250260

251261

252262
def _build_function_declaration_log(
@@ -324,3 +334,15 @@ def _build_response_log(resp: types.GenerateContentResponse) -> str:
324334
{resp.model_dump_json(exclude_none=True)}
325335
-----------------------------------------------------------
326336
"""
337+
338+
339+
def _remove_display_name_if_present(
340+
data_obj: Union[types.Blob, types.FileData, None],
341+
):
342+
"""Sets display_name to None for the Gemini API (non-Vertex) backend.
343+
344+
This backend does not support the display_name parameter for file uploads,
345+
so it must be removed to prevent request failures.
346+
"""
347+
if data_obj and data_obj.display_name:
348+
data_obj.display_name = None

tests/unittests/models/test_google_llm.py

Lines changed: 83 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414

1515
import os
1616
import sys
17+
from typing import Optional
1718
from unittest import mock
1819

1920
from google.adk import version as adk_version
@@ -23,6 +24,7 @@
2324
from google.adk.models.google_llm import Gemini
2425
from google.adk.models.llm_request import LlmRequest
2526
from google.adk.models.llm_response import LlmResponse
27+
from google.adk.utils.variant_utils import GoogleLLMVariant
2628
from google.genai import types
2729
from google.genai import version as genai_version
2830
from google.genai.types import Content
@@ -337,3 +339,84 @@ async def __aexit__(self, *args):
337339
):
338340
async with gemini_llm.connect(llm_request) as connection:
339341
assert connection is mock_connection
342+
343+
344+
@pytest.mark.parametrize(
345+
(
346+
"api_backend, "
347+
"expected_file_display_name, "
348+
"expected_inline_display_name, "
349+
"expected_labels"
350+
),
351+
[
352+
(
353+
GoogleLLMVariant.GEMINI_API,
354+
None,
355+
None,
356+
None,
357+
),
358+
(
359+
GoogleLLMVariant.VERTEX_AI,
360+
"My Test PDF",
361+
"My Test Image",
362+
{"key": "value"},
363+
),
364+
],
365+
)
366+
def test_preprocess_request_handles_backend_specific_fields(
367+
gemini_llm: Gemini,
368+
api_backend: GoogleLLMVariant,
369+
expected_file_display_name: Optional[str],
370+
expected_inline_display_name: Optional[str],
371+
expected_labels: Optional[str],
372+
):
373+
"""
374+
Tests that _preprocess_request correctly sanitizes fields based on the API backend.
375+
376+
- For GEMINI_API, it should remove 'display_name' from file/inline data
377+
and remove 'labels' from the config.
378+
- For VERTEX_AI, it should leave these fields untouched.
379+
"""
380+
# Arrange: Create a request with fields that need to be preprocessed.
381+
llm_request_with_files = LlmRequest(
382+
model="gemini-1.5-flash",
383+
contents=[
384+
Content(
385+
role="user",
386+
parts=[
387+
Part(
388+
file_data=types.FileData(
389+
file_uri="gs://bucket/file.pdf",
390+
mime_type="application/pdf",
391+
display_name="My Test PDF",
392+
)
393+
),
394+
Part(
395+
inline_data=types.Blob(
396+
data=b"some_bytes",
397+
mime_type="image/png",
398+
display_name="My Test Image",
399+
)
400+
),
401+
],
402+
)
403+
],
404+
config=types.GenerateContentConfig(labels={"key": "value"}),
405+
)
406+
407+
# Mock the _api_backend property to control the test scenario
408+
with mock.patch.object(
409+
Gemini, "_api_backend", new_callable=mock.PropertyMock
410+
) as mock_backend:
411+
mock_backend.return_value = api_backend
412+
413+
# Act: Run the preprocessing method
414+
gemini_llm._preprocess_request(llm_request_with_files)
415+
416+
# Assert: Check if the fields were correctly processed
417+
file_part = llm_request_with_files.contents[0].parts[0]
418+
inline_part = llm_request_with_files.contents[0].parts[1]
419+
420+
assert file_part.file_data.display_name == expected_file_display_name
421+
assert inline_part.inline_data.display_name == expected_inline_display_name
422+
assert llm_request_with_files.config.labels == expected_labels

0 commit comments

Comments (0)