Skip to content

Commit e9d472e

Browse files
fix(generative-ai): Update count tokens sample (#11713)
* fix(generative-ai): Update count tokens sample - Change to new format - Add multimodal example * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * Fix test issues * Update generative_ai/gemini_count_token_example.py * Fixed test issues --------- Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
1 parent 918b2bd commit e9d472e

File tree

3 files changed

+64
-5
lines changed

3 files changed

+64
-5
lines changed

generative_ai/embedding_model_tuning_test.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,8 @@ def test_tune_embedding_model() -> None:
4343
credentials=credentials,
4444
)
4545
tuning_job = embedding_model_tuning.tune_embedding_model(
46-
aiplatform_init.global_config.api_endpoint)
46+
aiplatform_init.global_config.api_endpoint
47+
)
4748
try:
4849
assert tuning_job._status.name != "PIPELINE_STATE_FAILED"
4950
finally:

generative_ai/gemini_count_token_example.py

Lines changed: 57 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -13,10 +13,12 @@
1313
# limitations under the License.
1414

1515

16-
def count_tokens(project_id: str) -> GenerationResponse:
    """Count tokens for a text prompt, then report the token usage of the
    model's reply.  Returns the generation response so callers (tests) can
    inspect its usage metadata."""
    # [START generativeaionvertexai_gemini_token_count]
    import vertexai
    from vertexai.generative_models import GenerativeModel

    # TODO(developer): Update and un-comment below line
    # project_id = "PROJECT_ID"

    vertexai.init(project=project_id, location="us-central1")

    model = GenerativeModel(model_name="gemini-1.0-pro-002")

    prompt = "Why is the sky blue?"

    # Tokens/billable characters the prompt will consume, computed
    # locally-ish via the count_tokens endpoint before any generation.
    token_info = model.count_tokens(prompt)
    print(f"Prompt Token Count: {token_info.total_tokens}")
    print(f"Prompt Character Count: {token_info.total_billable_characters}")

    # Now actually send the prompt to Gemini.
    response = model.generate_content(prompt)

    # Token accounting the service reports for the completed request.
    usage = response.usage_metadata
    print(f"Prompt Token Count: {usage.prompt_token_count}")
    print(f"Candidates Token Count: {usage.candidates_token_count}")
    print(f"Total Token Count: {usage.total_token_count}")

    # [END generativeaionvertexai_gemini_token_count]
    return response
49+
50+
51+
def count_tokens_multimodal(project_id: str) -> GenerationResponse:
    """Count tokens for a multimodal (video + text) prompt, then report the
    token usage of the model's reply.  Returns the generation response so
    callers (tests) can inspect its usage metadata."""
    # [START generativeaionvertexai_gemini_token_count_multimodal]
    import vertexai
    from vertexai.generative_models import GenerativeModel, Part

    # TODO(developer): Update and un-comment below lines
    # project_id = "PROJECT_ID"

    vertexai.init(project=project_id, location="us-central1")

    model = GenerativeModel(model_name="gemini-1.5-pro-preview-0409")

    video = Part.from_uri(
        "gs://cloud-samples-data/generative-ai/video/pixel8.mp4",
        mime_type="video/mp4",
    )
    contents = [video, "Provide a description of the video."]

    # Tokens/billable characters the multimodal prompt will consume,
    # computed via the count_tokens endpoint before any generation.
    token_info = model.count_tokens(contents)
    print(f"Prompt Token Count: {token_info.total_tokens}")
    print(f"Prompt Character Count: {token_info.total_billable_characters}")

    # Now actually send the prompt to Gemini.
    response = model.generate_content(contents)
    usage = response.usage_metadata

    # Token accounting the service reports for the completed request.
    print(f"Prompt Token Count: {usage.prompt_token_count}")
    print(f"Candidates Token Count: {usage.candidates_token_count}")
    print(f"Total Token Count: {usage.total_token_count}")

    # [END generativeaionvertexai_gemini_token_count_multimodal]
    return response

generative_ai/test_gemini_examples.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -82,6 +82,11 @@ def test_gemini_multi_image_example() -> None:
8282
def test_gemini_count_token_example() -> None:
    """Smoke-test both token-count samples: each must return a response
    that carries usage metadata."""
    text_response = gemini_count_token_example.count_tokens(PROJECT_ID)
    assert text_response
    assert text_response.usage_metadata

    multimodal_response = gemini_count_token_example.count_tokens_multimodal(PROJECT_ID)
    assert multimodal_response
    assert multimodal_response.usage_metadata
8590

8691

8792
def test_gemini_safety_config_example() -> None:

0 commit comments

Comments
 (0)