Gemini
Using Gemini models.
Get Your API Key
Go to the API Keys page to create your API key.
API Base URL
https://tokenoff.com/api
Gemini models are accessed via the Gemini API, fully compatible with the Google Generative AI API.
curl
Blocking
# Blocking request: POST to the generateContent endpoint; pass your TokenOff
# API key as a Bearer token. The full model reply is returned in one JSON body.
curl https://tokenoff.com/api/v1beta/models/gemini-2.5-pro:generateContent \
-H "Authorization: Bearer your_api_key_here" \
-H "Content-Type: application/json" \
-d '{
"contents": [
{
"role": "user",
"parts": [
{"text": "Hello"}
]
}
]
}'
Output:
{
"candidates": [
{
"content": {
"parts": [
{
"text": "Hello! How can I help you today?"
}
],
"role": "model"
},
"finishReason": "STOP",
"index": 0
}
],
"usageMetadata": {
"promptTokenCount": 2,
"candidatesTokenCount": 9,
"totalTokenCount": 342,
"promptTokensDetails": [
{
"modality": "TEXT",
"tokenCount": 2
}
],
"thoughtsTokenCount": 331
},
"modelVersion": "gemini-2.5-pro",
"responseId": "3-3gafT1A7mn1MkP1qHryAc"
}
Streaming
# Streaming request: POST to the streamGenerateContent endpoint; the response
# arrives as server-sent events ("data: {...}" chunks) instead of one JSON body.
curl https://tokenoff.com/api/v1beta/models/gemini-2.5-pro:streamGenerateContent \
-H "Authorization: Bearer your_api_key_here" \
-H "Content-Type: application/json" \
-d '{
"contents": [
{
"role": "user",
"parts": [
{"text": "Hello"}
]
}
]
}'
Output:
data: {"candidates": [{"content": {"parts": [{"text": "Hello there! How can I help you today?"}],"role": "model"},"index": 0}],"usageMetadata": {"promptTokenCount": 2,"candidatesTokenCount": 10,"totalTokenCount": 424,"promptTokensDetails": [{"modality": "TEXT","tokenCount": 2}],"thoughtsTokenCount": 412},"modelVersion": "gemini-2.5-pro","responseId": "I-7gaaaYE92p1MkPkPr1wAM"}
data: {"candidates": [{"content": {"role": "model"},"finishReason": "STOP","index": 0}],"usageMetadata": {"promptTokenCount": 2,"candidatesTokenCount": 10,"totalTokenCount": 424,"promptTokensDetails": [{"modality": "TEXT","tokenCount": 2}],"thoughtsTokenCount": 412},"modelVersion": "gemini-2.5-pro","responseId": "I-7gaaaYE92p1MkPkPr1wAM"}Python
pip install google-genai
Blocking
from google import genai

# Point the Google GenAI SDK at the TokenOff gateway instead of the
# default Google endpoint; authentication uses your TokenOff API key.
gemini_client = genai.Client(
    api_key="your_api_key_here",
    http_options=genai.types.HttpOptions(base_url="https://tokenoff.com/api"),
)

# Blocking call: the complete reply is returned in a single response object.
result = gemini_client.models.generate_content(
    model="gemini-2.5-pro",
    contents="Hello",
)
print(result.text)
Output:
Hello there! How can I help you today?
Streaming
from google import genai

# Route all SDK traffic through the TokenOff-compatible base URL.
gemini_client = genai.Client(
    api_key="your_api_key_here",
    http_options=genai.types.HttpOptions(base_url="https://tokenoff.com/api"),
)

# Streaming call: chunks are yielded incrementally as the model generates them.
stream = gemini_client.models.generate_content_stream(
    model="gemini-2.5-pro",
    contents="Hello",
)
for piece in stream:
    print(piece)
Output:
sdk_http_response=HttpResponse(
headers=<dict len=11>
) candidates=[Candidate(
content=Content(
parts=[
Part(
text='Hello there! How can I help you today?'
),
],
role='model'
),
index=0
)] create_time=None model_version='gemini-2.5-pro' prompt_feedback=None response_id='KvDgaZalJPqk1MkP4u3XkQc' usage_metadata=GenerateContentResponseUsageMetadata(
candidates_token_count=10,
prompt_token_count=2,
prompt_tokens_details=[
ModalityTokenCount(
modality=<MediaModality.TEXT: 'TEXT'>,
token_count=2
),
],
thoughts_token_count=511,
total_token_count=523
) model_status=None automatic_function_calling_history=None parsed=None
sdk_http_response=HttpResponse(
headers=<dict len=11>
) candidates=[Candidate(
content=Content(
role='model'
),
finish_reason=<FinishReason.STOP: 'STOP'>,
index=0
)] create_time=None model_version='gemini-2.5-pro' prompt_feedback=None response_id='KvDgaZalJPqk1MkP4u3XkQc' usage_metadata=GenerateContentResponseUsageMetadata(
candidates_token_count=10,
prompt_token_count=2,
prompt_tokens_details=[
ModalityTokenCount(
modality=<MediaModality.TEXT: 'TEXT'>,
token_count=2
),
],
thoughts_token_count=511,
total_token_count=523
) model_status=None automatic_function_calling_history=None parsed=None
TypeScript
npm install @google/genai
Blocking
import { GoogleGenAI } from "@google/genai";
const ai = new GoogleGenAI({
apiKey: "your_api_key_here",
httpOptions: {
baseUrl: "https://tokenoff.com/api",
},
});
const response = await ai.models.generateContent({
model: "gemini-2.5-pro",
contents: "Hello",
});
console.log(response.text);
Output:
Hello! How can I help you today?
Streaming
import { GoogleGenAI } from "@google/genai";
const ai = new GoogleGenAI({
apiKey: "your_api_key_here",
httpOptions: {
baseUrl: "https://tokenoff.com/api",
},
});
const response = await ai.models.generateContentStream({
model: "gemini-2.5-pro",
contents: "Hello",
});
for await (const event of response) {
console.log(event);
}
Output:
GenerateContentResponse {
candidates: [ { content: [Object], index: 0 } ],
modelVersion: 'gemini-2.5-pro',
responseId: '6vHgaYq7H9qk1MkPxqDcqQ0',
usageMetadata: {
promptTokenCount: 2,
candidatesTokenCount: 4,
totalTokenCount: 344,
promptTokensDetails: [ [Object] ],
thoughtsTokenCount: 338
},
sdkHttpResponse: {
headers: {
'alt-svc': 'h3=":443"; ma=2592000, h3-29=":443"; ma=2592000',
'content-disposition': 'attachment',
'content-type': 'text/event-stream',
date: 'Thu, 16 Apr 2026 14:27:57 GMT',
server: 'scaffolding, on, HTTPServer2',
'server-timing': 'gfet4t7; dur=4180',
'transfer-encoding': 'chunked',
vary: 'Origin, X-Origin, Referer',
'x-content-type-options': 'nosniff',
'x-frame-options': 'SAMEORIGIN',
'x-xss-protection': '0'
}
}
}
GenerateContentResponse {
candidates: [ { content: [Object], finishReason: 'STOP', index: 0 } ],
modelVersion: 'gemini-2.5-pro',
responseId: '6vHgaYq7H9qk1MkPxqDcqQ0',
usageMetadata: {
promptTokenCount: 2,
candidatesTokenCount: 9,
totalTokenCount: 349,
promptTokensDetails: [ [Object] ],
thoughtsTokenCount: 338
},
sdkHttpResponse: {
headers: {
'alt-svc': 'h3=":443"; ma=2592000, h3-29=":443"; ma=2592000',
'content-disposition': 'attachment',
'content-type': 'text/event-stream',
date: 'Thu, 16 Apr 2026 14:27:57 GMT',
server: 'scaffolding, on, HTTPServer2',
'server-timing': 'gfet4t7; dur=4180',
'transfer-encoding': 'chunked',
vary: 'Origin, X-Origin, Referer',
'x-content-type-options': 'nosniff',
'x-frame-options': 'SAMEORIGIN',
'x-xss-protection': '0'
}
}
}
Supported Models
gemini-3.1-pro-preview
Gemini 3.1 Pro Preview
gemini-3-flash-preview
Gemini 3 Flash Preview
gemini-2.5-pro
Gemini 2.5 Pro
gemini-2.5-flash
Gemini 2.5 Flash
gemini-2.5-flash-lite
Gemini 2.5 Flash Lite
Contact Us
If you encounter any issues while using TokenOff:
Contact us at support@tokenoff.com and other official channels for technical support
Submit an issue on our GitHub repository