Tool Calling
Function calling and tool integration with the Responses API
The Responses API supports comprehensive tool calling capabilities, allowing models to call functions, execute tools in parallel, and handle complex multi-step workflows.
Basic Tool Definition
Define tools using the OpenAI function calling format:
const weatherTool = {
type: 'function' as const,
name: 'get_weather',
description: 'Get the current weather in a location',
strict: null,
parameters: {
type: 'object',
properties: {
location: {
type: 'string',
description: 'The city and state, e.g. San Francisco, CA',
},
unit: {
type: 'string',
enum: ['celsius', 'fahrenheit'],
},
},
required: ['location'],
},
};
const response = await fetch('https://llm.onerouter.pro/v1/responses', {
method: 'POST',
headers: {
'Authorization': 'Bearer <<API_KEY>>',
'Content-Type': 'application/json',
},
body: JSON.stringify({
model: 'o4-mini',
input: [
{
type: 'message',
role: 'user',
content: [
{
type: 'input_text',
text: 'What is the weather in San Francisco?',
},
],
},
],
tools: [weatherTool],
tool_choice: 'auto',
max_output_tokens: 9000,
}),
});
const result = await response.json();
console.log(result);

import requests
weather_tool = {
'type': 'function',
'name': 'get_weather',
'description': 'Get the current weather in a location',
'strict': None,
'parameters': {
'type': 'object',
'properties': {
'location': {
'type': 'string',
'description': 'The city and state, e.g. San Francisco, CA',
},
'unit': {
'type': 'string',
'enum': ['celsius', 'fahrenheit'],
},
},
'required': ['location'],
},
}
response = requests.post(
'https://llm.onerouter.pro/v1/responses',
headers={
'Authorization': 'Bearer <<API_KEY>>',
'Content-Type': 'application/json',
},
json={
'model': 'o4-mini',
'input': [
{
'type': 'message',
'role': 'user',
'content': [
{
'type': 'input_text',
'text': 'What is the weather in San Francisco?',
},
],
},
],
'tools': [weather_tool],
'tool_choice': 'auto',
'max_output_tokens': 9000,
}
)
result = response.json()
print(result)

curl -X POST https://llm.onerouter.pro/v1/responses \
-H "Authorization: Bearer <<API_KEY>>" \
-H "Content-Type: application/json" \
-d '{
"model": "o4-mini",
"input": [
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": "What is the weather in San Francisco?"
}
]
}
],
"tools": [
{
"type": "function",
"name": "get_weather",
"description": "Get the current weather in a location",
"strict": null,
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"]
}
},
"required": ["location"]
}
}
],
"tool_choice": "auto",
"max_output_tokens": 9000
}'

Tool Choice Options
Control when and how tools are called:
auto
Model decides whether to call tools
none
Model will not call any tools
{type: 'function', name: 'tool_name'}
Force specific tool call
Force Specific Tool
const response = await fetch('https://llm.onerouter.pro/v1/responses', {
method: 'POST',
headers: {
'Authorization': 'Bearer <<API_KEY>>',
'Content-Type': 'application/json',
},
body: JSON.stringify({
model: 'o4-mini',
input: [
{
type: 'message',
role: 'user',
content: [
{
type: 'input_text',
text: 'Hello, how are you?',
},
],
},
],
tools: [weatherTool],
tool_choice: { type: 'function', name: 'get_weather' },
max_output_tokens: 9000,
}),
});

import requests
response = requests.post(
'https://llm.onerouter.pro/v1/responses',
headers={
'Authorization': 'Bearer <<API_KEY>>',
'Content-Type': 'application/json',
},
json={
'model': 'o4-mini',
'input': [
{
'type': 'message',
'role': 'user',
'content': [
{
'type': 'input_text',
'text': 'Hello, how are you?',
},
],
},
],
'tools': [weather_tool],
'tool_choice': {'type': 'function', 'name': 'get_weather'},
'max_output_tokens': 9000,
}
)

Disable Tool Calling
const response = await fetch('https://llm.onerouter.pro/v1/responses', {
method: 'POST',
headers: {
'Authorization': 'Bearer <<API_KEY>>',
'Content-Type': 'application/json',
},
body: JSON.stringify({
model: 'o4-mini',
input: [
{
type: 'message',
role: 'user',
content: [
{
type: 'input_text',
text: 'What is the weather in Paris?',
},
],
},
],
tools: [weatherTool],
tool_choice: 'none',
max_output_tokens: 9000,
}),
});

import requests
response = requests.post(
'https://llm.onerouter.pro/v1/responses',
headers={
'Authorization': 'Bearer <<API_KEY>>',
'Content-Type': 'application/json',
},
json={
'model': 'o4-mini',
'input': [
{
'type': 'message',
'role': 'user',
'content': [
{
'type': 'input_text',
'text': 'What is the weather in Paris?',
},
],
},
],
'tools': [weather_tool],
'tool_choice': 'none',
'max_output_tokens': 9000,
}
)

Multiple Tools
Define multiple tools for complex workflows:
const calculatorTool = {
type: 'function' as const,
name: 'calculate',
description: 'Perform mathematical calculations',
strict: null,
parameters: {
type: 'object',
properties: {
expression: {
type: 'string',
description: 'The mathematical expression to evaluate',
},
},
required: ['expression'],
},
};
const response = await fetch('https://llm.onerouter.pro/v1/responses', {
method: 'POST',
headers: {
'Authorization': 'Bearer <<API_KEY>>',
'Content-Type': 'application/json',
},
body: JSON.stringify({
model: 'o4-mini',
input: [
{
type: 'message',
role: 'user',
content: [
{
type: 'input_text',
text: 'What is 25 * 4?',
},
],
},
],
tools: [weatherTool, calculatorTool],
tool_choice: 'auto',
max_output_tokens: 9000,
}),
});

calculator_tool = {
'type': 'function',
'name': 'calculate',
'description': 'Perform mathematical calculations',
'strict': None,
'parameters': {
'type': 'object',
'properties': {
'expression': {
'type': 'string',
'description': 'The mathematical expression to evaluate',
},
},
'required': ['expression'],
},
}
response = requests.post(
'https://llm.onerouter.pro/v1/responses',
headers={
'Authorization': 'Bearer <<API_KEY>>',
'Content-Type': 'application/json',
},
json={
'model': 'o4-mini',
'input': [
{
'type': 'message',
'role': 'user',
'content': [
{
'type': 'input_text',
'text': 'What is 25 * 4?',
},
],
},
],
'tools': [weather_tool, calculator_tool],
'tool_choice': 'auto',
'max_output_tokens': 9000,
}
)

Parallel Tool Calls
The API supports parallel execution of multiple tools:
const response = await fetch('https://llm.onerouter.pro/v1/responses', {
method: 'POST',
headers: {
'Authorization': 'Bearer <<API_KEY>>',
'Content-Type': 'application/json',
},
body: JSON.stringify({
model: 'o4-mini',
input: [
{
type: 'message',
role: 'user',
content: [
{
type: 'input_text',
text: 'Calculate 10*5 and also tell me the weather in Miami',
},
],
},
],
tools: [weatherTool, calculatorTool],
tool_choice: 'auto',
max_output_tokens: 9000,
}),
});
const result = await response.json();
console.log(result);

import requests
response = requests.post(
'https://llm.onerouter.pro/v1/responses',
headers={
'Authorization': 'Bearer <<API_KEY>>',
'Content-Type': 'application/json',
},
json={
'model': 'o4-mini',
'input': [
{
'type': 'message',
'role': 'user',
'content': [
{
'type': 'input_text',
'text': 'Calculate 10*5 and also tell me the weather in Miami',
},
],
},
],
'tools': [weather_tool, calculator_tool],
'tool_choice': 'auto',
'max_output_tokens': 9000,
}
)
result = response.json()
print(result)

Tool Call Response
When tools are configured, the response object echoes the tool definitions under `tools`; if the model decides to call a function, the `output` array contains `function_call` items. (The example below shows a completed response where no tool was called, since `tool_choice` was `none`.)
{
'id': 'resp_00a20c9216f71dfd00691ad83eac488190921ba5878cea6955',
'object': 'response',
'created_at': 1763366974,
'status': 'completed',
'background': False,
'content_filters': None,
'error': None,
'incomplete_details': None,
'instructions': None,
'max_output_tokens': 9000,
'max_tool_calls': None,
'model': 'gpt-5.1-codex-mini',
'output': [{
'id': 'rs_00a20c9216f71dfd00691ad840ca908190bc88e56ed6c76672',
'type': 'reasoning',
'summary': []
}, {
'id': 'msg_00a20c9216f71dfd00691ad840dda88190ba851bd4524757f8',
'type': 'message',
'status': 'completed',
'content': [{
'type': 'output_text',
'annotations': [],
'logprobs': [],
'text': 'Hello! I’m doing well, thanks for asking. How can I assist you today?'
}],
'role': 'assistant'
}],
'parallel_tool_calls': True,
'previous_response_id': None,
'prompt_cache_key': None,
'reasoning': {
'effort': 'medium',
'summary': None
},
'safety_identifier': None,
'service_tier': 'default',
'store': True,
'temperature': 1.0,
'text': {
'format': {
'type': 'text'
},
'verbosity': 'medium'
},
'tool_choice': 'none',
'tools': [{
'type': 'function',
'description': 'Get the current weather in a location',
'name': 'get_weather',
'parameters': {
'type': 'object',
'properties': {
'location': {
'type': 'string',
'description': 'The city and state, e.g. San Francisco, CA'
},
'unit': {
'type': 'string',
'enum': ['celsius', 'fahrenheit']
}
},
'required': ['location', 'unit'],
'additionalProperties': False
},
'strict': True
}],
'top_logprobs': 0,
'top_p': 1.0,
'truncation': 'disabled',
'usage': {
'input_tokens': 78,
'input_tokens_details': {
'cached_tokens': 0
},
'output_tokens': 24,
'output_tokens_details': {
'reasoning_tokens': 0
},
'total_tokens': 102
},
'user': None,
'metadata': {}
}

Tool Responses in Conversation
Include tool responses in follow-up requests:
const response = await fetch('https://llm.onerouter.pro/v1/responses', {
method: 'POST',
headers: {
'Authorization': 'Bearer <<API_KEY>>',
'Content-Type': 'application/json',
},
body: JSON.stringify({
model: 'o4-mini',
input: [
{
type: 'message',
role: 'user',
content: [
{
type: 'input_text',
text: 'What is the weather in Boston?',
},
],
},
{
type: 'function_call',
id: 'fc_1',
call_id: 'call_123',
name: 'get_weather',
arguments: JSON.stringify({ location: 'Boston, MA' }),
},
{
type: 'function_call_output',
id: 'fc_output_1',
call_id: 'call_123',
output: JSON.stringify({ temperature: '72°F', condition: 'Sunny' }),
},
{
type: 'message',
role: 'assistant',
id: 'msg_abc123',
status: 'completed',
content: [
{
type: 'output_text',
text: 'The weather in Boston is currently 72°F and sunny. This looks like perfect weather for a picnic!',
annotations: []
}
]
},
{
type: 'message',
role: 'user',
content: [
{
type: 'input_text',
text: 'Is that good weather for a picnic?',
},
],
},
],
max_output_tokens: 9000,
}),
});

import requests
response = requests.post(
'https://llm.onerouter.pro/v1/responses',
headers={
'Authorization': 'Bearer <<API_KEY>>',
'Content-Type': 'application/json',
},
json={
'model': 'o4-mini',
'input': [
{
'type': 'message',
'role': 'user',
'content': [
{
'type': 'input_text',
'text': 'What is the weather in Boston?',
},
],
},
{
'type': 'function_call',
'id': 'fc_1',
'call_id': 'call_123',
'name': 'get_weather',
'arguments': '{"location": "Boston, MA"}',
},
{
'type': 'function_call_output',
'id': 'fc_output_1',
'call_id': 'call_123',
'output': '{"temperature": "72°F", "condition": "Sunny"}',
},
{
'type': 'message',
'role': 'assistant',
'id': 'msg_abc123',
'status': 'completed',
'content': [
{
'type': 'output_text',
'text': 'The weather in Boston is currently 72°F and sunny. This looks like perfect weather for a picnic!',
'annotations': []
}
]
},
{
'type': 'message',
'role': 'user',
'content': [
{
'type': 'input_text',
'text': 'Is that good weather for a picnic?',
},
],
},
],
'max_output_tokens': 9000,
}
)

The `id` field is required for `function_call_output` objects when including tool responses in conversation history.
Streaming Tool Calls
Monitor tool calls in real-time with streaming:
const response = await fetch('https://llm.onerouter.pro/v1/responses', {
method: 'POST',
headers: {
'Authorization': 'Bearer <<API_KEY>>',
'Content-Type': 'application/json',
},
body: JSON.stringify({
model: 'o4-mini',
input: [
{
type: 'message',
role: 'user',
content: [
{
type: 'input_text',
text: 'What is the weather like in Tokyo, Japan? Please check the weather.',
},
],
},
],
tools: [weatherTool],
tool_choice: 'auto',
stream: true,
max_output_tokens: 9000,
}),
});
const reader = response.body?.getReader();
const decoder = new TextDecoder();
while (true) {
const { done, value } = await reader.read();
if (done) break;
const chunk = decoder.decode(value);
const lines = chunk.split('\n');
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = line.slice(6);
if (data === '[DONE]') return;
try {
const parsed = JSON.parse(data);
if (parsed.type === 'response.output_item.added' &&
parsed.item?.type === 'function_call') {
console.log('Function call:', parsed.item.name);
}
if (parsed.type === 'response.function_call_arguments.done') {
console.log('Arguments:', parsed.arguments);
}
} catch (e) {
// Skip invalid JSON
}
}
}
}

import requests
import json
response = requests.post(
'https://llm.onerouter.pro/v1/responses',
headers={
'Authorization': 'Bearer <<API_KEY>>',
'Content-Type': 'application/json',
},
json={
'model': 'o4-mini',
'input': [
{
'type': 'message',
'role': 'user',
'content': [
{
'type': 'input_text',
'text': 'What is the weather like in Tokyo, Japan? Please check the weather.',
},
],
},
],
'tools': [weather_tool],
'tool_choice': 'auto',
'stream': True,
'max_output_tokens': 9000,
},
stream=True
)
for line in response.iter_lines():
if line:
line_str = line.decode('utf-8')
if line_str.startswith('data: '):
data = line_str[6:]
if data == '[DONE]':
break
try:
parsed = json.loads(data)
if (parsed.get('type') == 'response.output_item.added' and
parsed.get('item', {}).get('type') == 'function_call'):
print(f"Function call: {parsed['item']['name']}")
if parsed.get('type') == 'response.function_call_arguments.done':
print(f"Arguments: {parsed.get('arguments', '')}")
except json.JSONDecodeError:
continue

Tool Validation
Ensure tool calls have proper structure:
{
"type": "function_call",
"id": "fc_abc123",
"call_id": "call_xyz789",
"name": "get_weather",
"arguments": "{\"location\":\"Seattle, WA\"}"
}

Required fields:
type: Always "function_call"
id: Unique identifier for the function call object
name: Function name matching tool definition
arguments: Valid JSON string with function parameters
call_id: Unique identifier for the call
Best Practices
Clear descriptions: Provide detailed function descriptions and parameter explanations
Proper schemas: Use valid JSON Schema for parameters
Error handling: Handle cases where tools might not be called
Parallel execution: Design tools to work independently when possible
Conversation flow: Include tool responses in follow-up requests for context
Next Steps
Explore Reasoning with tools
Review Basic Usage fundamentals
Last updated