Agent Tool Calling with CreativeAPI
Complete guide to integrating CreativeAPI with AI agents using function calling. Works with Claude, GPT-4, Gemini, and LangChain.
Claude (Anthropic)
Native tool use with Claude Sonnet 4. Best for complex multi-step agents.
GPT-4 (OpenAI)
Function calling with GPT-4.1. Works with Assistants API too.
Gemini (Google)
Function declarations with Gemini 2.5. Fast and cost-effective.
Tool Schemas by Provider
Claude Tool Schema
{
"name": "generate_video",
"description": "Generate a video using AI. Returns a job ID for async polling or webhook delivery.",
"input_schema": {
"type": "object",
"properties": {
"prompt": {
"type": "string",
"description": "Text description of the video to generate"
},
"model": {
"type": "string",
"enum": ["auto", "kling-v3-pro", "seedance-1.5", "veo-3.1"],
"description": "Model to use. 'auto' picks best available."
},
"duration": {
"type": "integer",
"minimum": 3,
"maximum": 15,
"default": 5,
"description": "Video duration in seconds"
},
"aspect_ratio": {
"type": "string",
"enum": ["16:9", "9:16", "1:1", "4:3"],
"default": "16:9"
}
},
"required": ["prompt"]
}
}
GPT-4 Function Schema
{
"type": "function",
"function": {
"name": "generate_video",
"description": "Generate a video using AI. Async job with webhook delivery.",
"parameters": {
"type": "object",
"properties": {
"prompt": {
"type": "string",
"description": "Text description of the video"
},
"model": {
"type": "string",
"enum": ["auto", "kling-v3-pro", "seedance-1.5", "veo-3.1"],
"default": "auto"
},
"duration": {
"type": "integer",
"minimum": 3,
"maximum": 15,
"default": 5
},
"webhook_url": {
"type": "string",
"description": "URL to receive completion notification"
}
},
"required": ["prompt"]
}
}
}
Gemini Function Declaration
{
"name": "generate_video",
"description": "Generate a video using AI video generation models",
"parameters": {
"type": "object",
"properties": {
"prompt": {
"type": "string",
"description": "Video description"
},
"model": {
"type": "string",
"enum": ["auto", "kling-v3-pro", "seedance-1.5", "veo-3.1"],
"default": "auto"
},
"duration": {
"type": "integer",
"default": 5
}
},
"required": ["prompt"]
}
}
Complete Examples
Claude Agent Example
import Anthropic from '@anthropic-ai/sdk';
import CreativeAI from '@creativeai/node-sdk';
// SDK clients. The Anthropic client reads ANTHROPIC_API_KEY from the
// environment by default; CreativeAI takes its key explicitly.
const anthropic = new Anthropic();
const creative = new CreativeAI({ apiKey: process.env.CREATIVEAI_API_KEY });
// Define tools for Claude
// Anthropic's tool format: top-level name/description plus a JSON-Schema
// `input_schema` describing the arguments the model may pass.
const tools = [
  {
    name: 'generate_video',
    description: 'Generate a video from a text prompt',
    input_schema: {
      type: 'object',
      properties: {
        prompt: { type: 'string', description: 'Video description' },
        duration: { type: 'integer', default: 5 }, // seconds
        model: { type: 'string', enum: ['auto', 'kling-v3-pro', 'seedance-1.5'] }
      },
      required: ['prompt']
    }
  },
  {
    name: 'generate_image',
    description: 'Generate an image from a text prompt',
    input_schema: {
      type: 'object',
      properties: {
        prompt: { type: 'string', description: 'Image description' },
        model: { type: 'string', enum: ['gpt-image-1', 'flux-pro', 'seedream-3'] }
      },
      required: ['prompt']
    }
  }
];
// Agent loop
/**
 * Single-turn agent loop: send the user message to Claude, execute the
 * first tool call it makes, and return the tool's result.
 *
 * NOTE(review): a production agent would feed the result back to Claude
 * as a `tool_result` block and continue the conversation.
 *
 * @param userMessage natural-language request from the end user
 * @returns the CreativeAPI job/result for the invoked tool, or undefined
 *          when Claude answered without calling a tool
 */
async function runAgent(userMessage: string) {
  const response = await anthropic.messages.create({
    model: 'claude-sonnet-4-20250514',
    max_tokens: 4096,
    tools: tools.map(t => ({ name: t.name, input_schema: t.input_schema })),
    messages: [{ role: 'user', content: userMessage }]
  });

  // Claude returns a list of content blocks; tool invocations arrive as
  // `tool_use` blocks alongside any text blocks.
  for (const block of response.content) {
    if (block.type !== 'tool_use') continue;

    // The SDK types `block.input` as `unknown`; narrow it to the shape our
    // schemas guarantee before property access.
    const input = block.input as {
      prompt: string;
      model?: string;
      duration?: number;
    };

    let result;
    if (block.name === 'generate_video') {
      result = await creative.videos.generate({
        prompt: input.prompt,
        model: input.model ?? 'auto', // ?? (not ||): only default on null/undefined
        duration: input.duration ?? 5,
        webhook_url: process.env.WEBHOOK_URL
      });
    } else if (block.name === 'generate_image') {
      result = await creative.images.generate({
        prompt: input.prompt,
        model: input.model ?? 'gpt-image-1'
      });
    } else {
      // Defensive: Claude should only call declared tools — fail loudly
      // instead of silently returning undefined.
      throw new Error(`Unknown tool requested: ${block.name}`);
    }
    console.log('Tool result:', result);
    return result;
  }
}
// Usage
await runAgent('Generate a 5-second video of a drone flying over a tropical beach');
GPT-4 Agent Example
import OpenAI from 'openai';
import CreativeAI from '@creativeai/node-sdk';
// The OpenAI client reads OPENAI_API_KEY from the environment by default.
const openai = new OpenAI();
const creative = new CreativeAI({ apiKey: process.env.CREATIVEAI_API_KEY });
// Define tools for GPT-4
// OpenAI's format wraps each JSON-Schema tool in { type: 'function', function: {...} }.
const tools: OpenAI.Chat.Completions.ChatCompletionTool[] = [
  {
    type: 'function',
    function: {
      name: 'generate_video',
      description: 'Generate an AI video. Returns job ID for async tracking.',
      parameters: {
        type: 'object',
        properties: {
          prompt: { type: 'string', description: 'Video description' },
          duration: { type: 'integer', default: 5 }, // seconds
          model: { type: 'string', enum: ['auto', 'kling-v3-pro', 'seedance-1.5'] }
        },
        required: ['prompt']
      }
    }
  },
  {
    // Companion tool so the agent can poll async video jobs itself.
    type: 'function',
    function: {
      name: 'get_job_status',
      description: 'Check status of async video generation job',
      parameters: {
        type: 'object',
        properties: {
          job_id: { type: 'string', description: 'Job ID from generate_video' }
        },
        required: ['job_id']
      }
    }
  }
];
// Agent with automatic tool execution
/**
 * Single-turn GPT-4 agent: sends the user message, executes the first
 * recognized tool call, and returns the tool's result. Falls back to the
 * assistant's text when no tool was called.
 *
 * NOTE(review): a production loop would append the tool result as a
 * `role: 'tool'` message and call the model again.
 *
 * @param userMessage natural-language request from the end user
 */
async function runAgent(userMessage: string) {
  const response = await openai.chat.completions.create({
    model: 'gpt-4.1',
    messages: [{ role: 'user', content: userMessage }],
    tools,
    tool_choice: 'auto' // let the model decide whether a tool is needed
  });

  const message = response.choices[0].message;
  if (message.tool_calls) {
    for (const toolCall of message.tool_calls) {
      // Arguments arrive as a JSON string; narrow after parsing rather
      // than working with an implicit `any`.
      const args = JSON.parse(toolCall.function.arguments) as {
        prompt?: string;
        model?: string;
        duration?: number;
        job_id?: string;
      };
      if (toolCall.function.name === 'generate_video') {
        const job = await creative.videos.generate({
          prompt: args.prompt,
          model: args.model ?? 'auto', // ?? (not ||): only default on null/undefined
          duration: args.duration ?? 5
        });
        console.log('Started job:', job.id);
        return job;
      }
      if (toolCall.function.name === 'get_job_status') {
        const status = await creative.videos.getStatus(args.job_id);
        console.log('Job status:', status);
        return status;
      }
    }
  }
  // No tool call — return the plain-text answer.
  return message.content;
}
// Usage
await runAgent('Create a timelapse video of a flower blooming');
Gemini Agent Example
import { GoogleGenerativeAI } from '@google/generative-ai';
import CreativeAI from '@creativeai/node-sdk';
const genAI = new GoogleGenerativeAI(process.env.GOOGLE_API_KEY);
const creative = new CreativeAI({ apiKey: process.env.CREATIVEAI_API_KEY });
// Gemini takes tool declarations at model-construction time (not per
// request); the configured model can then emit functionCall parts.
const model = genAI.getGenerativeModel({
  model: 'gemini-2.5-flash',
  tools: [
    {
      functionDeclarations: [
        {
          name: 'generate_video',
          description: 'Generate an AI video from text prompt',
          parameters: {
            type: 'object',
            properties: {
              prompt: { type: 'string', description: 'Video description' },
              duration: { type: 'integer', default: 5 }, // seconds
              model: { type: 'string', enum: ['auto', 'kling-v3-pro', 'seedance-1.5'] }
            },
            required: ['prompt']
          }
        }
      ]
    }
  ]
});
/**
 * Single-turn Gemini agent: generates content for the user message and,
 * if the model requested the `generate_video` tool, starts the video job.
 *
 * @param userMessage natural-language request from the end user
 * @returns the CreativeAPI job when a tool was called, otherwise the
 *          model's text response
 */
async function runAgent(userMessage: string) {
  const result = await model.generateContent(userMessage);
  const response = result.response;

  // `functionCalls` is a method on the response object; the optional call
  // guards SDK versions where it may be absent.
  const functionCalls = response.functionCalls?.();
  if (functionCalls) {
    for (const call of functionCalls) {
      if (call.name === 'generate_video') {
        // Narrow the loosely-typed args once instead of casting per field.
        const args = call.args as {
          prompt: string;
          model?: string;
          duration?: number;
        };
        const job = await creative.videos.generate({
          prompt: args.prompt,
          model: args.model ?? 'auto', // ?? (not ||): only default on null/undefined
          duration: args.duration ?? 5
        });
        console.log('Video job started:', job.id);
        return job;
      }
    }
  }
  // No tool call — return the plain-text answer.
  return response.text();
}
// Usage
await runAgent('Generate a video of clouds passing over mountains at sunset');
LangChain Integration
import { ChatOpenAI } from '@langchain/openai';
import { AgentExecutor, createToolCallingAgent } from 'langchain/agents';
import { pull } from 'langchain/hub';
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import CreativeAI from '@creativeai/node-sdk';
const creative = new CreativeAI({ apiKey: process.env.CREATIVEAI_API_KEY });
// Define tools with Zod schemas
// DynamicStructuredTool derives the function-calling schema from Zod, so
// argument validation and defaults are applied before `func` runs.
const videoTool = new DynamicStructuredTool({
  name: 'generate_video',
  description: 'Generate an AI video from text. Use for video creation requests.',
  schema: z.object({
    prompt: z.string().describe('Video description'),
    duration: z.number().min(3).max(15).default(5),
    model: z.enum(['auto', 'kling-v3-pro', 'seedance-1.5']).default('auto')
  }),
  func: async ({ prompt, duration, model }) => {
    // Video generation is async: return the job id immediately and let
    // the webhook report completion.
    const job = await creative.videos.generate({
      prompt,
      duration,
      model,
      webhook_url: process.env.WEBHOOK_URL
    });
    // Tool outputs must be strings — serialize the result for the agent.
    return JSON.stringify({ job_id: job.id, status: 'processing' });
  }
});
const imageTool = new DynamicStructuredTool({
  name: 'generate_image',
  description: 'Generate an AI image from text. Use for image creation requests.',
  schema: z.object({
    prompt: z.string().describe('Image description'),
    model: z.enum(['gpt-image-1', 'flux-pro', 'seedream-3']).default('gpt-image-1')
  }),
  func: async ({ prompt, model }) => {
    // Image generation is synchronous — a URL is available immediately.
    const result = await creative.images.generate({ prompt, model });
    return JSON.stringify({ url: result.data[0].url });
  }
});
// Create LangChain agent
// temperature: 0 keeps tool selection deterministic across runs.
const llm = new ChatOpenAI({ model: 'gpt-4.1', temperature: 0 });
const agent = await createToolCallingAgent({
  llm,
  tools: [videoTool, imageTool],
  // Pull the standard tool-calling prompt template from LangChain Hub.
  prompt: await pull('hwchase17/openai-tools-agent')
});
const agentExecutor = new AgentExecutor({ agent, tools: [videoTool, imageTool] });
// Run agent
const result = await agentExecutor.invoke({
  input: 'Create a video of a robot painting a sunset'
});
console.log(result.output);
Webhook Handler for Async Jobs
Video generation is async. Handle completion via webhooks:
// Handle CreativeAPI webhooks in Next.js
// app/api/webhooks/creativeai/route.ts
import { createHmac, timingSafeEqual } from 'crypto';
import { NextRequest, NextResponse } from 'next/server';
// Shared secret for HMAC signature verification.
// `!` asserts the env var is set — deployment must provide it.
const WEBHOOK_SECRET = process.env.CREATIVEAI_WEBHOOK_SECRET!;
/**
 * Verify a CreativeAPI webhook signature.
 *
 * Recomputes the HMAC-SHA256 of the raw request body with the shared
 * secret and compares it to the header value in constant time.
 * A plain `===` comparison leaks timing information an attacker could use
 * to forge signatures byte-by-byte, so we use `timingSafeEqual`.
 *
 * @param payload   raw request body exactly as received
 * @param signature hex digest from the x-creativeai-signature header
 * @returns true when the signature matches
 */
function verifySignature(payload: string, signature: string): boolean {
  const expected = createHmac('sha256', WEBHOOK_SECRET)
    .update(payload)
    .digest('hex');
  const provided = Buffer.from(signature);
  const computed = Buffer.from(expected);
  // timingSafeEqual throws on length mismatch, so check length first
  // (length is not secret — it is fixed by the digest algorithm).
  return provided.length === computed.length && timingSafeEqual(provided, computed);
}
/**
 * Next.js App Router handler for CreativeAPI webhook deliveries.
 * Verifies the HMAC signature, then dispatches on the event type and
 * acknowledges with 200 so the sender stops retrying.
 */
export async function POST(request: NextRequest) {
  const body = await request.text();
  const signature = request.headers.get('x-creativeai-signature') ?? '';

  // Verify webhook authenticity
  const authentic = verifySignature(body, signature);
  if (!authentic) {
    return NextResponse.json({ error: 'Invalid signature' }, { status: 401 });
  }

  const event = JSON.parse(body);
  if (event.type === 'video.completed') {
    console.log('Video ready:', event.data.output_url);
    // Notify your agent/user
    await notifyAgent(event.data);
  } else if (event.type === 'video.failed') {
    console.log('Video failed:', event.data.error);
    // Handle failure, maybe retry
  }

  return NextResponse.json({ received: true });
}
/**
 * Deliver a completed-video event back to the agent (placeholder).
 * Wire this to your message queue or database in a real deployment.
 *
 * @param videoData webhook `event.data` payload; `unknown` (rather than
 *                  `any`) forces callers of a real implementation to
 *                  validate the shape before use
 */
async function notifyAgent(videoData: unknown) {
  // Push to your agent's message queue
  // Or update database for agent to poll
}
Key Implementation Points
Async Generation
Videos take 30-90 seconds. Use webhooks or poll status endpoint. Don't block your agent waiting.
Webhook Delivery
3x retry with backoff (0s, 5s, 30s). HMAC signature for verification. 24-hour status API fallback.
Multi-model Failover
Use model='auto' for automatic failover. Agent doesn't need to handle provider errors.
Sync Images
Image generation is synchronous. Returns URLs immediately. No polling needed.
Available Agent Tools
| Tool | Description | Async | Use When |
|---|---|---|---|
| generate_video | Create video from text | Yes | User requests video content |
| generate_image | Create image from text | No | User requests image/picture |
| get_job_status | Check video job status | No | Polling for completion |
| list_models | Discover available models | No | User asks about capabilities |
Related Tutorials