Loading...
Loading...
Generate videos from text prompts or animate static images using ModelsLab's v7 Video Fusion API. Supports text-to-video, image-to-video, video-to-video, lip-sync, and motion control with 40+ models including Seedance, Wan, Veo, Sora, Kling, and Hailuo.
npx skill4agent add modelslab/skills modelslab-video-generation

Endpoints:
- POST https://modelslab.com/api/v7/video-fusion/text-to-video
- POST https://modelslab.com/api/v7/video-fusion/image-to-video
- POST https://modelslab.com/api/v7/video-fusion/video-to-video
- POST https://modelslab.com/api/v7/video-fusion/lip-sync
- POST https://modelslab.com/api/v7/video-fusion/motion-control
- POST https://modelslab.com/api/v7/video-fusion/fetch/{id}

Note: v6 endpoints (e.g. /api/v6/video/text2video) still work, but v7 is the current version.
# Search all video models
modelslab models search --feature video_fusion
# Search by name
modelslab models search --search "seedance"
modelslab models search --search "wan"
modelslab models search --search "veo"
# Get model details
modelslab models detail --id seedance-t2v

import requests
import time
def generate_video(prompt, api_key, model_id="seedance-t2v"):
    """Generate a video from a text prompt.

    Args:
        prompt: Text description of the video.
        api_key: Your ModelsLab API key.
        model_id: Video model to use (default "seedance-t2v").

    Returns:
        URL of the generated video.

    Raises:
        Exception: If the API reports an error, returns an unexpected
            payload, or polling fails/times out.
    """
    response = requests.post(
        "https://modelslab.com/api/v7/video-fusion/text-to-video",
        json={
            "key": api_key,
            "model_id": model_id,
            "prompt": prompt,
            "negative_prompt": "low quality, blurry, static, distorted"
        }
    )
    data = response.json()
    if data["status"] == "error":
        raise Exception(f"Error: {data['message']}")
    if data["status"] == "success":
        return data["output"][0]
    # Video generation is async - poll for results.
    # Guard against unexpected payloads carrying no request id;
    # previously this raised a bare KeyError with no context.
    request_id = data.get("id")
    if request_id is None:
        raise Exception(f"Unexpected API response: {data}")
    print(f"Video processing... Request ID: {request_id}")
    print(f"Estimated time: {data.get('eta', 'unknown')} seconds")
    return poll_video_result(request_id, api_key)
def poll_video_result(request_id, api_key, timeout=600):
    """Poll the fetch endpoint until video generation completes.

    Args:
        request_id: Id returned by the generation endpoint.
        api_key: Your ModelsLab API key.
        timeout: Maximum seconds to wait before giving up.

    Returns:
        URL of the generated video.

    Raises:
        Exception: On generation failure, API error, or timeout.
    """
    start_time = time.time()
    while time.time() - start_time < timeout:
        fetch = requests.post(
            f"https://modelslab.com/api/v7/video-fusion/fetch/{request_id}",
            json={"key": api_key}
        )
        result = fetch.json()
        if result["status"] == "success":
            return result["output"][0]
        # The other endpoints in this file report terminal problems as
        # status "error"; treat it the same as "failed" instead of
        # polling until the timeout expires.
        elif result["status"] in ("failed", "error"):
            raise Exception(result.get("message", "Generation failed"))
        print(f"Status: processing... ({int(time.time() - start_time)}s elapsed)")
        time.sleep(10)
    raise Exception("Timeout waiting for video generation")
# Usage
video_url = generate_video(
    "A spaceship flying through an asteroid field, cinematic, 4K",
    "your_api_key",
    model_id="seedance-t2v"
)
print(f"Video ready: {video_url}")


def animate_image(image_url, prompt, api_key, model_id="seedance-i2v"):
    """Animate a static image based on a motion prompt.

    Args:
        image_url: URL of the image to animate.
        prompt: Description of desired motion/animation.
        api_key: Your ModelsLab API key.
        model_id: Video model for image-to-video.

    Returns:
        URL of the generated video.
    """
    payload = {
        "key": api_key,
        "model_id": model_id,
        "init_image": [image_url],  # v7 expects array
        "prompt": prompt,
        "negative_prompt": "static, still, low quality, blurry"
    }
    data = requests.post(
        "https://modelslab.com/api/v7/video-fusion/image-to-video",
        json=payload
    ).json()
    status = data["status"]
    if status == "success":
        return data["output"][0]
    if status == "processing":
        # Async job - poll until the video is ready.
        return poll_video_result(data["id"], api_key)
    raise Exception(data.get("message", "Unknown error"))


# Animate a landscape
video = animate_image(
    "https://example.com/landscape.jpg",
    "The clouds moving slowly across the sky, birds flying in the distance",
    "your_api_key",
    model_id="seedance-i2v"
)
print(f"Animated video: {video}")def transform_video(video_url, prompt, api_key, model_id="wan2.1"):
"""Transform an existing video with a new style or content.
Args:
video_url: URL of the source video
prompt: Description of desired transformation
"""
response = requests.post(
"https://modelslab.com/api/v7/video-fusion/video-to-video",
json={
"key": api_key,
"model_id": model_id,
"init_video": [video_url], # v7 expects array
"prompt": prompt
}
)
data = response.json()
if data["status"] == "processing":
return poll_video_result(data["id"], api_key)
elif data["status"] == "success":
return data["output"][0]def lip_sync(video_url, audio_url, api_key, model_id="lipsync-2"):
"""Sync lip movements to audio.
Args:
video_url: URL of the video with a face
audio_url: URL of the audio to sync to
"""
response = requests.post(
"https://modelslab.com/api/v7/video-fusion/lip-sync",
json={
"key": api_key,
"model_id": model_id,
"init_video": video_url,
"init_audio": audio_url
}
)
data = response.json()
if data["status"] == "processing":
return poll_video_result(data["id"], api_key)
elif data["status"] == "success":
return data["output"][0]seedance-t2vseedance-1.0-pro-fast-t2vwan2.6-t2vwan2.1veo2veo3sora-2Hailuo-2.3-t2vkling-v2-5-turbo-t2vseedance-i2vseedance-1.0-pro-i2vwan2.6-i2vHailuo-2.3-i2vkling-v2-1-i2vlipsync-2kling-motion-controlomni-human| Parameter | Description | Recommended Values |
|---|---|---|
| `model_id` | Video generation model (required) | See model tables above |
| `prompt` | Text description of video content | Be specific about motion and scene |
| `negative_prompt` | What to avoid | "static, low quality, blurry" |
| `init_image` | Source image for i2v (array) | |
| `init_video` | Source video for v2v (array) | |
| `init_audio` | Audio for lip-sync/video | URL string |
| `height` / `width` | Video dimensions (512-1024) | 512, 768, 1024 |
| `duration` | Video length in seconds | 4-30 |
| `aspect_ratio` | Aspect ratio | "16:9", "9:16", "1:1" |
| `webhook` | Async notification URL | URL string |
| `track_id` | Custom tracking identifier | Any string |
Bad: "A cat"
Good: "A cat walking through a garden, looking around curiously, sunlight filtering through trees"
Include: Action, movement, camera motion, atmosphere

# Video generation is ALWAYS async
# Always implement polling or use webhooks
if data["status"] == "processing":
    video = poll_video_result(data["id"], api_key)

payload = {
"key": api_key,
"model_id": "seedance-t2v",
"prompt": "...",
"webhook": "https://yourserver.com/webhook/video",
"track_id": "video_001"
}

try:
    video = generate_video(prompt, api_key, model_id="seedance-t2v")
    print(f"Video generated: {video}")
except Exception as e:
    print(f"Video generation failed: {e}")

Related skills: modelslab-webhooks, modelslab-model-discovery, modelslab-image-generation, modelslab-audio-generation, modelslab-chat-generation