Loading...
Loading...
Compare original and translation side by side
PREREQUISITE: Run +fetch-api-reference first to load the latest API reference before integrating. Requires +setup-api-key for API credentials. Requires +integrate-uploads for local audio/video files.
前置条件: 集成前请先运行 +fetch-api-reference 加载最新的API参考文档。需要 +setup-api-key 配置API凭证。处理本地音频/视频文件需要 +integrate-uploads。
| Model | Endpoint | Use Case | Cost |
|---|---|---|---|
| eleven_multilingual_v2 | | Text to speech | 1 credit/50 chars |
| eleven_text_to_sound_v2 | | Sound effect generation | 1-2 credits |
| eleven_voice_isolation | | Isolate voice from audio | 1 credit/6 sec |
| eleven_voice_dubbing | | Dub audio to other languages | 1 credit/2 sec |
| eleven_multilingual_sts_v2 | | Voice conversion | 1 credit/3 sec |
| 模型 | 接口端点 | 适用场景 | 费用 |
|---|---|---|---|
| eleven_multilingual_v2 | | 文本转语音 | 1积分/50字符 |
| eleven_text_to_sound_v2 | | 音效生成 | 1-2积分 |
| eleven_voice_isolation | | 从音频中分离人声 | 1积分/6秒 |
| eleven_voice_dubbing | | 将音频配音为其他语言 | 1积分/2秒 |
| eleven_multilingual_sts_v2 | | 声音转换 | 1积分/3秒 |
import RunwayML from '@runwayml/sdk';
const client = new RunwayML();
const task = await client.textToSpeech.create({
model: 'eleven_multilingual_v2',
text: 'Hello, welcome to our application!',
voiceId: 'voice_id_here' // See voice listing endpoint
}).waitForTaskOutput();
const audioUrl = task.output[0];import RunwayML from '@runwayml/sdk';
const client = new RunwayML();
const task = await client.textToSpeech.create({
model: 'eleven_multilingual_v2',
text: 'Hello, welcome to our application!',
voiceId: 'voice_id_here' // See voice listing endpoint
}).waitForTaskOutput();
const audioUrl = task.output[0];from runwayml import RunwayML
client = RunwayML()
task = client.text_to_speech.create(
model='eleven_multilingual_v2',
text='Hello, welcome to our application!',
voice_id='voice_id_here'
).wait_for_task_output()
audio_url = task.output[0]from runwayml import RunwayML
client = RunwayML()
task = client.text_to_speech.create(
model='eleven_multilingual_v2',
text='Hello, welcome to our application!',
voice_id='voice_id_here'
).wait_for_task_output()
audio_url = task.output[0]const task = await client.soundEffect.create({
model: 'eleven_text_to_sound_v2',
promptText: 'Thunder rolling across a stormy sky'
}).waitForTaskOutput();task = client.sound_effect.create(
model='eleven_text_to_sound_v2',
prompt_text='Thunder rolling across a stormy sky'
).wait_for_task_output()const task = await client.soundEffect.create({
model: 'eleven_text_to_sound_v2',
promptText: 'Thunder rolling across a stormy sky'
}).waitForTaskOutput();task = client.sound_effect.create(
model='eleven_text_to_sound_v2',
prompt_text='Thunder rolling across a stormy sky'
).wait_for_task_output()// If using a local file, upload first
const upload = await client.uploads.createEphemeral(
fs.createReadStream('/path/to/noisy-audio.mp3')
);
const task = await client.voiceIsolation.create({
model: 'eleven_voice_isolation',
audio: upload.runwayUri
}).waitForTaskOutput();// If using a local file, upload first
const upload = await client.uploads.createEphemeral(
fs.createReadStream('/path/to/noisy-audio.mp3')
);
const task = await client.voiceIsolation.create({
model: 'eleven_voice_isolation',
audio: upload.runwayUri
}).waitForTaskOutput();const task = await client.voiceDubbing.create({
model: 'eleven_voice_dubbing',
audio: 'https://example.com/speech.mp3',
targetLanguage: 'es' // Spanish
}).waitForTaskOutput();const task = await client.voiceDubbing.create({
model: 'eleven_voice_dubbing',
audio: 'https://example.com/speech.mp3',
targetLanguage: 'es' // Spanish
}).waitForTaskOutput();const task = await client.speechToSpeech.create({
model: 'eleven_multilingual_sts_v2',
audio: 'https://example.com/original-speech.mp3',
voiceId: 'target_voice_id'
}).waitForTaskOutput();const task = await client.speechToSpeech.create({
model: 'eleven_multilingual_sts_v2',
audio: 'https://example.com/original-speech.mp3',
voiceId: 'target_voice_id'
}).waitForTaskOutput();import RunwayML from '@runwayml/sdk';
import express from 'express';
const client = new RunwayML();
const app = express();
app.use(express.json());
app.post('/api/text-to-speech', async (req, res) => {
try {
const { text, voiceId } = req.body;
const task = await client.textToSpeech.create({
model: 'eleven_multilingual_v2',
text,
voiceId
}).waitForTaskOutput();
res.json({ audioUrl: task.output[0] });
} catch (error) {
console.error('TTS failed:', error);
res.status(500).json({ error: error.message });
}
});import RunwayML from '@runwayml/sdk';
import express from 'express';
const client = new RunwayML();
const app = express();
app.use(express.json());
app.post('/api/text-to-speech', async (req, res) => {
try {
const { text, voiceId } = req.body;
const task = await client.textToSpeech.create({
model: 'eleven_multilingual_v2',
text,
voiceId
}).waitForTaskOutput();
res.json({ audioUrl: task.output[0] });
} catch (error) {
console.error('TTS failed:', error);
res.status(500).json({ error: error.message });
}
});from fastapi import FastAPI, HTTPException
# FastAPI integration: expose sound-effect generation behind a JSON endpoint.
# The scrape duplicated this entire app; the copy has been removed.
from pydantic import BaseModel
from runwayml import RunwayML

app = FastAPI()
client = RunwayML()


class SoundRequest(BaseModel):
    # Text description of the desired sound effect.
    prompt: str


@app.post("/api/sound-effect")
async def generate_sound(req: SoundRequest):
    """Generate a sound effect from a text prompt; returns {"audio_url": ...}."""
    try:
        # NOTE(review): wait_for_task_output() looks synchronous, which would
        # block the event loop inside this async handler — confirm against the
        # SDK docs, or run it in a thread pool (e.g. fastapi.concurrency.run_in_threadpool).
        task = client.sound_effect.create(
            model='eleven_text_to_sound_v2',
            prompt_text=req.prompt
        ).wait_for_task_output()
        return {"audio_url": task.output[0]}
    except Exception as e:
        # NOTE(review): str(e) may leak internal details to clients; consider
        # returning a generic message and logging the exception server-side.
        raise HTTPException(status_code=500, detail=str(e))

+integrate-uploads +api-reference