Loading...
Loading...
Expert skill for using MOSS-TTS-Nano, a 0.1B parameter multilingual real-time TTS model that runs on CPU with voice cloning support.
npx skill4agent add aradotso/trending-skills moss-tts-nano-speech

Skill by ara.so — Daily 2026 Skills collection.
conda create -n moss-tts-nano python=3.12 -y
conda activate moss-tts-nano
git clone https://github.com/OpenMOSS/MOSS-TTS-Nano.git
cd MOSS-TTS-Nano
pip install -r requirements.txt
pip install -e .

conda install -c conda-forge pynini=2.1.6.post1 -y
pip install git+https://github.com/WhizZest/WeTextProcessing.git
pip install -e .

Model weights are downloaded on first use from OpenMOSS-Team/MOSS-TTS-Nano and
OpenMOSS-Team/MOSS-Audio-Tokenizer-Nano (also published as openmoss/MOSS-TTS-Nano
and openmoss/MOSS-Audio-Tokenizer-Nano).

moss-tts-nano generate \
    --prompt-speech assets/audio/zh_1.wav \
    --text "欢迎关注模思智能、上海创智学院与复旦大学自然语言处理实验室。"

Output is saved to generated_audio/moss_tts_nano_output.wav by default.

moss-tts-nano generate \
    --prompt-speech assets/audio/zh_1.wav \
    --text-file my_script.txt \
    --output output.wav

moss-tts-nano serve
# or directly:
python app.py

The server listens at http://127.0.0.1:18083.

python infer.py \
    --prompt-audio-path assets/audio/zh_1.wav \
    --text "Hello, this is a test of MOSS-TTS-Nano."

Output is saved to generated_audio/infer_output.wav by default.

from infer import MossTTSNanoInference
# Initialize once (downloads weights on first run)
tts = MossTTSNanoInference()

# Voice clone: synthesize text in the style of the reference audio
audio = tts.infer(
    text="欢迎使用MOSS语音合成系统。",
    prompt_audio_path="assets/audio/zh_1.wav",
)

# Save output
import soundfile as sf
sf.write("output.wav", audio, samplerate=48000)

from infer import MossTTSNanoInference
tts = MossTTSNanoInference()
audio = tts.infer(
    text="Welcome to MOSS TTS Nano, a tiny but capable text to speech model.",
    prompt_audio_path="assets/audio/en_sample.wav",
)
import soundfile as sf
sf.write("english_output.wav", audio, samplerate=48000)

from infer import MossTTSNanoInference
import soundfile as sf
import numpy as np

tts = MossTTSNanoInference()
chunks = []
for audio_chunk in tts.infer_stream(
    text="This sentence is generated chunk by chunk for low latency playback.",
    prompt_audio_path="assets/audio/en_sample.wav",
):
    chunks.append(audio_chunk)
    # process or play chunk in real time here

full_audio = np.concatenate(chunks)
sf.write("streamed_output.wav", full_audio, samplerate=48000)

from infer import MossTTSNanoInference
tts = MossTTSNanoInference()
long_text = """
MOSS-TTS-Nano supports long-form synthesis through automatic chunking.
Each chunk uses the same reference voice, producing consistent speaker identity
across the entire output even for multi-paragraph documents.
"""
audio = tts.infer(
    text=long_text,
    prompt_audio_path="assets/audio/en_sample.wav",
)
import soundfile as sf
sf.write("long_form_output.wav", audio, samplerate=48000)

Start the server with `moss-tts-nano serve` or `python app.py`, then call the HTTP API:

import requests
import base64
import soundfile as sf
import io
import numpy as np

# Read reference audio as base64
with open("assets/audio/zh_1.wav", "rb") as f:
    ref_audio_b64 = base64.b64encode(f.read()).decode()

response = requests.post(
    "http://127.0.0.1:18083/generate",
    json={
        "text": "你好,这是一个语音合成测试。",
        "prompt_audio_base64": ref_audio_b64,
    },
)
data = response.json()
audio_bytes = base64.b64decode(data["audio_base64"])
audio_array, sr = sf.read(io.BytesIO(audio_bytes))
sf.write("api_output.wav", audio_array, samplerate=sr)

import requests
import base64

with open("assets/audio/zh_1.wav", "rb") as f:
    ref_audio_b64 = base64.b64encode(f.read()).decode()

with requests.post(
    "http://127.0.0.1:18083/generate_stream",
    json={
        "text": "流式语音合成示例,适合实时播放场景。",
        "prompt_audio_base64": ref_audio_b64,
    },
    stream=True,
) as resp:
    with open("stream_output.wav", "wb") as out:
        for chunk in resp.iter_content(chunk_size=4096):
            out.write(chunk)

| Code | Language | Code | Language | Code | Language |
|---|---|---|---|---|---|
| zh | Chinese | en | English | de | German |
| es | Spanish | fr | French | ja | Japanese |
| it | Italian | hu | Hungarian | ko | Korean |
| ru | Russian | fa | Persian | ar | Arabic |
| pl | Polish | pt | Portuguese | cs | Czech |
| da | Danish | sv | Swedish | el | Greek |
| tr | Turkish | | | | |

| Flag | Alias | Description |
|---|---|---|
| `--prompt-speech` | — | Path to reference WAV for voice cloning (`moss-tts-nano generate`) |
| `--prompt-audio-path` | — | Same purpose in `infer.py` |
| `--text` | — | Input text string |
| `--text-file` | — | Path to plain text file for long-form synthesis |
| `--output` | — | Output WAV file path (default varies by entrypoint) |
from infer import MossTTSNanoInference
import soundfile as sf

tts = MossTTSNanoInference()
ref = "assets/audio/zh_1.wav"
sentences = [
    "第一句话,用于批量合成测试。",
    "第二句话,保持相同的音色。",
    "第三句话,输出独立的音频文件。",
]
for i, sentence in enumerate(sentences):
    audio = tts.infer(text=sentence, prompt_audio_path=ref)
    sf.write(f"output_{i:02d}.wav", audio, samplerate=48000)
    print(f"Saved output_{i:02d}.wav")

import sounddevice as sd
import numpy as np
from infer import MossTTSNanoInference

tts = MossTTSNanoInference()
buffer = []
for chunk in tts.infer_stream(
    text="Real-time playback example using sounddevice.",
    prompt_audio_path="assets/audio/en_sample.wav",
):
    buffer.append(chunk)

audio = np.concatenate(buffer)
sd.play(audio, samplerate=48000)
sd.wait()

import gradio as gr
import soundfile as sf
import numpy as np
import io
from infer import MossTTSNanoInference

tts = MossTTSNanoInference()

def synthesize(reference_audio_path: str, text: str):
    audio = tts.infer(text=text, prompt_audio_path=reference_audio_path)
    # Return as (sample_rate, numpy_array) tuple for Gradio Audio component
    return (48000, audio)

demo = gr.Interface(
    fn=synthesize,
    inputs=[
        gr.Audio(type="filepath", label="Reference Voice"),
        gr.Textbox(label="Text to synthesize"),
    ],
    outputs=gr.Audio(label="Generated Speech"),
    title="MOSS-TTS-Nano Voice Clone",
)
demo.launch()

# Use conda to get pynini, then install from source
conda install -c conda-forge pynini=2.1.6.post1 -y
pip install git+https://github.com/WhizZest/WeTextProcessing.git

If Hugging Face downloads fail, set the HF_ENDPOINT mirror:

export HF_ENDPOINT=https://hf-mirror.com
python infer.py --prompt-audio-path assets/audio/zh_1.wav --text "测试"

Alternatively, `pip install modelscope` and fetch openmoss/MOSS-TTS-Nano and
openmoss/MOSS-Audio-Tokenizer-Nano from ModelScope.

If `infer_stream` or the `moss-tts-nano` command is not found:

# Re-run editable install inside the active conda env
pip install -e .
which moss-tts-nano  # should resolve now

# Default port is 18083; check what occupies it
lsof -i :18083
# Kill if needed, then relaunch
moss-tts-nano serve

| Entrypoint | Default output path |
|---|---|
| `moss-tts-nano generate` | `generated_audio/moss_tts_nano_output.wav` |
| `python infer.py` | `generated_audio/infer_output.wav` |
| `moss-tts-nano serve` | returned via HTTP response |

Default output directory: generated_audio/