---
name: daggr
description: Build DAG-based AI pipelines connecting Gradio Spaces, HuggingFace models, and Python functions into visual workflows. Use when asked to create a workflow, build a pipeline, connect AI models, chain Gradio Spaces, create a daggr app, build multi-step AI applications, or orchestrate ML models. Triggers on: "build a workflow", "create a pipeline", "connect models", "daggr", "chain Spaces", "AI pipeline".
---
npx skill4agent add gradio-app/daggr daggr

from daggr import GradioNode, FnNode, InferenceNode, Graph, ItemList
import gradio as gr
graph = Graph(name="My Workflow", nodes=[node1, node2, ...])
graph.launch()  # Starts web server with visual DAG UI

node = GradioNode(
space_or_url="owner/space-name",
api_name="/endpoint",
inputs={
"param": gr.Textbox(label="Input"), # UI input
"other": other_node.output_port, # Port connection
"fixed": "constant_value", # Fixed value
},
postprocess=lambda *returns: returns[0], # Transform response
outputs={"result": gr.Image(label="Output")},
)
# Example: image generation
img = GradioNode("Tongyi-MAI/Z-Image-Turbo", api_name="/generate",
inputs={"prompt": gr.Textbox(), "resolution": "1024x1024 ( 1:1 )"},
postprocess=lambda imgs, *_: imgs[0]["image"],
    outputs={"image": gr.Image()})

https://huggingface.co/api/spaces/semantic-search?q=generate+music+for+a+video&sdk=gradio&includeNonRunning=false
https://huggingface.co/api/spaces/semantic-search?category=image-generation&sdk=gradio&includeNonRunning=false

def process(input1: str, input2: int) -> str:
    return f"{input1}: {input2}"
node = FnNode(
fn=process,
inputs={"input1": gr.Textbox(), "input2": other_node.port},
outputs={"result": gr.Textbox()},
)

https://huggingface.co/api/models?inference_provider=all&pipeline_tag=text-to-image

node = InferenceNode(
model="org/model:provider", # model:provider (fal-ai, replicate, together, etc.)
inputs={"image": other_node.image, "prompt": gr.Textbox()},
outputs={"image": gr.Image()},
)

https://huggingface.co/settings/tokens/new?ownUserPermissions=inference.serverless.write&tokenType=fineGrained
https://huggingface.co/subscribe/pro

inputs={...}

inputs={"param": previous_node.output_port}  # Basic connection
inputs={"item": items_node.items.field_name} # Scattered (per-item)
inputs={"all": scattered_node.output.all()}  # Gathered (collect list)

def gen_items(n: int) -> list:
    return [{"text": f"Item {i}"} for i in range(n)]
items = FnNode(fn=gen_items,
outputs={"items": ItemList(text=gr.Textbox())})
# Runs once per item
process = FnNode(fn=process_item,
inputs={"text": items.items.text},
outputs={"result": gr.Textbox()})
# Collect all results
final = FnNode(fn=combine,
inputs={"all": process.result.all()},
    outputs={"out": gr.Textbox()})

curl -s "https://<space-subdomain>.hf.space/gradio_api/openapi.json"

<space-subdomain> is the Space id lowercased with "/" replaced by "-", e.g. Tongyi-MAI/Z-Image-Turbo -> tongyi-mai-z-image-turbo

path = file.get("path") if isinstance(file, dict) else file

postprocess=lambda imgs, seed, num: imgs[0]["image"]

.test()

node.test(param="value")

# Image Generation
GradioNode("Tongyi-MAI/Z-Image-Turbo", api_name="/generate",
inputs={"prompt": gr.Textbox(), "resolution": "1024x1024 ( 1:1 )"},
postprocess=lambda imgs, *_: imgs[0]["image"],
outputs={"image": gr.Image()})
# Text-to-Speech
GradioNode("Qwen/Qwen3-TTS", api_name="/generate_voice_design",
inputs={"text": gr.Textbox(), "language": "English", "voice_description": "..."},
postprocess=lambda audio, status: audio,
outputs={"audio": gr.Audio()})
# Image-to-Video
GradioNode("alexnasa/ltx-2-TURBO", api_name="/generate_video",
inputs={"input_image": img.image, "prompt": gr.Textbox(), "duration": 5},
postprocess=lambda video, seed: video,
outputs={"video": gr.Video()})
# ffmpeg composition (import tempfile, subprocess)
def combine(video: str|dict, audio: str|dict) -> str:
    v = video.get("path") if isinstance(video, dict) else video
    a = audio.get("path") if isinstance(audio, dict) else audio
    out = tempfile.mktemp(suffix=".mp4")
    subprocess.run(["ffmpeg","-y","-i",v,"-i",a,"-shortest",out])
    return out

uvx --python 3.12 daggr workflow.py &  # Launch in background, hot reloads on file changes

hf auth login

HF_TOKEN

--secret HF_TOKEN=xxx

daggr deploy workflow.py

daggr deploy workflow.py --name my-space # Custom Space name
daggr deploy workflow.py --org huggingface # Deploy to an organization
daggr deploy workflow.py --private # Private Space
daggr deploy workflow.py --hardware t4-small # GPU (t4-small, t4-medium, a10g-small, etc.)
daggr deploy workflow.py --secret KEY=value # Add secrets (repeatable)
daggr deploy workflow.py --dry-run # Preview without deploying