# Osmedeus Expert Skill
Expert guide for the Osmedeus security automation workflow engine. Use when: (1) writing or editing YAML workflows (modules and flows), (2) running osmedeus CLI commands (scan, workflow management, installation, server), (3) configuring steps, runners, triggers, or template variables, (4) debugging workflow execution issues, (5) building security scanning pipelines, (6) working with agent/LLM step types, or (7) any question about osmedeus features, architecture, or best practices.
## Installation

npx skill4agent add osmedeus/osmedeus-skills osmedeus-expert

## Template variable syntax

Workflow and global variables use `{{Variable}}`; loop-scoped variables (e.g. inside `foreach`) use `[[variable]]`.

## CLI: running scans

# Run a flow against a target
osmedeus run -f <flow-name> -t <target>
# Run a module
osmedeus run -m <module-name> -t <target>
# Run multiple modules in sequence
osmedeus run -m mod1 -m mod2 -t <target>
# Multiple targets from file with concurrency
osmedeus run -m <module> -T targets.txt -c 5
# With parameters
osmedeus run -m <module> -t <target> -p threads=20 -p depth=2
osmedeus run -m <module> -t <target> -P params.yaml
# With timeout and repeat
osmedeus run -m <module> -t <target> --timeout 2h
osmedeus run -m <module> -t <target> --repeat --repeat-wait-time 30m
# Dry run (show what would execute)
osmedeus run -m <module> -t <target> --dry-run
# Chunked processing for large target lists
osmedeus run -m <module> -T targets.txt --chunk-size 100 --chunk-part 0
# Distributed execution
osmedeus run -m <module> -t <target> --distributed-run

## CLI: workflow management

osmedeus workflow list                      # List available workflows
osmedeus workflow show <name> # Show workflow details
osmedeus workflow lint <workflow-path>      # Validate workflow YAML

## CLI: installation

osmedeus install base --preset              # Install base from preset repo
osmedeus install base --preset --keep-setting # Install base, keep settings
osmedeus install workflow --preset # Install workflows from preset
osmedeus install binary --all # Install all tool binaries
osmedeus install binary --name <name> # Install specific binary
osmedeus install binary --all --check # Check binary status
osmedeus install env # Add binaries to PATH
osmedeus install validate --preset          # Validate installation

## CLI: server and distributed workers

osmedeus server                             # Start REST API server
osmedeus server --master # Start as distributed master
osmedeus worker join # Join as distributed worker
osmedeus worker join --get-public-ip # Join with public IP detection
osmedeus worker status # Show registered workers
osmedeus worker eval -e '<expr>' # Evaluate function with distributed hooks
osmedeus worker set <id> <field> <value> # Update worker metadata
osmedeus worker queue list # List queued tasks
osmedeus worker queue new -f <flow> -t <target> # Queue task
osmedeus worker queue run --concurrency 5   # Process queued tasks

## CLI: cloud

osmedeus cloud config set <key> <value>     # Configure cloud provider
osmedeus cloud config list # List cloud config
osmedeus cloud create --instances N # Provision infrastructure
osmedeus cloud list # List active infrastructure
osmedeus cloud run -f <flow> -t <target> --instances N # Run distributed
osmedeus cloud destroy <id>                 # Destroy infrastructure

## CLI: utilities

osmedeus func list                          # List utility functions
osmedeus func e 'log_info("test")' # Evaluate a function
osmedeus snapshot export <workspace> # Export workspace as ZIP
osmedeus snapshot import <source> # Import workspace
osmedeus snapshot list # List snapshots
osmedeus update # Self-update
osmedeus update --check # Check for updates
osmedeus assets # List discovered assets
osmedeus assets -w <workspace> # Filter by workspace
osmedeus assets --source httpx --type web # Filter by source/type
osmedeus assets --stats # Show asset statistics
osmedeus assets --columns url,title,status_code # Custom columns
osmedeus assets --json # JSON output
osmedeus uninstall # Uninstall osmedeus
osmedeus uninstall --clean                  # Also remove workspaces data

## Minimal module example (YAML)

name: my-module
kind: module
params:
- name: threads
default: "10"
steps:
- name: scan-target
type: bash
command: echo "Scanning {{Target}}"
exports:
result: "output.txt"

## Minimal flow example (YAML)

name: my-flow
kind: flow
modules:
- name: enumeration
steps:
- name: find-subdomains
type: bash
command: subfinder -d {{Target}} -o {{Output}}/subs.txt
exports:
subdomains: "{{Output}}/subs.txt"
- name: scanning
depends_on: [enumeration]
condition: "file_length('{{subdomains}}') > 0"
steps:
- name: port-scan
type: bash
command: naabu -l {{subdomains}} -o {{Output}}/ports.txt

## Step types

| Type | Purpose | Key Fields |
|---|---|---|
| bash | Shell commands | command |
| function | JS utility functions | function |
| parallel-steps | Run steps concurrently | parallel_steps |
| foreach | Iterate over items | input, variable, step |
| remote | Execute on docker/ssh runner | Same as bash + runner |
| http | HTTP requests | url, method |
| llm | LLM API calls | prompt |
| agent | Agentic LLM with tool loop | query, agent_tools, max_iterations |

*(Note: the type names `bash`, `parallel-steps`, `foreach`, and `agent` match the examples below; the `function`, `remote`, `http`, and `llm` rows were reconstructed from a damaged table — verify against the official osmedeus docs.)*
- name: step-name # Required, unique identifier
type: bash # Required
pre_condition: "expr" # JS expression, skip if false
log: "Custom message" # Log message (supports templates)
timeout: 60 # Max seconds (or "1h", "30m")
exports: # Variables for subsequent steps
var_name: "value"
on_success: [{action: log, message: "done"}]
on_error: [{action: continue}]
decision: # Conditional routing
switch: "{{var}}"
cases:
"val1": {goto: step-a}
default: {goto: _end} # _end terminates workflow
depends_on: [other-step] # DAG dependencies

## Built-in template variables

{{Target}}, {{Output}}, {{Workspaces}}, {{RunUUID}}, {{WorkflowName}},
{{PlatformOS}}, {{PlatformArch}}, {{PlatformInDocker}},
{{PlatformInKubernetes}}, {{PlatformCloudProvider}}

Parameters declared under `params:` are referenced as `{{param_name}}`;
loop-scoped variables use `[[variable]]`.

## Workflow inheritance (YAML)

extends: parent-workflow-name
override:
params:
threads: "5"
steps:
mode: append # append | prepend | merge
add: [{name: extra, type: bash, command: "..."}]
remove: [step-to-remove]

## parallel-steps example (YAML)

- name: parallel-enum
type: parallel-steps
parallel_steps:
- name: subfinder
type: bash
command: subfinder -d {{Target}} -o {{Output}}/subfinder.txt
timeout: 600
- name: amass
type: bash
command: amass enum -passive -d {{Target}} -o {{Output}}/amass.txt
timeout: 900

## foreach example (YAML)

- name: scan-each-host
type: foreach
input: "{{hosts_file}}"
variable: host
threads: "{{threads}}"
step:
name: scan-host
type: bash
command: nmap -sV [[host]] -oX {{Output}}/nmap/[[host]].xml
timeout: 120
on_error: continue

## Decision (switch) example (YAML)

- name: check-depth
type: bash
command: echo "{{scan_depth}}"
decision:
switch: "{{scan_depth}}"
cases:
"quick": {goto: fast-scan}
"deep": {goto: full-scan}
default: {goto: standard-scan}

## Decision (conditions) example (YAML)

- name: route-by-conditions
type: bash
command: echo "Evaluating conditions"
decision:
conditions:
- if: "file_length('{{inputFile}}') > 100"
goto: deep-analysis
- if: "file_length('{{inputFile}}') > 0"
function: "log_info('file has content')"
- if: "{{enableNmap}}"
commands:
- "nmap -sV {{Target}}"

## Agent step example (YAML)

- name: analyze-findings
type: agent
query: "Analyze vulnerabilities in {{Output}}/vulns.json and prioritize by severity"
system_prompt: "You are a security analyst."
max_iterations: 10
agent_tools:
- preset: bash
- preset: read_file
- preset: grep_regex
- preset: save_content
memory:
max_messages: 30
persist_path: "{{Output}}/agent/conversation.json"
exports:
analysis: "{{agent_content}}"

## Flow module DAG example (YAML)

modules:
- name: recon
steps: [...]
- name: scanning
depends_on: [recon]
condition: "file_length('{{subdomains}}') > 0"
steps: [...]
- name: reporting
depends_on: [scanning]
steps: [...]

## Debugging checklist

- Validate YAML: osmedeus workflow lint <workflow-path>
- Dry run: osmedeus run -m <module> -t test --dry-run
- Verbose output: osmedeus run -m <module> -t <target> -v
- Variable scope: use [[var]] inside foreach loops, {{var}} elsewhere
- Useful condition helpers: file_length('path') > 0, is_empty('{{var}}')