# rss-agent-discovery
AI agent-focused RSS feed discovery tool with JSON output. Use when Claude needs to discover RSS/Atom feeds from websites for monitoring, aggregation, or content syndication purposes. Triggered by: "find RSS feed", "discover RSS", "find Atom feed", "get RSS URLs", "find feeds from [URL]", or when working with content aggregation, feed readers, or RSS monitoring workflows.
npx skill4agent add brooksy4503/rss-agent-discovery rss-agent-discovery

npx -y rss-agent-discovery https://vercel.com

{
"success": true,
"results": [{
"url": "https://vercel.com/",
"feeds": [{
"url": "https://vercel.com/atom",
"title": "atom",
"type": "atom"
}],
"error": null,
"diagnostics": []
}]
}

npx -y rss-agent-discovery <url> [url2] [url3]...

npx -y rss-agent-discovery https://example.com | jq '.results[0].feeds'

{
success: boolean, // true if no URLs had errors
partialResults?: boolean, // true if success=false but some feeds found
results: [{
url: string, // scanned URL
feeds: [{
url: string, // feed URL
title: string, // feed title from HTML
type: 'rss' | 'atom' | 'unknown'
}],
error: string | null, // error message if scan failed (timeout errors normalized to "Timeout")
diagnostics?: string[] // optional array of warning messages for non-fatal issues
}]
}

Debug logging is off by default; pass --verbose to enable it on stderr, so stdout stays JSON-only. Key output fields: success, partialResults (present when success === false but some feeds were still found), error, and diagnostics (only emitted with --verbose).

Exit codes: 0 = feeds found (also returned by --help and --version), 1 = no feeds found, 2 = error. (Reconstructed from garbled text — verify against the tool's --help output.)

npx -y rss-agent-discovery https://example.com
if [ $? -eq 0 ]; then
echo "Feeds found!"
fi

--timeout <ms>        # Timeout per URL (default: 10000)
--skip-blogs # Skip blog subdirectory scanning
--max-blogs <n> # Limit blog scans (default: 3)
--blog-paths <paths> # Custom blog paths (comma or pipe separated)
--verbose # Enable debug logging to stderr (default: JSON-only output)
--help # Show help
--version             # Show version

npx -y rss-agent-discovery --timeout 15000 https://example.com
npx -y rss-agent-discovery --skip-blogs https://example.com
npx -y rss-agent-discovery --blog-paths '/blog,/news,/articles' https://example.com
npx -y rss-agent-discovery --blog-paths '/blog|/updates' https://example.com
npx -y rss-agent-discovery --max-blogs 5 https://example.com

Feeds are discovered from HTML <link> tags and common well-known paths: /rss.xml, /atom, /feed, /blog, /news, /articles.

npx -y rss-agent-discovery https://example.com | jq '.results[0].feeds[].url'

npx -y rss-agent-discovery https://site1.com https://site2.com https://site3.com

npx -y rss-agent-discovery https://example.com | jq -r '.results[0].feeds[].url'

npx -y rss-agent-discovery https://example.com
exit_code=$?
[ $exit_code -eq 0 ] && echo "Feeds found"

npx -y rss-agent-discovery --timeout 20000 https://slow-site.com

npx -y rss-agent-discovery --skip-blogs https://example.com

#!/bin/bash
# No need to redirect stderr - it's clean by default
result=$(npx -y rss-agent-discovery "$1")
if [ $? -eq 0 ]; then
echo "Found feeds:"
echo "$result" | jq '.results[0].feeds'
fi

import subprocess
import json
result = subprocess.run(
['npx', '-y', 'rss-agent-discovery', url],
capture_output=True,
text=True
)
if result.returncode == 0:
data = json.loads(result.stdout)
feeds = data['results'][0]['feeds']

const { execSync } = require('child_process');
const result = JSON.parse(
execSync('npx -y rss-agent-discovery https://example.com').toString()
);
const feeds = result.results[0].feeds;

Related tools: rss-url-finder, rss-finder. (Use --verbose for debug output — reconstructed from garbled text.)

npx -y rss-agent-discovery https://vercel.com
npx -y rss-agent-discovery https://news.ycombinator.com