# Scrapfly Screenshot API — Python SDK
Capture web page screenshots using the Scrapfly Screenshot API with the Python SDK.

- Skill install: `npx skill4agent add scrapfly/skills scrapfly-screenshot`
- SDK install: `pip install scrapfly-sdk`
- Authentication: set the `SCRAPFLY_API_KEY` environment variable.
- API endpoint: `GET https://api.scrapfly.io/screenshot`

Client setup:

```python
from scrapfly import ScrapflyClient, ScreenshotConfig
import os

client = ScrapflyClient(key=os.environ["SCRAPFLY_API_KEY"])
```

`ScreenshotConfig` parameters:

| Parameter | Type | Default | Description |
|---|---|---|---|
| `url` | str | required | Target URL to screenshot |
| `format` | str or `Format` | `"jpg"` | Image format: `"jpg"`, `"png"`, `"webp"`, `"gif"` |
| `capture` | str | `"viewport"` | Capture scope: `"viewport"`, `"fullpage"`, or a CSS selector for a single element |
| `resolution` | str | None | Screen dimensions, e.g. `"1920x1080"` |
| `country` | str | None | Proxy country (ISO 3166-1 alpha-2, e.g. `"us"`, `"de"`) |
| `timeout` | int | None | Total request timeout in milliseconds |
| `rendering_wait` | int | 1000 | Wait time in ms after page load before capture |
| `wait_for_selector` | str | None | Wait until this CSS/XPath selector is visible before capture |
| `options` | list[str] or list[`Options`] | None | Screenshot behavior flags, e.g. `"block_banners"`, `"dark_mode"` |
| `auto_scroll` | bool | False | Scroll to bottom to trigger lazy-loading before capture |
| `js` | str | None | Raw JavaScript to execute before capture (SDK will base64‑encode; max 16KB) |
| `cache` | bool | None | Enable response caching for this screenshot request |
| `cache_ttl` | bool or int | None | Cache TTL (time-to-live); only effective when `cache=True` |
| `cache_clear` | bool | None | Clear existing cache entry for this URL when `cache=True` |
| `vision_deficiency` | str or `VisionDeficiency` | None | Simulate vision impairment for accessibility testing, e.g. `"deuteranopia"` |
| `webhook` | str | None | Webhook name to receive async screenshot completion callbacks |
| `raise_on_upstream_error` | bool | True | Raise an exception if the upstream page returns an error status |
"""Scrapfly Screenshot API usage examples (Python SDK).

Each section below is a standalone documentation snippet; together they cover
basic capture, full-page capture, element capture, mobile viewports, screenshot
options, selector waits, geo-targeting, JavaScript injection, and error
handling. All examples write the returned image bytes to a local file.
"""
import base64
import os

from scrapfly import ScrapflyClient, ScreenshotConfig
from scrapfly.errors import ScrapflyError

client = ScrapflyClient(key=os.environ["SCRAPFLY_API_KEY"])

# --- Basic screenshot (default format/capture settings) ---
result = client.screenshot(ScreenshotConfig(
    url="https://web-scraping.dev/products",
))
# Save the screenshot image
with open("screenshot.jpg", "wb") as f:
    f.write(result.image)

# --- Full-page PNG; auto_scroll triggers lazy-loaded content first ---
result = client.screenshot(ScreenshotConfig(
    url="https://web-scraping.dev/products",
    format="png",
    capture="fullpage",
    auto_scroll=True,
))
with open("fullpage.png", "wb") as f:
    f.write(result.image)

# --- Capture a single element via CSS selector ---
result = client.screenshot(ScreenshotConfig(
    url="https://web-scraping.dev/product/1",
    rendering_wait=5000,
    format="png",
    capture="div.features",  # CSS selector
))
with open("element.png", "wb") as f:
    f.write(result.image)

# --- Mobile-sized viewport ---
result = client.screenshot(ScreenshotConfig(
    url="https://web-scraping.dev/products",
    resolution="375x812",  # iPhone viewport
))
with open("mobile.jpg", "wb") as f:
    f.write(result.image)

# --- Screenshot options: block cookie banners, render in dark mode ---
result = client.screenshot(ScreenshotConfig(
    url="https://web-scraping.dev/products",
    options=["block_banners", "dark_mode"],
    rendering_wait=2000,
))
with open("clean_dark.jpg", "wb") as f:
    f.write(result.image)

# --- Wait for a specific selector to be visible before capturing ---
result = client.screenshot(ScreenshotConfig(
    url="https://web-scraping.dev/products",
    wait_for_selector="div.products",
    rendering_wait=3000,
))
with open("fully_loaded_screenshot.jpg", "wb") as f:
    f.write(result.image)

# --- Geo-targeted capture through a German proxy ---
result = client.screenshot(ScreenshotConfig(
    url="https://web-scraping.dev/products",
    country="de",
    resolution="1920x1080",
))
with open("german_version.jpg", "wb") as f:
    f.write(result.image)

# --- Execute JavaScript before capture ---
# Example: remove navbar (adjust selector for your page: nav, header, .navbar, etc.)
js_code = "var el = document.querySelector('.navbar-collapse'); if (el) el.remove();"
# NOTE(review): the parameter docs say the SDK base64-encodes `js` itself; if your
# SDK version does, this manual encode would double-encode — verify before use.
js_code = base64.urlsafe_b64encode(js_code.encode('utf-8')).decode('utf-8')
result = client.screenshot(ScreenshotConfig(
    url="https://web-scraping.dev/products",
    js=js_code,  # remove the nav bar
    rendering_wait=1000,
))
with open("clean.jpg", "wb") as f:
    f.write(result.image)

# --- Error handling: ScrapflyError is the SDK's base exception ---
try:
    result = client.screenshot(ScreenshotConfig(url="https://web-scraping.dev/products"))
    with open("screenshot.jpg", "wb") as f:
        f.write(result.image)
except ScrapflyError as e:
    print(f"Screenshot failed: {e.message}")