# Perplexity API Integration

Integrate Perplexity API for web-grounded AI responses and search. Covers Sonar models, Search API, SDK usage (Python/TypeScript), streaming, structured outputs, filters, media attachments, Pro Search, and prompting. Keywords: Perplexity, Sonar, sonar-pro, sonar-reasoning-pro, sonar-deep-research, web search API, grounded LLM, chat completions, perplexityai SDK, image attachments, PDF analysis.
## Installation

npx skill4agent add itechmeat/llm-code perplexity

References: references/models.md, references/search-api.md, references/chat-completions.md, references/embeddings.md, references/structured-outputs.md, references/filters.md, references/media.md, references/pro-search.md, references/prompting.md

# Python
pip install perplexityai

# TypeScript/JavaScript
npm install @perplexityai/perplexity

## Authentication

# macOS/Linux
export PERPLEXITY_API_KEY="your_api_key_here"

# Windows
setx PERPLEXITY_API_KEY "your_api_key_here"

The SDK reads the API key from the `PERPLEXITY_API_KEY` environment variable.

from perplexity import Perplexity
# Basic chat completion: ask a web-grounded question and print the answer.
client = Perplexity()
response = client.chat.completions.create(
    model="sonar-pro",
    messages=[{"role": "user", "content": "What is the latest news on AI?"}],
)
answer = response.choices[0].message.content
print(answer)

from perplexity import Perplexity
client = Perplexity()
search = client.search.create(
query="artificial intelligence trends 2024",
max_results=5
)
for result in search.results:
print(f"{result.title}: {result.url}")| Model | Use Case | Cost |
|---|---|---|
| sonar | Quick facts, simple Q&A | Lowest |
| sonar-pro | Complex queries, research | Medium |
| sonar-reasoning-pro | Multi-step reasoning, analysis | Medium |
| sonar-deep-research | Exhaustive research, reports | Highest |
stream = client.chat.completions.create(
messages=[{"role": "user", "content": "Explain quantum computing"}],
model="sonar",
stream=True
)
for chunk in stream:
if chunk.choices[0].delta.content:
print(chunk.choices[0].delta.content, end="")messages = [
{"role": "system", "content": "You are a research assistant."},
{"role": "user", "content": "What causes climate change?"},
{"role": "assistant", "content": "Climate change is caused by..."},
{"role": "user", "content": "What are the solutions?"}
]
completion = client.chat.completions.create(messages=messages, model="sonar")completion = client.chat.completions.create(
messages=[{"role": "user", "content": "Latest renewable energy news"}],
model="sonar",
web_search_options={
"search_recency_filter": "week",
"search_domain_filter": ["energy.gov", "iea.org"]
}
)# REQUIRES stream=True
completion = client.chat.completions.create(
model="sonar-pro",
messages=[{"role": "user", "content": "Research solar panel ROI"}],
search_type="pro",
stream=True
)
for chunk in completion:
print(chunk.choices[0].delta.content or "", end="")completion = client.chat.completions.create(
model="sonar-pro",
messages=[{
"role": "user",
"content": [
{"type": "text", "text": "Describe this image"},
{"type": "image_url", "image_url": {"url": "https://example.com/image.jpg"}}
]
}]
)completion = client.chat.completions.create(
model="sonar-pro",
messages=[{
"role": "user",
"content": [
{"type": "text", "text": "Summarize this document"},
{"type": "file_url", "file_url": {"url": "https://example.com/report.pdf"}}
]
}]
)completion = client.chat.completions.create(
model="sonar",
messages=[{"role": "user", "content": "Mount Everest photos"}],
return_images=True,
image_format_filter=["jpg", "png"]
)# Allowlist: include only these domains
search = client.search.create(
query="climate research",
search_domain_filter=["science.org", "nature.com"]
)
# Denylist: exclude these domains
search = client.search.create(
query="tech news",
search_domain_filter=["-reddit.com", "-pinterest.com"]
)search = client.search.create(
query=[
"AI trends 2024",
"machine learning healthcare",
"neural networks applications"
],
max_results=5
)
for i, query_results in enumerate(search.results):
print(f"Query {i+1} results:")
for result in query_results:
print(f" {result.title}")from pydantic import BaseModel
class ContactInfo(BaseModel):
    # Target schema for the structured (JSON) response.
    email: str
    phone: str

completion = client.chat.completions.create(
    model="sonar-pro",
    messages=[{"role": "user", "content": "Find contact for Tesla IR"}],
    # Structured outputs: constrain the reply to the model's JSON schema.
    response_format={
        "type": "json_schema",
        "json_schema": {"schema": ContactInfo.model_json_schema()},
    },
)
contact = ContactInfo.model_validate_json(completion.choices[0].message.content)

import asyncio
from perplexity import AsyncPerplexity

async def main():
    """Fan out two searches concurrently on one async client."""
    async with AsyncPerplexity() as client:
        tasks = [
            client.search.create(query="AI news"),
            client.search.create(query="tech trends"),
        ]
        results = await asyncio.gather(*tasks)

asyncio.run(main())

import time
from perplexity import RateLimitError
def search_with_retry(client, query, max_retries=3):
for attempt in range(max_retries):
try:
return client.search.create(query=query)
except RateLimitError:
if attempt < max_retries - 1:
time.sleep(2 ** attempt)
else:
raise| Parameter | Default | Description |
|---|---|---|
| temperature | 0.7 | Creativity (0-2) |
| max_tokens | varies | Response length limit |
| top_p | 0.9 | Nucleus sampling |
| presence_penalty | 0 | Reduce repetition (-2 to 2) |
| frequency_penalty | 0 | Reduce word frequency (-2 to 2) |
| Parameter | Description |
|---|---|
| max_results | 1-20 results per query |
| max_tokens_per_page | Content extraction depth (default 2048) |
| country | ISO country code for regional results |
| search_domain_filter | Domain allowlist/denylist (max 20) |
| search_language_filter | ISO 639-1 language codes (max 10) |
| Model | Input | Output |
|---|---|---|
| sonar | $1 | $1 |
| sonar-pro | $3 | $15 |
| sonar-reasoning-pro | $2 | $8 |
Key terms: `citations`, `dict[str, Any]`, `search_domain_filter`, `stream=True`, `sonar-deep-research`, `data:` (SSE chunk prefix).

import perplexity
# Error handling: catch SDK exceptions from most specific to most general
# (APIStatusError last, as the broadest HTTP-status error).
try:
    completion = client.chat.completions.create(...)
except perplexity.BadRequestError as e:
    print(f"Invalid parameters: {e}")
except perplexity.RateLimitError:
    print("Rate limited, retry later")
except perplexity.APIStatusError as e:
    print(f"API error: {e.status_code}")