This page focuses on the Search API. For shared setup (installation, authentication, configuration, error handling, performance, and type safety), see the SDK overview.

Quick Start

Get ranked web search results:
from perplexity import Perplexity

client = Perplexity()

search = client.search.create(
    query="latest AI developments 2024",
    max_results=5
)

for result in search.results:
    print(f"{result.title}: {result.url}")

Features

Multi-Query Search

Run multiple related searches in a single request:
search = client.search.create(
    query=[
        "latest AI developments 2024",
        "solar power innovations",
        "wind energy developments"
    ]
)

# Results are combined and ranked
for i, result in enumerate(search.results):
    print(f"{i + 1}. {result.title}")
    print(f"   URL: {result.url}")
    print(f"   Date: {result.date}\n")

Limiting Search Results

Control the number of search results returned:
search = client.search.create(
    query="latest AI developments 2024",
    max_results=5  # Get only top 5 results
)

print(f"Found {len(search.results)} results")

Domain Filtering

Limit search results to specific domains for trusted sources:
search = client.search.create(
    query="climate change research",
    search_domain_filter=[
        "science.org",
        "pnas.org", 
        "cell.com",
        "nature.com"
    ],
    max_results=10
)

from urllib.parse import urlparse

for result in search.results:
    domain = urlparse(result.url).netloc
    print(f"{result.title} [{domain}]")

Date Filtering

Filter results by publication date or recency:
# Get results from the past week
search = client.search.create(
    query="latest AI developments",
    search_recency_filter="week"
)

# Other options: "hour", "day", "week", "month", "year"
recent_search = client.search.create(
    query="breaking news technology",
    search_recency_filter="day"
)
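
If you need results ordered by publication date, you can sort them locally. This sketch assumes result.date is an ISO 8601 string (for example "2024-06-01"); adjust the parsing if the SDK returns dates in a different format:
from datetime import datetime

# Skip results without a date, then sort newest first
dated_results = [r for r in search.results if r.date]
dated_results.sort(key=lambda r: datetime.fromisoformat(r.date), reverse=True)

for result in dated_results:
    print(f"{result.date}: {result.title}")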

Academic Search Mode

Search academic sources for research purposes:
search = client.search.create(
    query="machine learning algorithms",
    search_mode="academic",
    max_results=10
)

for result in search.results:
    print(f"{result.title}")
    print(f"Journal/Source: {result.snippet}")
    print(f"URL: {result.url}\n")

Location Filtering

Get geographically relevant results:
from perplexity.types import UserLocationFilter

search = client.search.create(
    query="local restaurants",
    user_location_filter=UserLocationFilter(
        latitude=37.7749,
        longitude=-122.4194,
        radius=10  # km
    )
)

# Or use a location string
city_search = client.search.create(
    query="tech events",
    user_location_filter="San Francisco, CA"
)

Advanced Usage

Complex Search Configuration

Combine multiple filters for precise results:
from perplexity.types import SearchCreateParams

advanced_search_params = SearchCreateParams(
    query="renewable energy research",
    max_results=15,
    search_mode="web",
    search_recency_filter="month",
    search_domain_filter=[
        "energy.gov",
        "iea.org",
        "irena.org",
        "nature.com",
        "science.org"
    ],
    return_images=True,
    return_snippets=True,
    user_location_filter="United States"
)

# SearchCreateParams is a typed dict of request parameters, so it unpacks directly
search = client.search.create(**advanced_search_params)

# Process results with metadata
for result in search.results:
    print(f"Title: {result.title}")
    print(f"URL: {result.url}")
    print(f"Date: {result.date}")
    print(f"Snippet: {result.snippet}")
    
    if result.images:
        print(f"Images: {len(result.images)} found")
    print("---")

Error Handling

Handle search-specific errors:
import perplexity
from perplexity import Perplexity

client = Perplexity()

try:
    search = client.search.create(
        query="",  # Empty query will cause error
        max_results=5
    )
except perplexity.BadRequestError as e:
    print(f"Invalid search parameters: {e}")
except perplexity.RateLimitError as e:
    print("Search rate limit exceeded")
except perplexity.APIStatusError as e:
    print(f"Search API error {e.status_code}: {e}")
except Exception as e:
    print(f"Unexpected error: {e}")

Async Usage

import asyncio
from perplexity import AsyncPerplexity

async def main():
    client = AsyncPerplexity()
    
    # Single search
    search = await client.search.create(
        query="What is Perplexity AI?",
        max_results=5
    )
    print(search)
    
    # Concurrent searches
    tasks = [
        client.search.create(query=f"query {i}")
        for i in range(3)
    ]
    results = await asyncio.gather(*tasks)
    print(f"Completed {len(results)} searches")

asyncio.run(main())
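
To ensure the client's HTTP connections are released when you are done, the async client can also be used as a context manager. This sketch assumes AsyncPerplexity supports async with, as httpx-based SDK clients generally do:
import asyncio
from perplexity import AsyncPerplexity

async def main():
    # Connections are closed automatically when the block exits
    async with AsyncPerplexity() as client:
        search = await client.search.create(
            query="What is Perplexity AI?",
            max_results=5
        )
        print(f"Found {len(search.results)} results")

asyncio.run(main())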

Best Practices

1. Use appropriate search modes

Choose between “web” and “academic” search modes based on your needs.
# For general information and current events
web_search = client.search.create(
    query="latest tech news",
    search_mode="web",
    search_recency_filter="day"
)

# For research and academic information
academic_search = client.search.create(
    query="machine learning algorithms",
    search_mode="academic",
    search_domain_filter=["arxiv.org", "scholar.google.com"]
)

2. Implement effective filtering

Use domain and date filters to improve result quality and relevance.
# For financial information
financial_search = client.search.create(
    query="stock market trends",
    search_domain_filter=[
        "bloomberg.com",
        "reuters.com",
        "sec.gov",
        "nasdaq.com"
    ],
    search_recency_filter="week"
)

# For health information
health_search = client.search.create(
    query="medical research",
    search_domain_filter=[
        "nih.gov",
        "pubmed.ncbi.nlm.nih.gov",
        "who.int",
        "nejm.org"
    ]
)

3. Handle rate limits efficiently

Implement exponential backoff for rate limit errors.
import time
import random
import perplexity

def search_with_retry(client, query, max_retries=3):
    for attempt in range(max_retries):
        try:
            return client.search.create(query=query)
        except perplexity.RateLimitError:
            if attempt < max_retries - 1:
                # Exponential backoff with jitter
                delay = (2 ** attempt) + random.uniform(0, 1)
                time.sleep(delay)
            else:
                raise
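
For example, the helper can wrap any query (using the Perplexity client and the search_with_retry function defined above):
from perplexity import Perplexity

client = Perplexity()
search = search_with_retry(client, "latest AI developments 2024")
print(f"Retrieved {len(search.results)} results")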

4. Process concurrent searches efficiently

Use async for concurrent requests while respecting rate limits.
import asyncio
from perplexity import AsyncPerplexity

async def batch_search(queries, batch_size=3, delay_ms=1000):
    client = AsyncPerplexity()
    results = []
    
    for i in range(0, len(queries), batch_size):
        batch = queries[i:i + batch_size]
        
        batch_tasks = [
            client.search.create(query=query, max_results=5)
            for query in batch
        ]
        
        batch_results = await asyncio.gather(*batch_tasks)
        results.extend(batch_results)
        
        # Add delay between batches
        if i + batch_size < len(queries):
            await asyncio.sleep(delay_ms / 1000)
    
    return results

# Usage
queries = ["AI developments", "climate change", "space exploration"]
results = asyncio.run(batch_search(queries))
print(f"Processed {len(results)} searches")

Resources