Getting Started
To use the Hreflang API, you'll need:
- An account: Sign in with LinkedIn for free
- An API key: Generate one from your account dashboard
Once you have a key, include it in every request you make.
Base URL: https://app.hreflang.org/api
Authentication
All API requests require your API key as a parameter:
GET /api/account/limits?key=YOUR_API_KEY
Keep your API key secure! Don't expose it in client-side code or public repositories.
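One way to follow this advice is to load the key from the environment at runtime instead of hardcoding it. Below is a minimal sketch in Python, assuming the requests library and an HREFLANG_API_KEY environment variable of your own choosing (not something the API requires):

import os
import requests

# Assumes you exported the key beforehand, e.g. `export HREFLANG_API_KEY=...`
API_KEY = os.environ["HREFLANG_API_KEY"]

response = requests.get(
    "https://app.hreflang.org/api/account/limits",
    params={"key": API_KEY},
)
response.raise_for_status()
print(response.json())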
Rate Limits
API usage is limited based on your account tier:
Limit Type | Free Tier | Premium Tier |
---|---|---|
URLs per test | 50 | 1,000 |
Tests per day | 10 | 500 |
API Endpoints
POST
/api/test/submit/urllist
Submit URL List for Testing
Submit a list of URLs to test for hreflang implementation.
Parameters
Parameter | Type | Required | Description |
---|---|---|---|
key | string | Required | Your API key |
url_list | string | Required | Newline-separated list of URLs to test |
cURL
curl -X POST "https://app.hreflang.org/api/test/submit/urllist" \
-d "key=YOUR_API_KEY" \
-d "url_list=https://example.com/en/
https://example.com/es/
https://example.com/fr/"
Python
import requests
url = "https://app.hreflang.org/api/test/submit/urllist"
data = {
"key": "YOUR_API_KEY",
"url_list": "https://example.com/en/\nhttps://example.com/es/\nhttps://example.com/fr/"
}
response = requests.post(url, data=data)
result = response.json()
print(f"Test ID: {result['test_id']}")
🤖 AI Agent Integration: Use our langchain-hreflang package to integrate hreflang testing into your AI workflows. Install it with pip install langchain-hreflang.
LangChain
from langchain_hreflang import HreflangTool
# Initialize the tool
hreflang_tool = HreflangTool(api_key="YOUR_API_KEY")
# Test a list of URLs
urls = """https://example.com/en/
https://example.com/es/
https://example.com/fr/"""
result = hreflang_tool._run(urls)
print(result)
LangGraph
from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI
from langchain_hreflang import HreflangTool
# Create agent with hreflang tool
llm = ChatOpenAI(model="gpt-4")
hreflang_tool = HreflangTool(api_key="YOUR_API_KEY")
agent = create_react_agent(llm, [hreflang_tool])
# Use in workflow
result = agent.invoke({
"messages": [("user", "Test hreflang for these URLs: https://example.com/en/, https://example.com/es/, https://example.com/fr/")]
})
print(result["messages"][-1].content)
CrewAI
from crewai import Agent, Task, Crew
from langchain_hreflang import HreflangTool
# Create SEO agent with hreflang tool
seo_agent = Agent(
role="SEO Specialist",
goal="Analyze and validate hreflang implementation",
backstory="Expert in international SEO and hreflang validation",
tools=[HreflangTool(api_key="YOUR_API_KEY")],
verbose=True
)
# Define hreflang validation task
hreflang_task = Task(
description="Test the hreflang implementation for the provided URLs: https://example.com/en/, https://example.com/es/, https://example.com/fr/",
expected_output="Detailed hreflang validation report",
agent=seo_agent
)
# Create and run crew
crew = Crew(
agents=[seo_agent],
tasks=[hreflang_task],
verbose=True
)
result = crew.kickoff()
print(result)
$url = "https://hreflang.org/api/test/submit/urllist";
$data = [
'key' => 'YOUR_API_KEY',
'url_list' => "https://example.com/en/\nhttps://example.com/es/\nhttps://example.com/fr/"
];
$response = file_get_contents($url, false, stream_context_create([
'http' => [
'method' => 'POST',
'content' => http_build_query($data)
]
]));
$result = json_decode($response, true);
echo "Test ID: " . $result['test_id'];
TypeScript
const response = await fetch('https://app.hreflang.org/api/test/submit/urllist', {
method: 'POST',
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
},
body: new URLSearchParams({
key: 'YOUR_API_KEY',
url_list: 'https://example.com/en/\nhttps://example.com/es/\nhttps://example.com/fr/'
})
});
const result = await response.json();
console.log(`Test ID: ${result.test_id}`);
Response
{
"test_id": "a1b2c3d4e5f6789012345678901234567890123456",
"success": true,
"status": "submitted"
}
POST
/api/test/submit/sitemap
Submit Sitemap for Testing
Submit a sitemap URL to test all contained URLs for hreflang implementation.
Parameters
Parameter | Type | Required | Description |
---|---|---|---|
key | string | Required | Your API key |
sitemap | string | Required | URL of the XML sitemap to test |
cURL
curl -X POST "https://hreflang.org/api/test/submit/sitemap" \
-d "key=YOUR_API_KEY" \
-d "sitemap=https://example.com/sitemap.xml"
Python
import requests
url = "https://app.hreflang.org/api/test/submit/sitemap"
data = {
"key": "YOUR_API_KEY",
"sitemap": "https://example.com/sitemap.xml"
}
response = requests.post(url, data=data)
result = response.json()
print(f"Test ID: {result['test_id']}")
🤖 AI Agent Integration: Use our langchain-hreflang package to integrate hreflang testing into your AI workflows. Install it with pip install langchain-hreflang.
LangChain
from langchain_hreflang import HreflangTool
# Initialize the tool
hreflang_tool = HreflangTool(api_key="YOUR_API_KEY")
# Test a sitemap
result = hreflang_tool._run("https://example.com/sitemap.xml")
print(result)
LangGraph
from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI
from langchain_hreflang import HreflangTool
# Create agent with hreflang tool
llm = ChatOpenAI(model="gpt-4")
hreflang_tool = HreflangTool(api_key="YOUR_API_KEY")
agent = create_react_agent(llm, [hreflang_tool])
# Use in workflow
result = agent.invoke({
"messages": [("user", "Test hreflang for https://example.com/sitemap.xml")]
})
print(result["messages"][-1].content)
CrewAI
from crewai import Agent, Task, Crew
from langchain_hreflang import HreflangTool
# Create SEO agent with hreflang tool
seo_agent = Agent(
role="SEO Specialist",
goal="Analyze and validate hreflang implementation",
backstory="Expert in international SEO and hreflang validation",
tools=[HreflangTool(api_key="YOUR_API_KEY")],
verbose=True
)
# Define hreflang validation task
hreflang_task = Task(
description="Test the hreflang implementation for the sitemap: https://example.com/sitemap.xml",
expected_output="Detailed hreflang validation report",
agent=seo_agent
)
# Create and run crew
crew = Crew(
agents=[seo_agent],
tasks=[hreflang_task],
verbose=True
)
result = crew.kickoff()
print(result)
$url = "https://hreflang.org/api/test/submit/sitemap";
$data = [
'key' => 'YOUR_API_KEY',
'sitemap' => 'https://example.com/sitemap.xml'
];
$response = file_get_contents($url, false, stream_context_create([
'http' => [
'method' => 'POST',
'content' => http_build_query($data)
]
]));
$result = json_decode($response, true);
TypeScript
const response = await fetch('https://app.hreflang.org/api/test/submit/sitemap', {
method: 'POST',
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
},
body: new URLSearchParams({
key: 'YOUR_API_KEY',
sitemap: 'https://example.com/sitemap.xml'
})
});
const result = await response.json();
Response
{
"test_id": "a1b2c3d4e5f6789012345678901234567890123456",
"success": true,
"status": "submitted"
}
GET
/api/test/status
Get Test Status
Check the current status of a submitted test.
Parameters
Parameter | Type | Required | Description |
---|---|---|---|
key | string | Required | Your API key |
test_id | string | Required | Test ID returned from submit endpoint |
cURL
curl "https://app.hreflang.org/api/test/status?key=YOUR_API_KEY&test_id=1a4bc2e9d47977e2f80669366251a39120250820045052"
Python
import requests
url = "https://app.hreflang.org/api/test/status"
params = {
"key": "YOUR_API_KEY",
"test_id": "1a4bc2e9d47977e2f80669366251a39120250820045052"
}
response = requests.get(url, params=params)
result = response.json()
print(f"Status: {result['test_status']}")
🤖 AI Agent Integration: The langchain-hreflang package offers both automatic status handling and manual control. These examples show custom status monitoring workflows.
LangChain - Manual Status Checking
from langchain_hreflang import HreflangClient
from langchain.tools import Tool

def check_test_status_tool(test_id: str) -> str:
    """Custom tool to check hreflang test status"""
    client = HreflangClient(api_key="YOUR_API_KEY")
    try:
        status_response = client.get_test_status(test_id)
        status = status_response.get("test_status")
        pages = status_response.get("num_pages_in_test", 0)
        submitted = status_response.get("test_submitted_at", "Unknown")
        return f"Test {test_id}: {status} | {pages} pages | Submitted: {submitted}"
    except Exception as e:
        return f"Error checking status: {str(e)}"

# Create custom status tool
status_tool = Tool(
    name="check_hreflang_status",
    func=check_test_status_tool,
    description="Check the status of a specific hreflang test"
)

# Use the tool
result = status_tool.run("YOUR_TEST_ID_HERE")
print(result)
LangGraph - Custom Status Monitoring
from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI
from langchain.tools import Tool
from langchain_hreflang import HreflangClient

def monitor_test_progress(input_data: str) -> str:
    """Monitor test progress with custom intervals"""
    client = HreflangClient(api_key="YOUR_API_KEY")
    # Extract test_id from input
    test_id = input_data.strip()
    if not test_id:
        return "Error: Test ID required"
    try:
        status_response = client.get_test_status(test_id)
        status = status_response.get("test_status")
        if status == "complete":
            results = client.get_test_results(test_id)
            num_pages = len(results.get("test_results", {}).get("hreflang_map", {}))
            return f"Test completed! {num_pages} pages analyzed. Results available."
        elif status == "pending":
            return f"Test is running... Status: {status}"
        elif status == "submitted":
            return f"Test queued for processing... Status: {status}"
        else:
            return f"Test status: {status}"
    except Exception as e:
        return f"Error: {str(e)}"

# Create monitoring tool
monitor_tool = Tool(
    name="monitor_hreflang_test",
    func=monitor_test_progress,
    description="Monitor the progress of a hreflang test by test ID"
)

llm = ChatOpenAI(model="gpt-4")
agent = create_react_agent(llm, [monitor_tool])
result = agent.invoke({
    "messages": [("user", "Check the status of test ID: 1a4bc2e9d47977e2f80669366251a39120250820045052")]
})
print(result["messages"][-1].content)
CrewAI - Automated Status Monitoring
from crewai import Agent, Task, Crew
from langchain.tools import Tool
from langchain_hreflang import HreflangClient

def continuous_monitor(test_ids: str) -> str:
    """Monitor multiple tests and provide status updates"""
    client = HreflangClient(api_key="YOUR_API_KEY")
    # Parse test IDs
    ids = [tid.strip() for tid in test_ids.split(",") if tid.strip()]
    results = []
    for test_id in ids:
        try:
            status_response = client.get_test_status(test_id)
            status = status_response.get("test_status")
            pages = status_response.get("num_pages_in_test", 0)
            results.append(f"Test {test_id[:8]}...: {status} ({pages} pages)")
        except Exception as e:
            results.append(f"Test {test_id[:8]}...: Error - {str(e)}")
    return "\n".join(results)

monitoring_tool = Tool(
    name="monitor_multiple_tests",
    func=continuous_monitor,
    description="Monitor status of multiple hreflang tests"
)

# Create monitoring agent
monitor_agent = Agent(
    role="Test Status Monitor",
    goal="Track and report on hreflang test progress",
    backstory="Expert in API monitoring with focus on SEO testing workflows",
    tools=[monitoring_tool],
    verbose=True
)

monitor_task = Task(
    description="Monitor these test IDs and provide status updates: 1a4bc2e9d47977e2f80669366251a39120250820045052",
    expected_output="Status report for all monitored tests",
    agent=monitor_agent
)

crew = Crew(
    agents=[monitor_agent],
    tasks=[monitor_task],
    verbose=True
)
result = crew.kickoff()
print(result)
PHP
$params = [
'key' => 'YOUR_API_KEY',
'test_id' => '1a4bc2e9d47977e2f80669366251a39120250820045052'
];
$url = "https://app.hreflang.org/api/test/status?" . http_build_query($params);
$response = file_get_contents($url);
$result = json_decode($response, true);
TypeScript
const params = new URLSearchParams({
key: 'YOUR_API_KEY',
test_id: '1a4bc2e9d47977e2f80669366251a39120250820045052'
});
const response = await fetch(`https://app.hreflang.org/api/test/status?${params}`);
const result = await response.json();
Response
{
"test_id": "1a4bc2e9d47977e2f80669366251a39120250820045052",
"test_submitted_at": "2025-08-20 04:50:52",
"test_status": "complete",
"num_pages_in_test": 3,
"crawl_started_at": "2025-08-20 04:50:52",
"test_completed_at": "2025-08-19 21:50:52.639864"
}
Test Status Values
- submitted - Test has been queued but not started
- pending - Test is currently running
- complete - Test has finished; results are available
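Because tests run asynchronously, the usual pattern is to submit, poll the status endpoint until it reports complete, and then fetch the results. Below is a minimal polling sketch in Python, assuming the requests library; the 10-second interval is an arbitrary choice, not something the API requires:

import time
import requests

API_KEY = "YOUR_API_KEY"
BASE = "https://app.hreflang.org/api"

# Submit a small URL list
submission = requests.post(f"{BASE}/test/submit/urllist", data={
    "key": API_KEY,
    "url_list": "https://example.com/en/\nhttps://example.com/es/",
}).json()
test_id = submission["test_id"]

# Poll until the test reports "complete"
while True:
    status = requests.get(f"{BASE}/test/status",
                          params={"key": API_KEY, "test_id": test_id}).json()
    if status["test_status"] == "complete":
        break
    time.sleep(10)  # arbitrary polling interval

# Fetch and print the results
results = requests.get(f"{BASE}/test/results",
                       params={"key": API_KEY, "test_id": test_id}).json()
print(results["test_results"]["hreflang_map"])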
GET
/api/test/results
Get Test Results
Retrieve the complete results of a finished test.
Note: Results are only available when test status is "complete".
Parameters
Parameter | Type | Required | Description |
---|---|---|---|
key | string | Required | Your API key |
test_id | string | Required | Test ID returned from submit endpoint |
cURL
curl "https://app.hreflang.org/api/test/results?key=YOUR_API_KEY&test_id=1a4bc2e9d47977e2f80669366251a39120250820045052"
Python
import requests
url = "https://app.hreflang.org/api/test/results"
params = {
"key": "YOUR_API_KEY",
"test_id": "1a4bc2e9d47977e2f80669366251a39120250820045052"
}
response = requests.get(url, params=params)
results = response.json()
# Access hreflang map
hreflang_map = results['test_results']['hreflang_map']
for url, data in hreflang_map.items():
    print(f"URL: {url}")
    print(f"Self-reported language: {data['self_lang']}")
    print(f"Hreflang links: {data['hreflangs']}")
    print("---")
🤖 AI Agent Integration: The langchain-hreflang package automatically retrieves and formats results. Perfect for AI-powered analysis and reporting.
LangChain - Results Analysis
from langchain_hreflang import HreflangTool
# Initialize the tool
hreflang_tool = HreflangTool(api_key="YOUR_API_KEY")
# Test and get results automatically
result = hreflang_tool._run("https://example.com/sitemap.xml")
# Results include formatted analysis
print("Hreflang Test Results:")
print(result)
LangGraph - Automated Analysis
from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI
from langchain_hreflang import HreflangTool
# Create agent with hreflang tool
llm = ChatOpenAI(model="gpt-4")
hreflang_tool = HreflangTool(api_key="YOUR_API_KEY")
agent = create_react_agent(llm, [hreflang_tool])
# Analyze results with AI
result = agent.invoke({
"messages": [("user", "Analyze the hreflang implementation for https://example.com/sitemap.xml and provide recommendations")]
})
print(result["messages"][-1].content)
CrewAI - SEO Report Generation
from crewai import Agent, Task, Crew
from langchain_hreflang import HreflangTool
# Create SEO analysis agent
seo_analyst = Agent(
role="SEO Analyst",
goal="Analyze hreflang test results and generate actionable insights",
backstory="Expert in international SEO with deep knowledge of hreflang best practices",
tools=[HreflangTool(api_key="YOUR_API_KEY")],
verbose=True
)
# Define analysis task
analysis_task = Task(
description="Run hreflang tests on https://example.com/sitemap.xml and generate a comprehensive SEO report with specific recommendations",
expected_output="Detailed SEO report with hreflang analysis and actionable recommendations",
agent=seo_analyst
)
# Generate comprehensive report
crew = Crew(
agents=[seo_analyst],
tasks=[analysis_task],
verbose=True
)
report = crew.kickoff()
print(report)
PHP
$params = [
'key' => 'YOUR_API_KEY',
'test_id' => '1a4bc2e9d47977e2f80669366251a39120250820045052'
];
$url = "https://app.hreflang.org/api/test/results?" . http_build_query($params);
$response = file_get_contents($url);
$results = json_decode($response, true);
// Check for errors
if (isset($results['test_results'])) {
$hreflangMap = $results['test_results']['hreflang_map'];
$returnTagErrors = $results['test_results']['return_tag_errors'];
$otherErrors = $results['test_results']['all_other_errors_and_warnings'];
}
TypeScript
const params = new URLSearchParams({
key: 'YOUR_API_KEY',
test_id: '1a4bc2e9d47977e2f80669366251a39120250820045052'
});
const response = await fetch(`https://app.hreflang.org/api/test/results?${params}`);
const results = await response.json();
if (results.test_results) {
const { hreflang_map, return_tag_errors, all_other_errors_and_warnings } = results.test_results;
// Process results
Object.entries(hreflang_map).forEach(([url, data]) => {
console.log(`URL: ${url}`);
console.log(`Self-lang: ${data.self_lang}`);
console.log(`Hreflangs:`, data.hreflangs);
});
}
Response Structure
{
  "test_id": "1a4bc2e9d47977e2f80669366251a39120250820045052",
  "test_submitted_at": "2025-08-20 04:50:52",
  "test_status": "complete",
  "num_pages_in_test": 3,
  "crawl_started_at": "2025-08-20 04:50:52",
  "test_completed_at": "2025-08-19 21:50:52.639864",
  "test_results": {
    "hreflang_map": {
      "https:\/\/www.diffen.com\/": {
        "self_lang": "en",
        "hreflangs": {
          "es": "https:\/\/es.diffen.com\/",
          "en": "https:\/\/www.diffen.com\/"
        }
      },
      "https:\/\/es.diffen.com\/": {
        "self_lang": "es",
        "hreflangs": {
          "es": "https:\/\/es.diffen.com\/",
          "en": "https:\/\/www.diffen.com\/"
        }
      },
      "https:\/\/en.diffen.com\/": {
        "self_lang": "",
        "hreflangs": []
      }
    },
    "return_tag_errors": [],
    "all_other_errors_and_warnings": {
      "https:\/\/en.diffen.com\/": {
        "errors": [
          "Could not load this page."
        ],
        "warnings": []
      }
    }
  }
}
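The examples above focus on hreflang_map, but the same response also carries return_tag_errors and all_other_errors_and_warnings, which is usually what you want to act on. A short sketch that flattens those into a readable summary, assuming results already holds the parsed JSON from the Python example above:

test_results = results.get("test_results", {})

# Per-URL errors and warnings (e.g. pages that could not be loaded)
for page_url, issues in test_results.get("all_other_errors_and_warnings", {}).items():
    for error in issues.get("errors", []):
        print(f"ERROR   {page_url}: {error}")
    for warning in issues.get("warnings", []):
        print(f"WARNING {page_url}: {warning}")

# Return-tag errors (an empty list in the sample response, so printed raw here)
for return_error in test_results.get("return_tag_errors", []):
    print(f"RETURN TAG ERROR: {return_error}")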
GET
/api/account/limits
Get Account Limits
Retrieve your current account limits and usage quotas.
Parameters
Parameter | Type | Required | Description |
---|---|---|---|
key | string | Required | Your API key |
cURL
curl "https://hreflang.org/api/account/limits?key=YOUR_API_KEY"
Python
import requests
url = "https://app.hreflang.org/api/account/limits"
params = {"key": "YOUR_API_KEY"}
response = requests.get(url, params=params)
limits = response.json()
print(f"URLs per test: {limits['limit_urls_per_test']}")
print(f"Tests per day: {limits['limit_tests_per_day']}")
🤖 AI Agent Integration: While the langchain-hreflang package doesn't directly expose limits checking, these examples show how to integrate account management into AI workflows.
LangChain - Custom Limits Tool
import requests
from langchain.tools import Tool
def get_hreflang_limits(api_key: str) -> str:
    """Get account limits for hreflang API"""
    url = "https://app.hreflang.org/api/account/limits"
    params = {"key": api_key}
    response = requests.get(url, params=params)
    data = response.json()
    return f"URLs per test: {data['limit_urls_per_test']}, Tests per day: {data['limit_tests_per_day']}"

# Create custom tool
limits_tool = Tool(
    name="hreflang_limits",
    func=lambda x: get_hreflang_limits("YOUR_API_KEY"),
    description="Get current hreflang API account limits"
)

# Use the tool
result = limits_tool.run("")
print(result)
LangGraph - Account Management
from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI
from langchain.tools import Tool
import requests
def check_limits(api_key: str = "YOUR_API_KEY") -> str:
    response = requests.get("https://app.hreflang.org/api/account/limits",
                            params={"key": api_key})
    return str(response.json())
limits_tool = Tool(
name="check_account_limits",
func=check_limits,
description="Check hreflang API account limits and usage"
)
llm = ChatOpenAI(model="gpt-4")
agent = create_react_agent(llm, [limits_tool])
result = agent.invoke({
"messages": [("user", "What are my current API limits?")]
})
print(result["messages"][-1].content)
CrewAI - Usage Monitoring
from crewai import Agent, Task, Crew
from langchain.tools import Tool
import requests
def monitor_usage(api_key: str = "YOUR_API_KEY") -> str:
    limits_response = requests.get("https://app.hreflang.org/api/account/limits",
                                   params={"key": api_key})
    tests_response = requests.get("https://app.hreflang.org/api/account/tests",
                                  params={"key": api_key})
    return f"Limits: {limits_response.json()}, Recent tests: {len(tests_response.json().get('tests', []))}"
usage_tool = Tool(
name="monitor_api_usage",
func=monitor_usage,
description="Monitor hreflang API usage and limits"
)
# Create monitoring agent
monitor_agent = Agent(
role="API Usage Monitor",
goal="Track and report on API usage and limits",
backstory="Expert in API management and usage optimization",
tools=[usage_tool],
verbose=True
)
monitor_task = Task(
description="Check current API usage and provide recommendations for optimization",
expected_output="Usage report with optimization suggestions",
agent=monitor_agent
)
crew = Crew(
agents=[monitor_agent],
tasks=[monitor_task],
verbose=True
)
result = crew.kickoff()
print(result)
$url = "https://hreflang.org/api/account/limits?key=YOUR_API_KEY";
$response = file_get_contents($url);
$limits = json_decode($response, true);
echo "URLs per test: " . $limits['limit_urls_per_test'] . "\n";
echo "Tests per day: " . $limits['limit_tests_per_day'] . "\n";
TypeScript
const response = await fetch('https://app.hreflang.org/api/account/limits?key=YOUR_API_KEY');
const limits = await response.json();
console.log(`URLs per test: ${limits.limit_urls_per_test}`);
console.log(`Tests per day: ${limits.limit_tests_per_day}`);
Response
{
"limit_urls_per_test": 200,
"limit_tests_per_day": 50
}
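If a URL list is longer than limit_urls_per_test, one option is to split it into batches and submit one test per batch, keeping limit_tests_per_day in mind. A minimal sketch in Python, assuming the requests library; the 500-URL list is purely illustrative:

import requests

API_KEY = "YOUR_API_KEY"
BASE = "https://app.hreflang.org/api"

# Look up the current tier's per-test limit
limits = requests.get(f"{BASE}/account/limits", params={"key": API_KEY}).json()
batch_size = limits["limit_urls_per_test"]

urls = [f"https://example.com/page-{i}/" for i in range(500)]  # illustrative list
test_ids = []
for start in range(0, len(urls), batch_size):
    batch = urls[start:start + batch_size]
    submission = requests.post(f"{BASE}/test/submit/urllist", data={
        "key": API_KEY,
        "url_list": "\n".join(batch),
    }).json()
    test_ids.append(submission["test_id"])

print(f"Submitted {len(test_ids)} tests")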
GET
/api/account/tests
Get Account Test History
Retrieve a list of your recent tests.
Parameters
Parameter | Type | Required | Description |
---|---|---|---|
key | string | Required | Your API key |
cURL
curl "https://hreflang.org/api/account/tests?key=YOUR_API_KEY"
Python
import requests
url = "https://app.hreflang.org/api/account/tests"
params = {"key": "YOUR_API_KEY"}
response = requests.get(url, params=params)
data = response.json()
for test in data['tests']:
    print(f"Test ID: {test['test_id']}")
    print(f"Status: {test['test_status']}")
    print(f"Submitted: {test['submitted_at']}")
    print(f"Pages: {test['num_pages_in_test']}")
    print("---")
🤖 AI Agent Integration: Create intelligent workflows that track and analyze your test history using AI agents.
LangChain - Test History Analysis
import requests
from langchain.tools import Tool
def get_test_history(api_key: str) -> str:
    """Get recent hreflang test history"""
    url = "https://app.hreflang.org/api/account/tests"
    params = {"key": api_key}
    response = requests.get(url, params=params)
    data = response.json()
    history = []
    for test in data.get('tests', []):
        history.append(f"ID: {test['test_id']}, Status: {test['test_status']}, Pages: {test['num_pages_in_test']}")
    return "\n".join(history[:5])  # Show last 5 tests
history_tool = Tool(
name="hreflang_history",
func=lambda x: get_test_history("YOUR_API_KEY"),
description="Get recent hreflang test history"
)
result = history_tool.run("")
print("Recent tests:")
print(result)
LangGraph - Trend Analysis
from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI
from langchain.tools import Tool
import requests
def analyze_test_trends(api_key: str = "YOUR_API_KEY") -> str:
    response = requests.get("https://app.hreflang.org/api/account/tests",
                            params={"key": api_key})
    data = response.json()
    tests = data.get('tests', [])
    completed = sum(1 for t in tests if t['test_status'] == 'complete')
    total_pages = sum(t['num_pages_in_test'] for t in tests)
    return f"Total tests: {len(tests)}, Completed: {completed}, Total pages tested: {total_pages}"
trend_tool = Tool(
name="analyze_trends",
func=analyze_test_trends,
description="Analyze hreflang testing trends and patterns"
)
llm = ChatOpenAI(model="gpt-4")
agent = create_react_agent(llm, [trend_tool])
result = agent.invoke({
"messages": [("user", "Analyze my hreflang testing patterns and provide insights")]
})
print(result["messages"][-1].content)
CrewAI - Testing Strategy Optimization
from crewai import Agent, Task, Crew
from langchain.tools import Tool
import requests
from datetime import datetime
def comprehensive_analysis(api_key: str = "YOUR_API_KEY") -> str:
    tests_response = requests.get("https://app.hreflang.org/api/account/tests",
                                  params={"key": api_key})
    limits_response = requests.get("https://app.hreflang.org/api/account/limits",
                                   params={"key": api_key})
    tests_data = tests_response.json()
    limits_data = limits_response.json()
    analysis = {
        "total_tests": len(tests_data.get('tests', [])),
        "limits": limits_data,
        "recent_activity": tests_data.get('tests', [])[:3]
    }
    return str(analysis)
analysis_tool = Tool(
name="comprehensive_analysis",
func=comprehensive_analysis,
description="Perform comprehensive analysis of hreflang testing patterns"
)
# Create strategy optimization agent
strategy_agent = Agent(
role="SEO Strategy Analyst",
goal="Optimize hreflang testing strategies based on historical data",
backstory="Expert in SEO analytics and testing optimization with deep knowledge of hreflang best practices",
tools=[analysis_tool],
verbose=True
)
optimization_task = Task(
description="Analyze my hreflang testing history and provide strategic recommendations for improving testing efficiency and coverage",
expected_output="Strategic recommendations for optimizing hreflang testing approach",
agent=strategy_agent
)
crew = Crew(
agents=[strategy_agent],
tasks=[optimization_task],
verbose=True
)
recommendations = crew.kickoff()
print(recommendations)
$url = "https://hreflang.org/api/account/tests?key=YOUR_API_KEY";
$response = file_get_contents($url);
$data = json_decode($response, true);
foreach ($data['tests'] as $test) {
echo "Test ID: " . $test['test_id'] . "\n";
echo "Status: " . $test['test_status'] . "\n";
echo "Submitted: " . $test['submitted_at'] . "\n";
echo "Pages: " . $test['num_pages_in_test'] . "\n";
echo "---\n";
}
TypeScript
const response = await fetch('https://app.hreflang.org/api/account/tests?key=YOUR_API_KEY');
const data = await response.json();
data.tests.forEach(test => {
console.log(`Test ID: ${test.test_id}`);
console.log(`Status: ${test.test_status}`);
console.log(`Submitted: ${test.submitted_at}`);
console.log(`Pages: ${test.num_pages_in_test}`);
console.log('---');
});
Response
{
  "tests": [
    {
      "test_id": "a1b2c3d4e5f6789012345678901234567890123456",
      "submitted_at": "2024-01-15 10:30:00",
      "num_pages_in_test": 25,
      "test_status": "complete"
    },
    {
      "test_id": "b2c3d4e5f6789012345678901234567890123456a1",
      "submitted_at": "2024-01-14 15:20:00",
      "num_pages_in_test": 10,
      "test_status": "complete"
    }
  ]
}
Error Codes
The API uses standard HTTP status codes to indicate success or failure:
Code | Meaning | Description |
---|---|---|
200 | OK | Request successful |
400 | Bad Request | Invalid parameters or request format |
403 | Forbidden | Invalid API key or access denied |
404 | Not Found | Invalid endpoint or test ID not found |
405 | Method Not Allowed | HTTP method not supported for this endpoint |
413 | Payload Too Large | Request size exceeds limits |
429 | Too Many Requests | Rate limit exceeded |
Error Response Format
All errors return a JSON object with an error message:
{
"error": "Daily test limit exceeded. You can run 10 tests per day."
}
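In client code it is worth checking the HTTP status and surfacing that error field rather than assuming a successful body. The helper below is a minimal sketch, assuming the requests library; the retry-on-429 behavior and 60-second back-off are illustrative choices, not something the API prescribes:

import time
import requests

API_KEY = "YOUR_API_KEY"
BASE = "https://app.hreflang.org/api"

def call_api(path, params=None, data=None, retries=3):
    """Call an endpoint and raise with the documented error message on failure."""
    url = f"{BASE}{path}"
    for _ in range(retries):
        if data is not None:
            response = requests.post(url, data={"key": API_KEY, **data})
        else:
            response = requests.get(url, params={"key": API_KEY, **(params or {})})
        if response.status_code == 429:
            time.sleep(60)  # illustrative back-off before retrying
            continue
        if response.status_code != 200:
            # Error responses carry a JSON body with an "error" message
            message = response.json().get("error", "Unknown error")
            raise RuntimeError(f"HTTP {response.status_code}: {message}")
        return response.json()
    raise RuntimeError("Rate limit still exceeded after retries")

print(call_api("/account/limits"))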