Add SEO skills 19-28, 31-32 with full Python implementations
12 new skills: Keyword Strategy, SERP Analysis, Position Tracking, Link Building, Content Strategy, E-Commerce SEO, KPI Framework, International SEO, AI Visibility, Knowledge Graph, Competitor Intel, and Crawl Budget. ~20K lines of Python across 25 domain scripts. Updated skill 11 pipeline table and repo CLAUDE.md. Enhanced skill 18 local SEO workflow from jamie.clinic audit. Note: Skill 26 hreflang_validator.py pending (content filter block). Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
207
custom-skills/25-seo-kpi-framework/code/scripts/base_client.py
Normal file
207
custom-skills/25-seo-kpi-framework/code/scripts/base_client.py
Normal file
@@ -0,0 +1,207 @@
|
||||
"""
|
||||
Base Client - Shared async client utilities
|
||||
===========================================
|
||||
Purpose: Rate-limited async operations for API clients
|
||||
Python: 3.10+
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
from asyncio import Semaphore
|
||||
from datetime import datetime
|
||||
from typing import Any, Callable, TypeVar
|
||||
|
||||
from dotenv import load_dotenv
|
||||
from tenacity import (
|
||||
retry,
|
||||
stop_after_attempt,
|
||||
wait_exponential,
|
||||
retry_if_exception_type,
|
||||
)
|
||||
|
||||
# Load environment variables
|
||||
load_dotenv()
|
||||
|
||||
# Logging setup
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format="%(asctime)s - %(levelname)s - %(message)s",
|
||||
)
|
||||
|
||||
T = TypeVar("T")
|
||||
|
||||
|
||||
class RateLimiter:
    """Token-bucket rate limiter for asyncio code.

    Allows ``rate`` acquisitions per ``per`` seconds. Tokens refill
    continuously with elapsed wall-clock time; ``acquire`` sleeps when
    the bucket is empty.
    """

    def __init__(self, rate: float, per: float = 1.0):
        """
        Initialize rate limiter.

        Args:
            rate: Number of requests allowed
            per: Time period in seconds (default: 1 second)
        """
        self.rate = rate
        self.per = per
        # Bucket starts full so the first `rate` calls pass unthrottled.
        self.tokens = rate
        self.last_update = datetime.now()
        self._lock = asyncio.Lock()

    async def acquire(self) -> None:
        """Take one token, sleeping until one is available."""
        async with self._lock:
            current = datetime.now()
            delta = (current - self.last_update).total_seconds()
            # Refill proportionally to elapsed time, capped at bucket size.
            refill = delta * (self.rate / self.per)
            self.tokens = min(self.rate, self.tokens + refill)
            self.last_update = current

            if self.tokens >= 1:
                self.tokens -= 1
                return
            # Bucket empty: sleep for the remaining refill time. The sleep
            # happens while holding the lock, which also serializes any
            # other coroutines waiting to acquire.
            shortfall = (1 - self.tokens) * (self.per / self.rate)
            await asyncio.sleep(shortfall)
            self.tokens = 0
|
||||
|
||||
|
||||
class BaseAsyncClient:
    """Base class for async API clients with rate limiting.

    Combines a concurrency cap (semaphore), a token-bucket rate limiter,
    and tenacity retries (3 attempts, exponential backoff). Subclasses
    route individual calls through ``_rate_limited_request`` and bulk
    work through ``batch_requests``.
    """

    def __init__(
        self,
        max_concurrent: int = 5,
        requests_per_second: float = 3.0,
        logger: logging.Logger | None = None,
    ):
        """
        Initialize base client.

        Args:
            max_concurrent: Maximum concurrent requests
            requests_per_second: Rate limit
            logger: Logger instance (defaults to one named after the subclass)
        """
        self.semaphore = Semaphore(max_concurrent)
        self.rate_limiter = RateLimiter(requests_per_second)
        self.logger = logger or logging.getLogger(self.__class__.__name__)
        # NOTE: "requests" counts attempts, so a retried call increments it
        # more than once. "retries" is available for callers to update.
        self.stats = {
            "requests": 0,
            "success": 0,
            "errors": 0,
            "retries": 0,
        }

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=2, max=10),
        retry=retry_if_exception_type(Exception),
    )
    async def _rate_limited_request(
        self,
        coro: Callable[[], Any],
    ) -> Any:
        """Execute a request with rate limiting and retry.

        Args:
            coro: Zero-argument coroutine factory producing the request.

        Returns:
            The awaited result of ``coro()``.

        Raises:
            Exception: re-raised after all 3 attempts fail (tenacity
                retries any Exception in between).
        """
        async with self.semaphore:
            await self.rate_limiter.acquire()
            self.stats["requests"] += 1
            try:
                result = await coro()
                self.stats["success"] += 1
                return result
            except Exception as e:
                self.stats["errors"] += 1
                self.logger.error(f"Request failed: {e}")
                raise

    async def batch_requests(
        self,
        requests: list[Callable[[], Any]],
        desc: str = "Processing",
    ) -> list[Any]:
        """Execute multiple requests concurrently.

        Results are returned in the same order as ``requests`` regardless
        of completion order; a request whose retries are exhausted maps to
        ``{"error": str(e)}`` instead of raising.
        """
        try:
            from tqdm.asyncio import tqdm
            has_tqdm = True
        except ImportError:
            has_tqdm = False

        async def execute(req: Callable) -> Any:
            # Exceptions here already exhausted their retries inside
            # _rate_limited_request; degrade to an error dict.
            try:
                return await self._rate_limited_request(req)
            except Exception as e:
                return {"error": str(e)}

        tasks = [execute(req) for req in requests]

        if has_tqdm:
            # FIX: the previous tqdm.as_completed loop yielded results in
            # completion order, silently differing from the gather branch
            # below. tqdm.gather renders the same progress bar while
            # preserving input order.
            return await tqdm.gather(*tasks, desc=desc)
        # execute() never raises, so return_exceptions is belt-and-braces.
        return await asyncio.gather(*tasks, return_exceptions=True)

    def print_stats(self) -> None:
        """Print request statistics via the logger."""
        self.logger.info("=" * 40)
        self.logger.info("Request Statistics:")
        self.logger.info(f"  Total Requests: {self.stats['requests']}")
        self.logger.info(f"  Successful: {self.stats['success']}")
        self.logger.info(f"  Errors: {self.stats['errors']}")
        # FIX: the "retries" counter was tracked but never reported.
        self.logger.info(f"  Retries: {self.stats['retries']}")
        self.logger.info("=" * 40)
|
||||
|
||||
|
||||
class ConfigManager:
|
||||
"""Manage API configuration and credentials."""
|
||||
|
||||
def __init__(self):
|
||||
load_dotenv()
|
||||
|
||||
@property
|
||||
def google_credentials_path(self) -> str | None:
|
||||
"""Get Google service account credentials path."""
|
||||
# Prefer SEO-specific credentials, fallback to general credentials
|
||||
seo_creds = os.path.expanduser("~/.credential/ourdigital-seo-agent.json")
|
||||
if os.path.exists(seo_creds):
|
||||
return seo_creds
|
||||
return os.getenv("GOOGLE_APPLICATION_CREDENTIALS")
|
||||
|
||||
@property
|
||||
def pagespeed_api_key(self) -> str | None:
|
||||
"""Get PageSpeed Insights API key."""
|
||||
return os.getenv("PAGESPEED_API_KEY")
|
||||
|
||||
@property
|
||||
def custom_search_api_key(self) -> str | None:
|
||||
"""Get Custom Search API key."""
|
||||
return os.getenv("CUSTOM_SEARCH_API_KEY")
|
||||
|
||||
@property
|
||||
def custom_search_engine_id(self) -> str | None:
|
||||
"""Get Custom Search Engine ID."""
|
||||
return os.getenv("CUSTOM_SEARCH_ENGINE_ID")
|
||||
|
||||
@property
|
||||
def notion_token(self) -> str | None:
|
||||
"""Get Notion API token."""
|
||||
return os.getenv("NOTION_TOKEN") or os.getenv("NOTION_API_KEY")
|
||||
|
||||
def validate_google_credentials(self) -> bool:
|
||||
"""Validate Google credentials are configured."""
|
||||
creds_path = self.google_credentials_path
|
||||
if not creds_path:
|
||||
return False
|
||||
return os.path.exists(creds_path)
|
||||
|
||||
def get_required(self, key: str) -> str:
|
||||
"""Get required environment variable or raise error."""
|
||||
value = os.getenv(key)
|
||||
if not value:
|
||||
raise ValueError(f"Missing required environment variable: {key}")
|
||||
return value
|
||||
|
||||
|
||||
# Singleton config instance, shared by every script that imports base_client.
# Instantiating it here triggers load_dotenv() once at import time.
config = ConfigManager()
|
||||
@@ -0,0 +1,758 @@
|
||||
"""
|
||||
KPI Aggregator - Unified SEO KPI aggregation across all dimensions
|
||||
==================================================================
|
||||
Purpose: Aggregate KPIs from Ahrefs and other sources into a unified
|
||||
dashboard with health scores, baselines, targets, and ROI.
|
||||
Python: 3.10+
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import sys
|
||||
from dataclasses import dataclass, field, asdict
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import aiohttp
|
||||
|
||||
from base_client import BaseAsyncClient, config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Data classes
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@dataclass
|
||||
class KpiMetric:
|
||||
"""Single KPI metric with trend and target information."""
|
||||
name: str
|
||||
value: float
|
||||
previous_value: float | None = None
|
||||
change_pct: float | None = None
|
||||
trend: str = "stable" # up, down, stable
|
||||
target_30d: float | None = None
|
||||
target_60d: float | None = None
|
||||
target_90d: float | None = None
|
||||
|
||||
def compute_trend(self) -> None:
|
||||
"""Compute trend direction and change percentage."""
|
||||
if self.previous_value is not None and self.previous_value != 0:
|
||||
self.change_pct = round(
|
||||
((self.value - self.previous_value) / abs(self.previous_value)) * 100, 2
|
||||
)
|
||||
if self.change_pct > 2.0:
|
||||
self.trend = "up"
|
||||
elif self.change_pct < -2.0:
|
||||
self.trend = "down"
|
||||
else:
|
||||
self.trend = "stable"
|
||||
|
||||
|
||||
@dataclass
class KpiDimension:
    """A dimension grouping multiple KPI metrics."""
    name: str
    metrics: list[KpiMetric] = field(default_factory=list)
    weight: float = 0.0
    score: float = 0.0

    def compute_score(self) -> float:
        """Compute dimension score (0-100) based on metrics health.

        Each metric contributes a base score by trend (up=80, stable=60,
        anything else=35), plus a +10 bonus (capped at 100) when its value
        is positive. The dimension score is the rounded average.
        """
        if not self.metrics:
            self.score = 0.0
            return self.score
        base_by_trend = {"up": 80.0, "stable": 60.0}
        per_metric = []
        for metric in self.metrics:
            points = base_by_trend.get(metric.trend, 35.0)
            # Boost score if value is positive and non-zero
            if metric.value and metric.value > 0:
                points = min(100.0, points + 10.0)
            per_metric.append(points)
        self.score = round(sum(per_metric) / len(per_metric), 1)
        return self.score
|
||||
|
||||
|
||||
@dataclass
class HealthScore:
    """Overall SEO health score.

    Produced by KpiAggregator.calculate_health_score as the weighted
    average of per-dimension scores.
    """
    # Weighted 0-100 composite across all dimensions.
    overall: float = 0.0
    # Per-dimension scores keyed by dimension name.
    dimensions: dict[str, float] = field(default_factory=dict)
    # "improving" / "declining" / "stable", derived from metric trend counts.
    trend: str = "stable"
|
||||
|
||||
|
||||
@dataclass
class RoiEstimate:
    """ROI estimation from Ahrefs traffic cost.

    Populated by KpiAggregator.estimate_roi from the traffic dimension's
    "traffic_value_usd" metric.
    """
    # Current organic traffic value in USD.
    traffic_value_usd: float = 0.0
    # Absolute USD change versus the metric's previous value (0.0 if no baseline).
    traffic_value_change: float = 0.0
    # Mirrors traffic_value_usd (treated as a monthly figure by estimate_roi).
    estimated_monthly_value: float = 0.0
|
||||
|
||||
|
||||
@dataclass
class KpiResult:
    """Complete KPI aggregation result.

    Built by KpiAggregator.aggregate and rendered by format_text_report /
    serialize_result.
    """
    # Target URL or domain analyzed.
    url: str = ""
    # Weighted overall health score (0-100).
    health_score: float = 0.0
    # "improving" / "declining" / "stable".
    health_trend: str = "stable"
    # Per-dimension payloads: {"score", "weight", "metrics": {name: asdict(KpiMetric)}}.
    kpis: dict[str, Any] = field(default_factory=dict)
    # 30/60/90-day targets keyed "30_day"/"60_day"/"90_day" -> {"dim.metric": value}.
    targets: dict[str, Any] = field(default_factory=dict)
    # Present only when aggregate(..., include_roi=True) found a traffic dimension.
    roi: RoiEstimate | None = None
    # Present only when a baseline file was supplied and loaded successfully.
    baseline_comparison: dict[str, Any] | None = None
    # Wins / concerns / recommendations summary for reporting.
    executive_summary: dict[str, Any] = field(default_factory=dict)
    # ISO-8601 timestamp of the aggregation run.
    timestamp: str = ""
    # Non-fatal error messages accumulated during aggregation.
    errors: list[str] = field(default_factory=list)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Dimension weights
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Relative weight of each KPI dimension in the overall health score.
# Values sum to 1.0; consumed when constructing each KpiDimension and by
# calculate_health_score's weighted average.
DIMENSION_WEIGHTS = {
    "traffic": 0.25,
    "rankings": 0.20,
    "technical": 0.20,
    "content": 0.15,
    "links": 0.15,
    "local": 0.05,
}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# KPI Aggregator
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class KpiAggregator(BaseAsyncClient):
    """Aggregate SEO KPIs across all dimensions from Ahrefs data.

    Collects traffic/ranking/link/technical/content dimensions from the
    Ahrefs v3 API (local is a zeroed placeholder), scores them, compares
    against an optional baseline, and produces a KpiResult.
    """

    # Ahrefs API v3 root; endpoint paths below are appended to this.
    AHREFS_BASE = "https://api.ahrefs.com/v3"

    def __init__(self, api_token: str | None = None):
        """Initialize with an Ahrefs bearer token.

        Args:
            api_token: Explicit token; falls back to the AHREFS_API_TOKEN
                environment variable (config.get_required raises ValueError
                when it is missing).
        """
        # Conservative limits: 3 concurrent calls at 2 requests/second.
        super().__init__(max_concurrent=3, requests_per_second=2.0)
        self.api_token = api_token or config.get_required("AHREFS_API_TOKEN")
        self.headers = {
            "Authorization": f"Bearer {self.api_token}",
            "Accept": "application/json",
        }

    # ----- Ahrefs API helpers -----

    async def _ahrefs_get(
        self, session: aiohttp.ClientSession, endpoint: str, params: dict
    ) -> dict:
        """Make an authenticated GET request to Ahrefs API.

        Returns the decoded JSON body on 200, otherwise an
        {"error": "HTTP <status>", "detail": <body>} dict — HTTP errors
        never raise from here.
        """
        url = f"{self.AHREFS_BASE}/{endpoint}"
        async with session.get(url, headers=self.headers, params=params) as resp:
            if resp.status != 200:
                text = await resp.text()
                self.logger.warning(f"Ahrefs {endpoint} returned {resp.status}: {text}")
                return {"error": f"HTTP {resp.status}", "detail": text}
            return await resp.json()

    # ----- Dimension collectors -----
    # NOTE(review): the traffic/rankings/technical/content collectors each
    # fetch the same site-explorer/metrics payload independently (4 calls
    # per aggregation) — consider fetching once and sharing.

    async def get_traffic_kpis(
        self, session: aiohttp.ClientSession, url: str
    ) -> KpiDimension:
        """Collect traffic KPIs via site-explorer-metrics.

        On API error or exception, zeroed metrics are recorded so
        downstream scoring still has something to work with.
        """
        dim = KpiDimension(name="traffic", weight=DIMENSION_WEIGHTS["traffic"])
        try:
            data = await self._ahrefs_get(
                session,
                "site-explorer/metrics",
                {"target": url, "mode": "domain"},
            )
            if "error" not in data:
                # Some responses nest values under "metrics"; fall back to root.
                metrics = data.get("metrics", data)
                organic = metrics.get("organic", {})
                organic_traffic = organic.get("traffic", 0)
                traffic_value_raw = organic.get("cost", 0)
                # NOTE(review): assumes Ahrefs "cost" is reported in cents — confirm.
                traffic_value_usd = traffic_value_raw / 100.0 if traffic_value_raw else 0.0
                dim.metrics.append(
                    KpiMetric(name="organic_traffic", value=float(organic_traffic))
                )
                dim.metrics.append(
                    KpiMetric(name="traffic_value_usd", value=round(traffic_value_usd, 2))
                )
            else:
                dim.metrics.append(KpiMetric(name="organic_traffic", value=0.0))
                dim.metrics.append(KpiMetric(name="traffic_value_usd", value=0.0))
        except Exception as exc:
            self.logger.error(f"Traffic KPI error: {exc}")
            dim.metrics.append(KpiMetric(name="organic_traffic", value=0.0))
        dim.compute_score()
        return dim

    async def get_ranking_kpis(
        self, session: aiohttp.ClientSession, url: str
    ) -> KpiDimension:
        """Collect ranking KPIs via site-explorer-metrics."""
        dim = KpiDimension(name="rankings", weight=DIMENSION_WEIGHTS["rankings"])
        try:
            data = await self._ahrefs_get(
                session,
                "site-explorer/metrics",
                {"target": url, "mode": "domain"},
            )
            if "error" not in data:
                metrics = data.get("metrics", data)
                organic = metrics.get("organic", {})
                keywords_total = organic.get("keywords", 0)
                # Estimate top10 as ~20% of total keywords
                top10_estimate = int(keywords_total * 0.20)
                # Visibility score heuristic: based on traffic relative to keywords
                traffic = organic.get("traffic", 0)
                visibility = min(100.0, (traffic / max(keywords_total, 1)) * 10)
                dim.metrics.append(
                    KpiMetric(name="visibility_score", value=round(visibility, 1))
                )
                dim.metrics.append(
                    KpiMetric(name="top10_keywords", value=float(top10_estimate))
                )
                dim.metrics.append(
                    KpiMetric(name="total_keywords", value=float(keywords_total))
                )
            else:
                dim.metrics.append(KpiMetric(name="visibility_score", value=0.0))
                dim.metrics.append(KpiMetric(name="top10_keywords", value=0.0))
        except Exception as exc:
            self.logger.error(f"Ranking KPI error: {exc}")
            dim.metrics.append(KpiMetric(name="visibility_score", value=0.0))
        dim.compute_score()
        return dim

    async def get_link_kpis(
        self, session: aiohttp.ClientSession, url: str
    ) -> KpiDimension:
        """Collect link KPIs via domain-rating and metrics."""
        dim = KpiDimension(name="links", weight=DIMENSION_WEIGHTS["links"])
        try:
            # Domain rating
            dr_data = await self._ahrefs_get(
                session,
                "site-explorer/domain-rating",
                {"target": url},
            )
            domain_rating = 0.0
            if "error" not in dr_data:
                # Key name varies by API version; try both spellings.
                domain_rating = float(
                    dr_data.get("domain_rating", dr_data.get("domainRating", 0))
                )
            dim.metrics.append(
                KpiMetric(name="domain_rating", value=round(domain_rating, 1))
            )

            # Referring domains from metrics
            metrics_data = await self._ahrefs_get(
                session,
                "site-explorer/metrics",
                {"target": url, "mode": "domain"},
            )
            ref_domains = 0
            if "error" not in metrics_data:
                metrics = metrics_data.get("metrics", metrics_data)
                ref_domains = metrics.get("refdomains", 0)
            dim.metrics.append(
                KpiMetric(name="referring_domains", value=float(ref_domains))
            )
        except Exception as exc:
            # NOTE(review): if the failure happens after domain_rating was
            # appended, this adds a duplicate domain_rating metric.
            self.logger.error(f"Link KPI error: {exc}")
            dim.metrics.append(KpiMetric(name="domain_rating", value=0.0))
            dim.metrics.append(KpiMetric(name="referring_domains", value=0.0))
        dim.compute_score()
        return dim

    async def get_technical_kpis(
        self, session: aiohttp.ClientSession, url: str
    ) -> KpiDimension:
        """Collect technical KPIs (estimated from available data)."""
        dim = KpiDimension(name="technical", weight=DIMENSION_WEIGHTS["technical"])
        try:
            data = await self._ahrefs_get(
                session,
                "site-explorer/metrics",
                {"target": url, "mode": "domain"},
            )
            if "error" not in data:
                metrics = data.get("metrics", data)
                organic = metrics.get("organic", {})
                pages_crawled = metrics.get("pages", organic.get("pages", 0))
                # Heuristic: technical health score from available data
                # (base 50, +25 for any traffic, +25 for any pages).
                has_traffic = organic.get("traffic", 0) > 0
                has_pages = pages_crawled > 0
                tech_score = 50.0
                if has_traffic:
                    tech_score += 25.0
                if has_pages:
                    tech_score += 25.0
                dim.metrics.append(
                    KpiMetric(name="technical_health_score", value=round(tech_score, 1))
                )
                dim.metrics.append(
                    KpiMetric(name="pages_crawled", value=float(pages_crawled))
                )
            else:
                dim.metrics.append(KpiMetric(name="technical_health_score", value=50.0))
                dim.metrics.append(KpiMetric(name="pages_crawled", value=0.0))
        except Exception as exc:
            self.logger.error(f"Technical KPI error: {exc}")
            dim.metrics.append(KpiMetric(name="technical_health_score", value=50.0))
        dim.compute_score()
        return dim

    async def get_content_kpis(
        self, session: aiohttp.ClientSession, url: str
    ) -> KpiDimension:
        """Collect content KPIs from available metrics."""
        dim = KpiDimension(name="content", weight=DIMENSION_WEIGHTS["content"])
        try:
            data = await self._ahrefs_get(
                session,
                "site-explorer/metrics",
                {"target": url, "mode": "domain"},
            )
            if "error" not in data:
                metrics = data.get("metrics", data)
                organic = metrics.get("organic", {})
                pages = metrics.get("pages", organic.get("pages", 0))
                keywords = organic.get("keywords", 0)
                # Content freshness heuristic
                freshness = min(100.0, (keywords / max(pages, 1)) * 5) if pages else 0.0
                dim.metrics.append(
                    KpiMetric(name="indexed_pages", value=float(pages))
                )
                dim.metrics.append(
                    KpiMetric(name="content_freshness_score", value=round(freshness, 1))
                )
                dim.metrics.append(
                    KpiMetric(name="keywords_per_page", value=round(keywords / max(pages, 1), 2))
                )
            else:
                dim.metrics.append(KpiMetric(name="indexed_pages", value=0.0))
                dim.metrics.append(KpiMetric(name="content_freshness_score", value=0.0))
        except Exception as exc:
            self.logger.error(f"Content KPI error: {exc}")
            dim.metrics.append(KpiMetric(name="indexed_pages", value=0.0))
        dim.compute_score()
        return dim

    async def get_local_kpis(self, url: str) -> KpiDimension:
        """Placeholder for local KPIs (requires external data).

        Always returns zeroed metrics; no I/O is performed.
        """
        dim = KpiDimension(name="local", weight=DIMENSION_WEIGHTS["local"])
        dim.metrics.append(KpiMetric(name="gbp_visibility", value=0.0))
        dim.metrics.append(KpiMetric(name="review_score", value=0.0))
        dim.metrics.append(KpiMetric(name="citation_accuracy", value=0.0))
        dim.compute_score()
        return dim

    # ----- Health score -----

    def calculate_health_score(self, dimensions: list[KpiDimension]) -> HealthScore:
        """Calculate weighted health score across all dimensions.

        Overall = sum(score * weight) / sum(weight); overall trend is
        decided by a simple majority of per-metric up vs. down trends.
        """
        health = HealthScore()
        total_weight = 0.0
        weighted_sum = 0.0

        for dim in dimensions:
            dim.compute_score()
            health.dimensions[dim.name] = dim.score
            weighted_sum += dim.score * dim.weight
            total_weight += dim.weight

        if total_weight > 0:
            health.overall = round(weighted_sum / total_weight, 1)
        else:
            health.overall = 0.0

        # Determine trend from dimension trends
        up_count = sum(
            1 for d in dimensions
            for m in d.metrics if m.trend == "up"
        )
        down_count = sum(
            1 for d in dimensions
            for m in d.metrics if m.trend == "down"
        )
        if up_count > down_count:
            health.trend = "improving"
        elif down_count > up_count:
            health.trend = "declining"
        else:
            health.trend = "stable"

        return health

    # ----- Targets -----

    def set_targets(self, dimensions: list[KpiDimension]) -> dict[str, Any]:
        """Calculate 30/60/90 day targets (5%/10%/20% improvement).

        Also writes the computed targets back onto each metric's
        target_30d/60d/90d fields. Metrics with value <= 0 get no targets.
        """
        targets = {"30_day": {}, "60_day": {}, "90_day": {}}
        growth_rates = {"30_day": 0.05, "60_day": 0.10, "90_day": 0.20}

        for dim in dimensions:
            for metric in dim.metrics:
                if metric.value and metric.value > 0:
                    for period, rate in growth_rates.items():
                        key = f"{dim.name}.{metric.name}"
                        # For metrics where lower is better (e.g. bounce rate),
                        # improvement means decrease
                        if metric.name in ("bounce_rate", "crawl_errors", "thin_content_ratio"):
                            target_val = metric.value * (1 - rate)
                        else:
                            target_val = metric.value * (1 + rate)
                        targets[period][key] = round(target_val, 2)
                    metric.target_30d = targets["30_day"].get(f"{dim.name}.{metric.name}")
                    metric.target_60d = targets["60_day"].get(f"{dim.name}.{metric.name}")
                    metric.target_90d = targets["90_day"].get(f"{dim.name}.{metric.name}")
        return targets

    # ----- ROI estimation -----

    def estimate_roi(self, traffic_dim: KpiDimension) -> RoiEstimate:
        """Estimate ROI from Ahrefs traffic cost data.

        Reads the "traffic_value_usd" metric; change is filled only when
        a baseline gave the metric a previous_value.
        """
        roi = RoiEstimate()
        for metric in traffic_dim.metrics:
            if metric.name == "traffic_value_usd":
                roi.traffic_value_usd = metric.value
                roi.estimated_monthly_value = metric.value
                if metric.previous_value is not None:
                    roi.traffic_value_change = round(
                        metric.value - metric.previous_value, 2
                    )
        return roi

    # ----- Baseline comparison -----

    def compare_baseline(
        self, current: list[KpiDimension], baseline: dict[str, Any]
    ) -> dict[str, Any]:
        """Compare current KPIs against a stored baseline.

        Side effect: sets previous_value and recomputes trend on each
        matched metric, which later feeds scoring, ROI, and the summary.
        """
        comparison = {}
        baseline_kpis = baseline.get("kpis", {})

        for dim in current:
            dim_baseline = baseline_kpis.get(dim.name, {})
            dim_comparison = {}
            for metric in dim.metrics:
                baseline_val = None
                if isinstance(dim_baseline, dict):
                    baseline_val = dim_baseline.get(metric.name)
                if baseline_val is not None:
                    metric.previous_value = float(baseline_val)
                    metric.compute_trend()
                    dim_comparison[metric.name] = {
                        "current": metric.value,
                        "baseline": baseline_val,
                        "change_pct": metric.change_pct,
                        "trend": metric.trend,
                    }
                else:
                    dim_comparison[metric.name] = {
                        "current": metric.value,
                        "baseline": None,
                        "change_pct": None,
                        "trend": "no_baseline",
                    }
            comparison[dim.name] = dim_comparison
        return comparison

    # ----- Executive summary -----

    def generate_executive_summary(
        self, dimensions: list[KpiDimension], health: HealthScore
    ) -> dict[str, Any]:
        """Generate executive summary with wins, concerns, recommendations.

        Wins/concerns require a >5% move; recommendations are driven by
        dimension scores (<50 priority, <70 monitor). Each list is capped
        at five entries and padded with a neutral message when empty.
        """
        wins = []
        concerns = []
        recommendations = []

        for dim in dimensions:
            for metric in dim.metrics:
                if metric.trend == "up" and metric.change_pct and metric.change_pct > 5:
                    wins.append(
                        f"{dim.name}/{metric.name}: +{metric.change_pct}% improvement"
                    )
                elif metric.trend == "down" and metric.change_pct and metric.change_pct < -5:
                    concerns.append(
                        f"{dim.name}/{metric.name}: {metric.change_pct}% decline"
                    )

        # Generate recommendations based on dimension scores
        for dim in dimensions:
            if dim.score < 50:
                recommendations.append(
                    f"Priority: Improve {dim.name} dimension (score: {dim.score}/100)"
                )
            elif dim.score < 70:
                recommendations.append(
                    f"Monitor: {dim.name} dimension needs attention (score: {dim.score}/100)"
                )

        if not wins:
            wins.append("No significant improvements detected in this period")
        if not concerns:
            concerns.append("No significant declines detected in this period")
        if not recommendations:
            recommendations.append("All dimensions performing well - maintain current strategy")

        return {
            "health_score": health.overall,
            "health_trend": health.trend,
            "top_wins": wins[:5],
            "top_concerns": concerns[:5],
            "recommendations": recommendations[:5],
        }

    # ----- Main orchestration -----

    async def aggregate(
        self,
        url: str,
        include_roi: bool = False,
        baseline_path: str | None = None,
        set_baseline: bool = False,
    ) -> KpiResult:
        """Orchestrate full KPI aggregation across all dimensions.

        Args:
            url: Target URL or domain.
            include_roi: Also compute an RoiEstimate from the traffic dimension.
            baseline_path: Optional JSON baseline file to compare against.
            set_baseline: Write current metric values to a baseline_<url>.json
                file in the working directory.

        Returns:
            A populated KpiResult; per-dimension failures land in
            result.errors rather than raising.
        """
        result = KpiResult(url=url, timestamp=datetime.now().isoformat())
        dimensions: list[KpiDimension] = []

        async with aiohttp.ClientSession() as session:
            # Collect all dimensions concurrently
            tasks = [
                self.get_traffic_kpis(session, url),
                self.get_ranking_kpis(session, url),
                self.get_link_kpis(session, url),
                self.get_technical_kpis(session, url),
                self.get_content_kpis(session, url),
            ]
            gathered = await asyncio.gather(*tasks, return_exceptions=True)

            for item in gathered:
                if isinstance(item, Exception):
                    result.errors.append(str(item))
                    self.logger.error(f"Dimension error: {item}")
                else:
                    dimensions.append(item)

            # Local KPIs (no API call needed)
            local_dim = await self.get_local_kpis(url)
            dimensions.append(local_dim)

            # Load baseline if provided — must run before scoring so
            # compare_baseline's trend updates feed the health score.
            if baseline_path:
                try:
                    baseline_data = json.loads(Path(baseline_path).read_text())
                    result.baseline_comparison = self.compare_baseline(dimensions, baseline_data)
                except Exception as exc:
                    result.errors.append(f"Baseline load error: {exc}")

            # Calculate health score
            health = self.calculate_health_score(dimensions)
            result.health_score = health.overall
            result.health_trend = health.trend

            # Build KPI dictionary
            for dim in dimensions:
                result.kpis[dim.name] = {
                    "score": dim.score,
                    "weight": dim.weight,
                    "metrics": {m.name: asdict(m) for m in dim.metrics},
                }

            # Set targets
            targets = self.set_targets(dimensions)
            result.targets = targets

            # ROI estimation
            if include_roi:
                traffic_dim = next((d for d in dimensions if d.name == "traffic"), None)
                if traffic_dim:
                    roi = self.estimate_roi(traffic_dim)
                    result.roi = roi

            # Executive summary
            result.executive_summary = self.generate_executive_summary(dimensions, health)

            # Save baseline if requested
            if set_baseline:
                baseline_out = {
                    "url": url,
                    "timestamp": result.timestamp,
                    "kpis": {},
                }
                for dim in dimensions:
                    baseline_out["kpis"][dim.name] = {
                        m.name: m.value for m in dim.metrics
                    }
                baseline_file = f"baseline_{url.replace('https://', '').replace('/', '_')}.json"
                Path(baseline_file).write_text(json.dumps(baseline_out, indent=2))
                self.logger.info(f"Baseline saved to {baseline_file}")

        return result
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Output formatting
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def format_text_report(result: KpiResult) -> str:
    """Render a KpiResult as a plain-text dashboard report.

    Sections (targets, ROI, executive summary, errors) are included only
    when the corresponding result field is non-empty.
    """
    out: list[str] = []
    add = out.append

    # Header banner
    add("=" * 60)
    add(f"SEO KPI Dashboard: {result.url}")
    add(f"Timestamp: {result.timestamp}")
    add("=" * 60)
    add("")

    # Overall health
    add(f"Overall Health Score: {result.health_score}/100 ({result.health_trend})")
    add("-" * 40)

    # One section per dimension, with a trend marker on each metric line.
    arrow_for = {"up": "^", "down": "v", "stable": "=", "no_baseline": "?"}
    for dim_name, dim_data in result.kpis.items():
        add(f"\n[{dim_name.upper()}] Score: {dim_data['score']}/100 (weight: {dim_data['weight']})")
        for m_name, m_data in dim_data.get("metrics", {}).items():
            trend_arrow = arrow_for.get(m_data.get("trend", "stable"), "=")
            val = m_data.get("value", 0)
            change = m_data.get("change_pct")
            change_str = f" ({change:+.1f}%)" if change is not None else ""
            add(f"  {trend_arrow} {m_name}: {val}{change_str}")

    # 30/60/90-day targets, capped at ten entries per period
    if result.targets:
        add("\n" + "-" * 40)
        add("TARGETS")
        for period, targets in result.targets.items():
            if not targets:
                continue
            add(f"\n  {period}:")
            for key, val in list(targets.items())[:10]:
                add(f"    {key}: {val}")

    # ROI estimate, when computed
    if result.roi:
        add("\n" + "-" * 40)
        add("ROI ESTIMATE")
        add(f"  Traffic Value (USD): ${result.roi.traffic_value_usd:,.2f}")
        add(f"  Monthly Value: ${result.roi.estimated_monthly_value:,.2f}")
        add(f"  Value Change: ${result.roi.traffic_value_change:,.2f}")

    # Executive summary: wins, concerns, recommendations
    if result.executive_summary:
        add("\n" + "-" * 40)
        add("EXECUTIVE SUMMARY")
        add(f"  Health: {result.executive_summary.get('health_score', 0)}/100")
        add(f"  Trend: {result.executive_summary.get('health_trend', 'stable')}")
        add("\n  Top Wins:")
        for win in result.executive_summary.get("top_wins", []):
            add(f"    + {win}")
        add("\n  Top Concerns:")
        for concern in result.executive_summary.get("top_concerns", []):
            add(f"    - {concern}")
        add("\n  Recommendations:")
        for rec in result.executive_summary.get("recommendations", []):
            add(f"    > {rec}")

    # Non-fatal errors gathered during aggregation
    if result.errors:
        add("\n" + "-" * 40)
        add("ERRORS:")
        for err in result.errors:
            add(f"  ! {err}")

    add("\n" + "=" * 60)
    return "\n".join(out)
|
||||
|
||||
|
||||
def serialize_result(result: KpiResult) -> dict:
    """Convert a KpiResult into a plain, JSON-serializable dictionary.

    Optional sections (roi, baseline_comparison) are emitted only when
    present on the result, so the output schema matches what was computed.
    """
    payload = {
        key: getattr(result, key)
        for key in (
            "url",
            "health_score",
            "health_trend",
            "kpis",
            "targets",
            "executive_summary",
            "timestamp",
            "errors",
        )
    }
    if result.roi:
        # ROI is a dataclass; flatten it for JSON output.
        payload["roi"] = asdict(result.roi)
    if result.baseline_comparison:
        payload["baseline_comparison"] = result.baseline_comparison
    return payload
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# CLI entry point
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def parse_args() -> argparse.Namespace:
    """Define and parse the command-line options for the KPI aggregator CLI."""
    parser = argparse.ArgumentParser(
        description="SEO KPI Aggregator - Unified metrics dashboard"
    )
    # (flag, add_argument keyword options) — registered in order below.
    flag_specs = [
        ("--url", {"required": True, "help": "Target URL or domain to analyze"}),
        ("--set-baseline", {"action": "store_true",
                            "help": "Save current KPIs as baseline file"}),
        ("--baseline", {"type": str, "default": None,
                        "help": "Path to baseline JSON file for comparison"}),
        ("--roi", {"action": "store_true",
                   "help": "Include ROI estimation from traffic cost"}),
        ("--json", {"action": "store_true",
                    "help": "Output results as JSON"}),
        ("--output", {"type": str, "default": None,
                      "help": "Save output to file path"}),
    ]
    for flag, options in flag_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()
|
||||
|
||||
|
||||
async def main() -> None:
    """Main entry point.

    Parses CLI arguments, runs the KPI aggregation, renders the result as
    JSON or plain text, and writes it to --output or stdout.
    """
    args = parse_args()

    aggregator = KpiAggregator()
    result = await aggregator.aggregate(
        url=args.url,
        include_roi=args.roi,
        baseline_path=args.baseline,
        set_baseline=args.set_baseline,
    )

    # Render either machine-readable JSON or the human-readable text report.
    if args.json:
        output = json.dumps(serialize_result(result), indent=2, ensure_ascii=False)
    else:
        output = format_text_report(result)

    # --output writes to disk; otherwise the report goes to stdout.
    if args.output:
        Path(args.output).write_text(output, encoding="utf-8")
        logger.info(f"Output saved to {args.output}")
    else:
        print(output)

    # NOTE(review): print_stats presumably reports request/rate-limit
    # counters from the shared async client base — confirm in base_client.py.
    aggregator.print_stats()
|
||||
|
||||
|
||||
# Script entry point: run the async CLI under a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
|
||||
@@ -0,0 +1,801 @@
|
||||
"""
|
||||
Performance Reporter - Period-over-period SEO performance reports
|
||||
================================================================
|
||||
Purpose: Generate executive summaries, trend analysis, tactical breakdowns,
|
||||
and target-vs-actual comparison from Ahrefs historical data.
|
||||
Python: 3.10+
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import sys
|
||||
from dataclasses import dataclass, field, asdict
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import aiohttp
|
||||
|
||||
from base_client import BaseAsyncClient, config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Data classes
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@dataclass
class TrendData:
    """Single trend data point for a metric.

    Produced by PerformanceReporter.calculate_period_comparison, which
    stores the metric name in ``period`` and the period-averaged value
    in ``value``.
    """
    period: str  # label for this point (the metric name as emitted by the reporter)
    value: float  # averaged metric value for the current window, rounded to 2 dp
    change_pct: float | None = None  # percent change vs previous window; None when no baseline
    direction: str = "stable"  # up, down, stable
|
||||
|
||||
|
||||
@dataclass
class WinConcern:
    """A notable win or concern from performance analysis."""
    category: str  # human-readable metric label, e.g. "Organic Traffic"
    description: str  # formatted change summary with before/after values
    impact: str = "medium"  # high, medium, low
    action: str = ""  # suggested follow-up step
|
||||
|
||||
|
||||
@dataclass
class TargetProgress:
    """Target vs actual progress tracking for a single KPI."""
    kpi_name: str  # KPI identifier, possibly dotted (e.g. "organic.traffic")
    target: float  # goal value for the tracking window
    actual: float  # observed value
    progress_pct: float = 0.0  # actual/target in percent; set by compute_progress()

    def compute_progress(self) -> None:
        """Populate progress_pct as actual/target in percent.

        A zero (or unset) target yields 0.0 rather than dividing by zero.
        """
        has_target = bool(self.target)
        self.progress_pct = (
            round(self.actual / self.target * 100, 1) if has_target else 0.0
        )
|
||||
|
||||
|
||||
@dataclass
class PerformanceReport:
    """Complete performance report."""
    url: str = ""  # target domain or URL
    period: str = "monthly"  # monthly, quarterly, yearly, or custom
    date_from: str = ""  # ISO date (YYYY-MM-DD) of window start
    date_to: str = ""  # ISO date (YYYY-MM-DD) of window end
    health_score: float = 0.0  # 0-100 heuristic score (see report())
    health_trend: str = "stable"  # improving, declining, stable
    # NOTE: report() stores asdict()-serialized TrendData dicts here, keyed
    # by metric name — not TrendData instances.
    trends: dict[str, list[dict]] = field(default_factory=dict)
    wins: list[WinConcern] = field(default_factory=list)
    concerns: list[WinConcern] = field(default_factory=list)
    executive_summary: dict[str, Any] = field(default_factory=dict)
    tactical_breakdown: dict[str, Any] = field(default_factory=dict)
    target_progress: list[TargetProgress] = field(default_factory=list)
    traffic_value_change: float = 0.0  # period-over-period traffic value delta
    timestamp: str = ""  # ISO timestamp of report generation
    errors: list[str] = field(default_factory=list)  # non-fatal data-fetch errors
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Period helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Lookback window (in days) for each named reporting period. Periods not
# listed here (e.g. "custom") fall back to 30 days in get_date_range
# unless explicit --from/--to dates are supplied.
PERIOD_DAYS = {
    "monthly": 30,
    "quarterly": 90,
    "yearly": 365,
}
|
||||
|
||||
|
||||
def get_date_range(
    period: str, date_from: str | None = None, date_to: str | None = None
) -> tuple[str, str]:
    """Resolve a (start, end) ISO-date pair for the report window.

    Explicit dates take precedence when both are given; otherwise the
    window ends today and spans the number of days configured for
    *period* (falling back to 30 for unknown periods).
    """
    if date_from and date_to:
        return date_from, date_to
    window_end = datetime.now()
    window_start = window_end - timedelta(days=PERIOD_DAYS.get(period, 30))
    fmt = "%Y-%m-%d"
    return window_start.strftime(fmt), window_end.strftime(fmt)
|
||||
|
||||
|
||||
def get_previous_range(
    date_from: str, date_to: str
) -> tuple[str, str]:
    """Return the immediately preceding window of the same length.

    The previous window ends the day before *date_from* and covers the
    same span as [date_from, date_to], so period-over-period comparisons
    use equally sized windows.
    """
    fmt = "%Y-%m-%d"
    current_start = datetime.strptime(date_from, fmt)
    span = datetime.strptime(date_to, fmt) - current_start
    previous_end = current_start - timedelta(days=1)
    previous_start = previous_end - span
    return previous_start.strftime(fmt), previous_end.strftime(fmt)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Performance Reporter
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class PerformanceReporter(BaseAsyncClient):
|
||||
"""Generate period-over-period SEO performance reports from Ahrefs."""
|
||||
|
||||
AHREFS_BASE = "https://api.ahrefs.com/v3"
|
||||
|
||||
def __init__(self, api_token: str | None = None):
|
||||
super().__init__(max_concurrent=3, requests_per_second=2.0)
|
||||
self.api_token = api_token or config.get_required("AHREFS_API_TOKEN")
|
||||
self.headers = {
|
||||
"Authorization": f"Bearer {self.api_token}",
|
||||
"Accept": "application/json",
|
||||
}
|
||||
|
||||
    async def _ahrefs_get(
        self, session: aiohttp.ClientSession, endpoint: str, params: dict
    ) -> dict:
        """Make an authenticated GET request to Ahrefs API.

        Returns the decoded JSON body on HTTP 200. On any other status it
        returns a dict with "error"/"detail" keys instead of raising, so
        callers can degrade gracefully on partial API failures.
        """
        url = f"{self.AHREFS_BASE}/{endpoint}"
        async with session.get(url, headers=self.headers, params=params) as resp:
            if resp.status != 200:
                text = await resp.text()
                # NOTE(review): self.logger presumably comes from
                # BaseAsyncClient — confirm in base_client.py.
                self.logger.warning(f"Ahrefs {endpoint} returned {resp.status}: {text}")
                return {"error": f"HTTP {resp.status}", "detail": text}
            return await resp.json()
|
||||
|
||||
# ----- Data collectors -----
|
||||
|
||||
async def get_metrics_history(
|
||||
self,
|
||||
session: aiohttp.ClientSession,
|
||||
url: str,
|
||||
date_from: str,
|
||||
date_to: str,
|
||||
) -> list[dict]:
|
||||
"""Fetch historical metrics via site-explorer-metrics-history."""
|
||||
data = await self._ahrefs_get(
|
||||
session,
|
||||
"site-explorer/metrics-history",
|
||||
{
|
||||
"target": url,
|
||||
"mode": "domain",
|
||||
"date_from": date_from,
|
||||
"date_to": date_to,
|
||||
},
|
||||
)
|
||||
if "error" in data:
|
||||
self.logger.warning(f"Metrics history error: {data}")
|
||||
return []
|
||||
return data.get("metrics", data.get("data", []))
|
||||
|
||||
async def get_dr_history(
|
||||
self,
|
||||
session: aiohttp.ClientSession,
|
||||
url: str,
|
||||
date_from: str,
|
||||
date_to: str,
|
||||
) -> list[dict]:
|
||||
"""Fetch domain rating history."""
|
||||
data = await self._ahrefs_get(
|
||||
session,
|
||||
"site-explorer/domain-rating-history",
|
||||
{
|
||||
"target": url,
|
||||
"date_from": date_from,
|
||||
"date_to": date_to,
|
||||
},
|
||||
)
|
||||
if "error" in data:
|
||||
return []
|
||||
return data.get("domain_rating_history", data.get("data", []))
|
||||
|
||||
async def get_current_metrics(
|
||||
self, session: aiohttp.ClientSession, url: str
|
||||
) -> dict:
|
||||
"""Fetch current snapshot metrics."""
|
||||
data = await self._ahrefs_get(
|
||||
session,
|
||||
"site-explorer/metrics",
|
||||
{"target": url, "mode": "domain"},
|
||||
)
|
||||
if "error" in data:
|
||||
return {}
|
||||
return data.get("metrics", data)
|
||||
|
||||
async def get_volume_history(
|
||||
self,
|
||||
session: aiohttp.ClientSession,
|
||||
url: str,
|
||||
date_from: str,
|
||||
date_to: str,
|
||||
) -> list[dict]:
|
||||
"""Fetch total search volume history."""
|
||||
data = await self._ahrefs_get(
|
||||
session,
|
||||
"site-explorer/total-search-volume-history",
|
||||
{
|
||||
"target": url,
|
||||
"date_from": date_from,
|
||||
"date_to": date_to,
|
||||
},
|
||||
)
|
||||
if "error" in data:
|
||||
return []
|
||||
return data.get("total_search_volume_history", data.get("data", []))
|
||||
|
||||
# ----- Analysis methods -----
|
||||
|
||||
def calculate_period_comparison(
|
||||
self, current_data: list[dict], previous_data: list[dict], metric_key: str
|
||||
) -> list[TrendData]:
|
||||
"""Compare metric values between current and previous period."""
|
||||
trends = []
|
||||
|
||||
def avg_metric(data_list: list[dict], key: str) -> float:
|
||||
vals = []
|
||||
for entry in data_list:
|
||||
val = entry.get(key)
|
||||
if val is None:
|
||||
organic = entry.get("organic", {})
|
||||
val = organic.get(key)
|
||||
if val is not None:
|
||||
vals.append(float(val))
|
||||
return sum(vals) / len(vals) if vals else 0.0
|
||||
|
||||
current_avg = avg_metric(current_data, metric_key)
|
||||
previous_avg = avg_metric(previous_data, metric_key)
|
||||
|
||||
change_pct = None
|
||||
direction = "stable"
|
||||
if previous_avg and previous_avg != 0:
|
||||
change_pct = round(((current_avg - previous_avg) / abs(previous_avg)) * 100, 2)
|
||||
if change_pct > 2.0:
|
||||
direction = "up"
|
||||
elif change_pct < -2.0:
|
||||
direction = "down"
|
||||
|
||||
trends.append(TrendData(
|
||||
period=metric_key,
|
||||
value=round(current_avg, 2),
|
||||
change_pct=change_pct,
|
||||
direction=direction,
|
||||
))
|
||||
return trends
|
||||
|
||||
def identify_wins(
|
||||
self, current: dict, previous: dict
|
||||
) -> list[WinConcern]:
|
||||
"""Identify significant positive changes between periods."""
|
||||
wins = []
|
||||
metric_labels = {
|
||||
"traffic": "Organic Traffic",
|
||||
"cost": "Traffic Value",
|
||||
"keywords": "Keyword Count",
|
||||
"refdomains": "Referring Domains",
|
||||
}
|
||||
|
||||
for key, label in metric_labels.items():
|
||||
curr_val = self._extract_metric(current, key)
|
||||
prev_val = self._extract_metric(previous, key)
|
||||
if prev_val and prev_val > 0 and curr_val > prev_val:
|
||||
change_pct = ((curr_val - prev_val) / prev_val) * 100
|
||||
if change_pct >= 5.0:
|
||||
impact = "high" if change_pct >= 20 else ("medium" if change_pct >= 10 else "low")
|
||||
wins.append(WinConcern(
|
||||
category=label,
|
||||
description=f"{label} increased by {change_pct:+.1f}% ({prev_val:,.0f} -> {curr_val:,.0f})",
|
||||
impact=impact,
|
||||
action=f"Continue current {label.lower()} strategy",
|
||||
))
|
||||
return wins
|
||||
|
||||
def identify_concerns(
|
||||
self, current: dict, previous: dict
|
||||
) -> list[WinConcern]:
|
||||
"""Identify significant negative changes between periods."""
|
||||
concerns = []
|
||||
metric_labels = {
|
||||
"traffic": "Organic Traffic",
|
||||
"cost": "Traffic Value",
|
||||
"keywords": "Keyword Count",
|
||||
"refdomains": "Referring Domains",
|
||||
}
|
||||
|
||||
for key, label in metric_labels.items():
|
||||
curr_val = self._extract_metric(current, key)
|
||||
prev_val = self._extract_metric(previous, key)
|
||||
if prev_val and prev_val > 0 and curr_val < prev_val:
|
||||
change_pct = ((curr_val - prev_val) / prev_val) * 100
|
||||
if change_pct <= -5.0:
|
||||
impact = "high" if change_pct <= -20 else ("medium" if change_pct <= -10 else "low")
|
||||
actions = {
|
||||
"Organic Traffic": "Investigate traffic sources and algorithm updates",
|
||||
"Traffic Value": "Review keyword targeting and content quality",
|
||||
"Keyword Count": "Expand content coverage and optimize existing pages",
|
||||
"Referring Domains": "Strengthen link building outreach campaigns",
|
||||
}
|
||||
concerns.append(WinConcern(
|
||||
category=label,
|
||||
description=f"{label} decreased by {change_pct:.1f}% ({prev_val:,.0f} -> {curr_val:,.0f})",
|
||||
impact=impact,
|
||||
action=actions.get(label, f"Review {label.lower()} strategy"),
|
||||
))
|
||||
return concerns
|
||||
|
||||
def _extract_metric(self, data: dict, key: str) -> float:
|
||||
"""Extract a metric value from nested Ahrefs response."""
|
||||
if key in data:
|
||||
return float(data[key])
|
||||
organic = data.get("organic", {})
|
||||
if key in organic:
|
||||
return float(organic[key])
|
||||
return 0.0
|
||||
|
||||
def generate_executive_summary(
|
||||
self,
|
||||
wins: list[WinConcern],
|
||||
concerns: list[WinConcern],
|
||||
health_score: float,
|
||||
health_trend: str,
|
||||
traffic_value_change: float,
|
||||
) -> dict[str, Any]:
|
||||
"""Generate high-level executive summary."""
|
||||
summary = {
|
||||
"health_score": health_score,
|
||||
"health_trend": health_trend,
|
||||
"traffic_value_change_usd": round(traffic_value_change, 2),
|
||||
"total_wins": len(wins),
|
||||
"total_concerns": len(concerns),
|
||||
"top_wins": [
|
||||
{"category": w.category, "description": w.description, "impact": w.impact}
|
||||
for w in sorted(wins, key=lambda x: {"high": 0, "medium": 1, "low": 2}.get(x.impact, 3))[:5]
|
||||
],
|
||||
"top_concerns": [
|
||||
{"category": c.category, "description": c.description, "impact": c.impact}
|
||||
for c in sorted(concerns, key=lambda x: {"high": 0, "medium": 1, "low": 2}.get(x.impact, 3))[:5]
|
||||
],
|
||||
"overall_assessment": "",
|
||||
}
|
||||
|
||||
if health_score >= 75:
|
||||
summary["overall_assessment"] = "Strong performance - focus on maintaining momentum"
|
||||
elif health_score >= 50:
|
||||
summary["overall_assessment"] = "Moderate performance - targeted improvements needed"
|
||||
else:
|
||||
summary["overall_assessment"] = "Needs attention - prioritize fundamental improvements"
|
||||
|
||||
return summary
|
||||
|
||||
    def generate_tactical_breakdown(
        self, current: dict, wins: list[WinConcern], concerns: list[WinConcern]
    ) -> dict[str, Any]:
        """Generate actionable next steps per dimension.

        Builds a fixed set of dimensions (traffic, rankings, links,
        content, technical), seeds each with default actions based on the
        current snapshot, then overrides statuses using the wins/concerns
        lists. Concern actions are prepended so they appear first.
        """
        breakdown = {
            "traffic": {
                "status": "needs_review",
                "actions": [],
            },
            "rankings": {
                "status": "needs_review",
                "actions": [],
            },
            "links": {
                "status": "needs_review",
                "actions": [],
            },
            "content": {
                "status": "needs_review",
                "actions": [],
            },
            "technical": {
                "status": "needs_review",
                "actions": [],
            },
        }

        traffic = self._extract_metric(current, "traffic")
        keywords = self._extract_metric(current, "keywords")
        refdomains = self._extract_metric(current, "refdomains")

        # Traffic actions
        if traffic > 0:
            breakdown["traffic"]["status"] = "active"
            breakdown["traffic"]["actions"].append("Monitor top landing pages for traffic drops")
            breakdown["traffic"]["actions"].append("Identify new keyword opportunities in adjacent topics")
        else:
            breakdown["traffic"]["actions"].append("Establish organic traffic baseline with content strategy")

        # Rankings actions
        if keywords > 0:
            breakdown["rankings"]["status"] = "active"
            breakdown["rankings"]["actions"].append(
                f"Optimize pages for {int(keywords)} tracked keywords"
            )
            breakdown["rankings"]["actions"].append("Target featured snippets for top-performing queries")
        else:
            breakdown["rankings"]["actions"].append("Begin keyword research and content mapping")

        # Links actions
        if refdomains > 0:
            breakdown["links"]["status"] = "active"
            breakdown["links"]["actions"].append("Analyze top referring domains for partnership opportunities")
            breakdown["links"]["actions"].append("Monitor for lost backlinks and reclaim valuable links")
        else:
            breakdown["links"]["actions"].append("Develop link acquisition strategy with digital PR")

        # Content actions (always suggested; no snapshot signal available)
        breakdown["content"]["actions"].append("Audit content freshness and update older pages")
        breakdown["content"]["actions"].append("Identify content gaps using competitor analysis")

        # Technical actions (always suggested)
        breakdown["technical"]["actions"].append("Run technical SEO audit for crawl issues")
        breakdown["technical"]["actions"].append("Verify Core Web Vitals pass thresholds")

        # Enrich with win/concern context: wins upgrade a dimension's
        # status to "improving" based on category keyword matching.
        for w in wins:
            cat_lower = w.category.lower()
            if "traffic" in cat_lower and breakdown.get("traffic"):
                breakdown["traffic"]["status"] = "improving"
            if "keyword" in cat_lower and breakdown.get("rankings"):
                breakdown["rankings"]["status"] = "improving"
            if "domain" in cat_lower or "link" in cat_lower:
                breakdown["links"]["status"] = "improving"

        # Concerns win over wins (processed last) and push their suggested
        # remediation to the front of the action list.
        for c in concerns:
            cat_lower = c.category.lower()
            if "traffic" in cat_lower and breakdown.get("traffic"):
                breakdown["traffic"]["status"] = "declining"
                breakdown["traffic"]["actions"].insert(0, c.action)
            if "keyword" in cat_lower and breakdown.get("rankings"):
                breakdown["rankings"]["status"] = "declining"
                breakdown["rankings"]["actions"].insert(0, c.action)

        return breakdown
|
||||
|
||||
def compare_targets(
|
||||
self, current: dict, targets: dict
|
||||
) -> list[TargetProgress]:
|
||||
"""Compare current metrics against saved targets."""
|
||||
progress_list = []
|
||||
for key, target_val in targets.items():
|
||||
parts = key.split(".")
|
||||
metric_name = parts[-1] if len(parts) > 1 else key
|
||||
actual = self._extract_metric(current, metric_name)
|
||||
if actual == 0.0 and len(parts) > 1:
|
||||
# Try alternate key resolution
|
||||
actual = current.get(key, 0.0)
|
||||
if isinstance(actual, dict):
|
||||
actual = 0.0
|
||||
tp = TargetProgress(
|
||||
kpi_name=key,
|
||||
target=float(target_val),
|
||||
actual=float(actual),
|
||||
)
|
||||
tp.compute_progress()
|
||||
progress_list.append(tp)
|
||||
return progress_list
|
||||
|
||||
# ----- Main orchestration -----
|
||||
|
||||
    async def report(
        self,
        url: str,
        period: str = "monthly",
        date_from: str | None = None,
        date_to: str | None = None,
        executive_only: bool = False,
        targets_path: str | None = None,
    ) -> PerformanceReport:
        """Orchestrate full performance report generation.

        Fetches current- and previous-period data concurrently, derives
        trends, wins/concerns, a heuristic health score, and an executive
        summary; unless *executive_only* is set, it also builds the
        tactical breakdown and (when *targets_path* is given) the
        target-vs-actual comparison. Fetch failures are recorded in
        report.errors rather than raised.
        """
        report = PerformanceReport(
            url=url,
            period=period,
            timestamp=datetime.now().isoformat(),
        )

        # Determine date ranges: current window plus the equal-length
        # preceding window used as the comparison baseline.
        report.date_from, report.date_to = get_date_range(period, date_from, date_to)
        prev_from, prev_to = get_previous_range(report.date_from, report.date_to)

        async with aiohttp.ClientSession() as session:
            # Fetch current and previous period data concurrently
            tasks = [
                self.get_metrics_history(session, url, report.date_from, report.date_to),
                self.get_metrics_history(session, url, prev_from, prev_to),
                self.get_current_metrics(session, url),
                self.get_dr_history(session, url, report.date_from, report.date_to),
                self.get_volume_history(session, url, report.date_from, report.date_to),
            ]
            results = await asyncio.gather(*tasks, return_exceptions=True)

            # Exceptions degrade to empty defaults so the report can still
            # be produced from whatever data did arrive.
            current_history = results[0] if not isinstance(results[0], Exception) else []
            previous_history = results[1] if not isinstance(results[1], Exception) else []
            current_snapshot = results[2] if not isinstance(results[2], Exception) else {}
            dr_history = results[3] if not isinstance(results[3], Exception) else []
            # NOTE(review): volume_history is fetched but never used below —
            # confirm whether it should feed the health score or be dropped.
            volume_history = results[4] if not isinstance(results[4], Exception) else []

            for i, r in enumerate(results):
                if isinstance(r, Exception):
                    report.errors.append(f"Data fetch error [{i}]: {r}")

        # Calculate trends for key metrics (stored as asdict()-ed dicts).
        for metric_key in ["traffic", "keywords", "cost", "refdomains"]:
            if current_history or previous_history:
                trend = self.calculate_period_comparison(
                    current_history if isinstance(current_history, list) else [],
                    previous_history if isinstance(previous_history, list) else [],
                    metric_key,
                )
                report.trends[metric_key] = [asdict(t) for t in trend]

        # Build previous snapshot for comparison: per-metric averages over
        # the previous period, with the same "organic" fallback used elsewhere.
        previous_snapshot = {}
        if isinstance(previous_history, list) and previous_history:
            for entry in previous_history:
                for key in ("traffic", "cost", "keywords", "refdomains"):
                    val = entry.get(key)
                    if val is None:
                        organic = entry.get("organic", {})
                        val = organic.get(key)
                    if val is not None:
                        if key not in previous_snapshot:
                            previous_snapshot[key] = []
                        previous_snapshot[key].append(float(val))
            # Average the values
            previous_snapshot = {
                k: sum(v) / len(v) for k, v in previous_snapshot.items() if v
            }

        # Identify wins and concerns
        if isinstance(current_snapshot, dict):
            report.wins = self.identify_wins(current_snapshot, previous_snapshot)
            report.concerns = self.identify_concerns(current_snapshot, previous_snapshot)
        else:
            report.wins = []
            report.concerns = []

        # Calculate health score (simple heuristic): average of capped
        # traffic/100, keywords/50, and latest domain rating.
        # NOTE(review): the /100 and /50 scaling constants look arbitrary —
        # confirm against the KPI framework's scoring spec.
        traffic = self._extract_metric(current_snapshot, "traffic") if isinstance(current_snapshot, dict) else 0
        keywords = self._extract_metric(current_snapshot, "keywords") if isinstance(current_snapshot, dict) else 0
        score_components = []
        if traffic > 0:
            score_components.append(min(100, traffic / 100))
        if keywords > 0:
            score_components.append(min(100, keywords / 50))
        if dr_history:
            latest_dr = dr_history[-1] if isinstance(dr_history, list) else {}
            dr_val = latest_dr.get("domain_rating", latest_dr.get("domainRating", 0))
            score_components.append(float(dr_val))
        report.health_score = round(
            sum(score_components) / max(len(score_components), 1), 1
        )

        # Health trend: simple majority of wins vs concerns.
        win_count = len(report.wins)
        concern_count = len(report.concerns)
        if win_count > concern_count:
            report.health_trend = "improving"
        elif concern_count > win_count:
            report.health_trend = "declining"
        else:
            report.health_trend = "stable"

        # Traffic value change
        # NOTE(review): the /100.0 divisor suggests the "cost" metric is in
        # cents — confirm against the Ahrefs metrics unit documentation.
        curr_cost = self._extract_metric(current_snapshot, "cost") if isinstance(current_snapshot, dict) else 0
        prev_cost = previous_snapshot.get("cost", 0)
        report.traffic_value_change = round((curr_cost - prev_cost) / 100.0, 2)

        # Executive summary (always produced, even for executive_only runs)
        report.executive_summary = self.generate_executive_summary(
            report.wins, report.concerns,
            report.health_score, report.health_trend,
            report.traffic_value_change,
        )

        if not executive_only:
            # Tactical breakdown
            report.tactical_breakdown = self.generate_tactical_breakdown(
                current_snapshot if isinstance(current_snapshot, dict) else {},
                report.wins, report.concerns,
            )

            # Target comparison
            if targets_path:
                try:
                    targets_data = json.loads(Path(targets_path).read_text())
                    # Use 30-day targets by default
                    target_set = targets_data.get("30_day", targets_data)
                    report.target_progress = self.compare_targets(
                        current_snapshot if isinstance(current_snapshot, dict) else {},
                        target_set,
                    )
                except Exception as exc:
                    # Bad/missing targets file is non-fatal; record and continue.
                    report.errors.append(f"Targets load error: {exc}")

        return report
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Output formatting
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def format_text_report(report: PerformanceReport) -> str:
    """Format performance report as human-readable text.

    Sections render only when their data is present (trends, tactical
    breakdown, target progress, errors); the header and executive summary
    always appear. ASCII markers are used throughout: ^/v/= for direction,
    ! repetition for impact, #/- for progress bars.
    """
    lines = []
    lines.append("=" * 60)
    lines.append(f"SEO Performance Report: {report.url}")
    lines.append(f"Period: {report.period} ({report.date_from} to {report.date_to})")
    lines.append(f"Generated: {report.timestamp}")
    lines.append("=" * 60)

    # Executive Summary
    lines.append("\nEXECUTIVE SUMMARY")
    lines.append("-" * 40)
    es = report.executive_summary
    lines.append(f" Health Score: {es.get('health_score', 0)}/100")
    trend_arrow = {"improving": "^", "declining": "v", "stable": "="}.get(
        es.get("health_trend", "stable"), "="
    )
    lines.append(f" Trend: {trend_arrow} {es.get('health_trend', 'stable')}")
    lines.append(f" Traffic Value Change: ${es.get('traffic_value_change_usd', 0):,.2f}")
    lines.append(f" Assessment: {es.get('overall_assessment', 'N/A')}")

    # Wins (impact shown as !!! / !! / !)
    lines.append(f"\n Top Wins ({es.get('total_wins', 0)} total):")
    for w in es.get("top_wins", []):
        impact_marker = {"high": "!!!", "medium": "!!", "low": "!"}.get(w.get("impact", "low"), "!")
        lines.append(f" {impact_marker} [{w.get('category', '')}] {w.get('description', '')}")

    # Concerns
    lines.append(f"\n Top Concerns ({es.get('total_concerns', 0)} total):")
    for c in es.get("top_concerns", []):
        impact_marker = {"high": "!!!", "medium": "!!", "low": "!"}.get(c.get("impact", "low"), "!")
        lines.append(f" {impact_marker} [{c.get('category', '')}] {c.get('description', '')}")

    # Trends (report.trends holds asdict()-ed TrendData dicts)
    if report.trends:
        lines.append("\nTRENDS")
        lines.append("-" * 40)
        for metric_name, trend_list in report.trends.items():
            for t in trend_list:
                if isinstance(t, dict):
                    dir_arrow = {"up": "^", "down": "v", "stable": "="}.get(
                        t.get("direction", "stable"), "="
                    )
                    change_str = f" ({t.get('change_pct', 0):+.1f}%)" if t.get("change_pct") is not None else ""
                    lines.append(f" {dir_arrow} {metric_name}: {t.get('value', 0):,.2f}{change_str}")

    # Tactical Breakdown (at most 3 actions shown per dimension)
    if report.tactical_breakdown:
        lines.append("\nTACTICAL BREAKDOWN")
        lines.append("-" * 40)
        for dim_name, dim_data in report.tactical_breakdown.items():
            status = dim_data.get("status", "unknown")
            status_marker = {
                "improving": "^", "declining": "v", "active": "=", "needs_review": "?"
            }.get(status, "?")
            lines.append(f"\n [{dim_name.upper()}] Status: {status_marker} {status}")
            for action in dim_data.get("actions", [])[:3]:
                lines.append(f" > {action}")

    # Target Progress (20-char bar, 5% per cell, capped at 100%)
    if report.target_progress:
        lines.append("\nTARGET PROGRESS")
        lines.append("-" * 40)
        for tp in report.target_progress:
            if isinstance(tp, TargetProgress):
                bar_filled = int(min(tp.progress_pct, 100) / 5)
                bar = "#" * bar_filled + "-" * (20 - bar_filled)
                lines.append(
                    f" {tp.kpi_name}: [{bar}] {tp.progress_pct:.0f}% "
                    f"(actual: {tp.actual:,.0f} / target: {tp.target:,.0f})"
                )

    # Errors
    if report.errors:
        lines.append("\nERRORS")
        lines.append("-" * 40)
        for err in report.errors:
            lines.append(f" ! {err}")

    lines.append("\n" + "=" * 60)
    return "\n".join(lines)
|
||||
|
||||
|
||||
def serialize_report(report: PerformanceReport) -> dict:
    """Serialize a PerformanceReport into a JSON-safe dictionary.

    Dataclass members (wins, concerns, target progress) are flattened
    with dataclasses.asdict; everything else passes through unchanged.
    Key order mirrors the dataclass layout so JSON output stays stable.
    """
    return {
        "url": report.url,
        "period": report.period,
        "date_from": report.date_from,
        "date_to": report.date_to,
        "health_score": report.health_score,
        "health_trend": report.health_trend,
        "trends": report.trends,
        "wins": [asdict(win) for win in report.wins],
        "concerns": [asdict(concern) for concern in report.concerns],
        "executive_summary": report.executive_summary,
        "tactical_breakdown": report.tactical_breakdown,
        "target_progress": [asdict(progress) for progress in report.target_progress],
        "traffic_value_change": report.traffic_value_change,
        "timestamp": report.timestamp,
        "errors": report.errors,
    }
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# CLI entry point
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def parse_args() -> argparse.Namespace:
    """Define and parse command-line options for the performance reporter."""
    parser = argparse.ArgumentParser(
        description="SEO Performance Reporter - Period-over-period analysis"
    )
    # (flag, add_argument keyword options) — registered in order below.
    option_specs = [
        ("--url", {"required": True, "help": "Target URL or domain"}),
        ("--period", {"choices": ["monthly", "quarterly", "yearly", "custom"],
                      "default": "monthly",
                      "help": "Report period (default: monthly)"}),
        ("--from", {"dest": "date_from", "type": str, "default": None,
                    "help": "Start date (YYYY-MM-DD) for custom period"}),
        ("--to", {"dest": "date_to", "type": str, "default": None,
                  "help": "End date (YYYY-MM-DD) for custom period"}),
        ("--executive", {"action": "store_true",
                         "help": "Generate executive summary only"}),
        ("--targets", {"type": str, "default": None,
                       "help": "Path to targets JSON file for progress comparison"}),
        ("--json", {"action": "store_true",
                    "help": "Output results as JSON"}),
        ("--output", {"type": str, "default": None,
                      "help": "Save output to file path"}),
    ]
    for flag, options in option_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()
|
||||
|
||||
|
||||
async def main() -> None:
    """Main entry point.

    Parses CLI arguments, generates the performance report, renders it as
    JSON or text, and writes it to --output or stdout.
    """
    args = parse_args()

    reporter = PerformanceReporter()
    result = await reporter.report(
        url=args.url,
        period=args.period,
        date_from=args.date_from,
        date_to=args.date_to,
        executive_only=args.executive,
        targets_path=args.targets,
    )

    # Render either machine-readable JSON or the human-readable text report.
    if args.json:
        output = json.dumps(serialize_report(result), indent=2, ensure_ascii=False)
    else:
        output = format_text_report(result)

    # --output writes to disk; otherwise the report goes to stdout.
    if args.output:
        Path(args.output).write_text(output, encoding="utf-8")
        logger.info(f"Output saved to {args.output}")
    else:
        print(output)

    # NOTE(review): print_stats presumably reports request/rate-limit
    # counters from BaseAsyncClient — confirm in base_client.py.
    reporter.print_stats()
|
||||
|
||||
|
||||
# Script entry point: run the async CLI under a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
|
||||
@@ -0,0 +1,8 @@
|
||||
# 25-seo-kpi-framework dependencies
|
||||
requests>=2.31.0
|
||||
aiohttp>=3.9.0
|
||||
pandas>=2.1.0
|
||||
tenacity>=8.2.0
|
||||
tqdm>=4.66.0
|
||||
python-dotenv>=1.0.0
|
||||
rich>=13.7.0
|
||||
Reference in New Issue
Block a user