Files
Andrew Yim a3ff965b87 Add SEO skills 19-28, 31-32 with full Python implementations
12 new skills: Keyword Strategy, SERP Analysis, Position Tracking,
Link Building, Content Strategy, E-Commerce SEO, KPI Framework,
International SEO, AI Visibility, Knowledge Graph, Competitor Intel,
and Crawl Budget. ~20K lines of Python across 25 domain scripts.
Updated skill 11 pipeline table and repo CLAUDE.md.
Enhanced skill 18 local SEO workflow from jamie.clinic audit.

Note: Skill 26 hreflang_validator.py pending (content filter block).

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-13 12:05:59 +09:00

595 lines
21 KiB
Python

"""
AI Visibility Tracker - Brand Radar Monitoring
================================================
Purpose: Track brand visibility in AI-generated search answers
using Ahrefs Brand Radar APIs.
Python: 3.10+
Usage:
python ai_visibility_tracker.py --target example.com --json
python ai_visibility_tracker.py --target example.com --competitor comp1.com --json
python ai_visibility_tracker.py --target example.com --history --json
python ai_visibility_tracker.py --target example.com --sov --json
"""
import argparse
import asyncio
import json
import logging
import subprocess
import sys
from dataclasses import dataclass, field, asdict
from datetime import datetime
from pathlib import Path
from typing import Any
# Add parent to path for base_client import
sys.path.insert(0, str(Path(__file__).parent))
from base_client import BaseAsyncClient, config
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Data classes
# ---------------------------------------------------------------------------
@dataclass
class ImpressionMetrics:
    """AI search impression metrics for a brand."""

    # Total AI-search impressions reported for the period.
    total: int = 0
    # Derived label: "increasing", "decreasing", or "stable"
    # (set from change_pct by the tracker, threshold +/-5%).
    trend: str = "stable"  # increasing, decreasing, stable
    # Percent change versus the previous period.
    change_pct: float = 0.0
    # Reporting period string as returned by the API (may be empty).
    period: str = ""
    # Raw per-source breakdown passed through from the API response.
    breakdown: dict = field(default_factory=dict)
@dataclass
class MentionMetrics:
    """AI search mention metrics for a brand."""

    # Total AI-answer mentions reported for the period.
    total: int = 0
    # Derived label: "increasing", "decreasing", or "stable"
    # (set from change_pct by the tracker, threshold +/-5%).
    trend: str = "stable"
    # Percent change versus the previous period.
    change_pct: float = 0.0
    # Reporting period string as returned by the API (may be empty).
    period: str = ""
    # Raw per-source breakdown passed through from the API response.
    breakdown: dict = field(default_factory=dict)
@dataclass
class SovMetric:
    """Share of Voice metric for a single domain."""

    # Domain the metric belongs to (e.g. "example.com").
    domain: str = ""
    # Share of Voice as a percentage (0-100).
    sov_pct: float = 0.0
    # Percent change versus the previous period.
    change_pct: float = 0.0
@dataclass
class HistoryPoint:
    """Single data point in a time series."""

    # Date or period label for this point (format as returned by the API).
    date: str = ""
    # Metric value at this point (impressions, mentions, or SOV).
    value: float = 0.0
@dataclass
class CompetitorVisibility:
    """Aggregated AI visibility metrics for a competitor domain."""

    # Competitor (or target) domain being compared.
    domain: str = ""
    # Total AI-search impressions for the domain.
    impressions: int = 0
    # Total AI-answer mentions for the domain.
    mentions: int = 0
    # Share of Voice percentage for the domain.
    sov: float = 0.0
@dataclass
class AiVisibilityResult:
    """Complete AI visibility tracking result.

    Aggregates impression/mention metrics, Share of Voice, optional history
    series, competitor comparison, and generated recommendations for a
    single target domain.
    """

    target: str = ""
    impressions: ImpressionMetrics = field(default_factory=ImpressionMetrics)
    mentions: MentionMetrics = field(default_factory=MentionMetrics)
    # SOV payload as returned by get_sov_overview: {"brand_sov": float, "competitors": [...]}.
    share_of_voice: dict = field(default_factory=dict)
    impressions_history: list[HistoryPoint] = field(default_factory=list)
    mentions_history: list[HistoryPoint] = field(default_factory=list)
    sov_history: list[HistoryPoint] = field(default_factory=list)
    competitors: list[CompetitorVisibility] = field(default_factory=list)
    recommendations: list[str] = field(default_factory=list)
    # Report generation time, local-time ISO-8601.
    timestamp: str = field(default_factory=lambda: datetime.now().isoformat())

    def to_dict(self) -> dict:
        """Convert the result to a JSON-serializable dictionary.

        Uses dataclasses.asdict instead of a hand-written field mapping so the
        output cannot drift out of sync when fields are added: nested
        dataclasses and lists of dataclasses are converted recursively, and
        keys appear in field-declaration order (identical to the previous
        manual mapping). Note that asdict deep-copies container values, so
        mutating the returned dict does not affect this instance.
        """
        return asdict(self)
# ---------------------------------------------------------------------------
# MCP tool caller helper
# ---------------------------------------------------------------------------
def call_mcp_tool(tool_name: str, params: dict) -> dict:
    """
    Call an Ahrefs MCP tool and return the parsed JSON response.

    In Claude Desktop / Claude Code environments the MCP tools are invoked
    directly by the AI agent. This helper exists so that the script can also
    be executed standalone via subprocess for testing purposes.

    Args:
        tool_name: MCP tool name (e.g. "brand-radar-sov-overview").
        params: JSON-serializable parameters for the tool call.

    Returns:
        Parsed JSON payload on success; an empty dict on any failure
        (CLI missing, non-zero exit, empty output, timeout, bad JSON).
        Callers treat {} as "no data", so this function never raises.
    """
    # Lazy %-args: the message is only formatted when INFO is enabled.
    logger.info("Calling MCP tool: %s with params: %s", tool_name, params)
    # List argv with shell=False (the default) avoids shell injection.
    cmd = ["claude", "mcp", "call", "ahrefs", tool_name, json.dumps(params)]
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=120)
        if result.returncode == 0 and result.stdout.strip():
            return json.loads(result.stdout.strip())
        logger.warning("MCP tool %s returned non-zero or empty: %s", tool_name, result.stderr)
        return {}
    # OSError covers FileNotFoundError (previous behavior) plus PermissionError
    # and similar launch failures when the "claude" CLI is unavailable.
    except (subprocess.TimeoutExpired, json.JSONDecodeError, OSError) as exc:
        logger.warning("MCP call failed (%s). Returning empty dict.", exc)
        return {}
# ---------------------------------------------------------------------------
# AI Visibility Tracker
# ---------------------------------------------------------------------------
class AiVisibilityTracker(BaseAsyncClient):
    """Track brand visibility across AI-generated search results.

    Wraps the Ahrefs Brand Radar MCP tools (impressions, mentions, SOV,
    history) and aggregates them into an AiVisibilityResult via track().
    """
    def __init__(self, **kwargs):
        """Initialize the tracker; kwargs are forwarded to BaseAsyncClient."""
        super().__init__(**kwargs)
        # Class-named logger so log lines are attributed to this tracker.
        self.logger = logging.getLogger(self.__class__.__name__)
# ---- Impressions ----
async def get_impressions_overview(self, target: str) -> ImpressionMetrics:
"""Fetch current AI impression metrics via brand-radar-impressions-overview."""
self.logger.info(f"Fetching impressions overview for {target}")
data = await asyncio.to_thread(
call_mcp_tool,
"brand-radar-impressions-overview",
{"target": target},
)
metrics = ImpressionMetrics()
if not data:
return metrics
metrics.total = data.get("total_impressions", data.get("impressions", 0))
metrics.change_pct = data.get("change_pct", data.get("change", 0.0))
metrics.period = data.get("period", "")
metrics.breakdown = data.get("breakdown", {})
if metrics.change_pct > 5:
metrics.trend = "increasing"
elif metrics.change_pct < -5:
metrics.trend = "decreasing"
else:
metrics.trend = "stable"
return metrics
# ---- Mentions ----
async def get_mentions_overview(self, target: str) -> MentionMetrics:
"""Fetch current AI mention metrics via brand-radar-mentions-overview."""
self.logger.info(f"Fetching mentions overview for {target}")
data = await asyncio.to_thread(
call_mcp_tool,
"brand-radar-mentions-overview",
{"target": target},
)
metrics = MentionMetrics()
if not data:
return metrics
metrics.total = data.get("total_mentions", data.get("mentions", 0))
metrics.change_pct = data.get("change_pct", data.get("change", 0.0))
metrics.period = data.get("period", "")
metrics.breakdown = data.get("breakdown", {})
if metrics.change_pct > 5:
metrics.trend = "increasing"
elif metrics.change_pct < -5:
metrics.trend = "decreasing"
else:
metrics.trend = "stable"
return metrics
# ---- Share of Voice ----
async def get_sov_overview(self, target: str) -> dict:
"""Fetch Share of Voice overview via brand-radar-sov-overview."""
self.logger.info(f"Fetching SOV overview for {target}")
data = await asyncio.to_thread(
call_mcp_tool,
"brand-radar-sov-overview",
{"target": target},
)
if not data:
return {"brand_sov": 0.0, "competitors": []}
brand_sov = data.get("sov", data.get("share_of_voice", 0.0))
competitors_raw = data.get("competitors", [])
competitors = []
for comp in competitors_raw:
competitors.append(SovMetric(
domain=comp.get("domain", ""),
sov_pct=comp.get("sov", comp.get("share_of_voice", 0.0)),
change_pct=comp.get("change_pct", 0.0),
))
return {
"brand_sov": brand_sov,
"competitors": [asdict(c) for c in competitors],
}
# ---- History ----
async def get_impressions_history(self, target: str) -> list[HistoryPoint]:
"""Fetch impressions history via brand-radar-impressions-history."""
self.logger.info(f"Fetching impressions history for {target}")
data = await asyncio.to_thread(
call_mcp_tool,
"brand-radar-impressions-history",
{"target": target},
)
return self._parse_history(data)
async def get_mentions_history(self, target: str) -> list[HistoryPoint]:
"""Fetch mentions history via brand-radar-mentions-history."""
self.logger.info(f"Fetching mentions history for {target}")
data = await asyncio.to_thread(
call_mcp_tool,
"brand-radar-mentions-history",
{"target": target},
)
return self._parse_history(data)
async def get_sov_history(self, target: str) -> list[HistoryPoint]:
"""Fetch SOV history via brand-radar-sov-history."""
self.logger.info(f"Fetching SOV history for {target}")
data = await asyncio.to_thread(
call_mcp_tool,
"brand-radar-sov-history",
{"target": target},
)
return self._parse_history(data)
def _parse_history(self, data: dict | list) -> list[HistoryPoint]:
"""Parse history data from MCP response into HistoryPoint list."""
points: list[HistoryPoint] = []
if not data:
return points
items = data if isinstance(data, list) else data.get("history", data.get("data", []))
for item in items:
if isinstance(item, dict):
points.append(HistoryPoint(
date=item.get("date", item.get("period", "")),
value=item.get("value", item.get("impressions", item.get("mentions", item.get("sov", 0.0)))),
))
return points
# ---- Competitor Comparison ----
async def compare_competitors(
self, target: str, competitors: list[str]
) -> list[CompetitorVisibility]:
"""Aggregate AI visibility metrics for target and competitors."""
self.logger.info(f"Comparing competitors: {competitors}")
results: list[CompetitorVisibility] = []
all_domains = [target] + competitors
for domain in all_domains:
imp = await self.get_impressions_overview(domain)
men = await self.get_mentions_overview(domain)
sov_data = await self.get_sov_overview(domain)
results.append(CompetitorVisibility(
domain=domain,
impressions=imp.total,
mentions=men.total,
sov=sov_data.get("brand_sov", 0.0),
))
# Sort by SOV descending
results.sort(key=lambda x: x.sov, reverse=True)
return results
# ---- Trend Calculation ----
@staticmethod
def calculate_trends(history: list[HistoryPoint]) -> dict:
"""Determine trend direction and statistics from history data."""
if not history or len(history) < 2:
return {
"direction": "insufficient_data",
"avg_value": 0.0,
"min_value": 0.0,
"max_value": 0.0,
"change_pct": 0.0,
"data_points": len(history) if history else 0,
}
values = [h.value for h in history]
first_value = values[0]
last_value = values[-1]
avg_value = sum(values) / len(values)
min_value = min(values)
max_value = max(values)
if first_value > 0:
change_pct = ((last_value - first_value) / first_value) * 100
else:
change_pct = 0.0
if change_pct > 10:
direction = "strongly_increasing"
elif change_pct > 3:
direction = "increasing"
elif change_pct < -10:
direction = "strongly_decreasing"
elif change_pct < -3:
direction = "decreasing"
else:
direction = "stable"
return {
"direction": direction,
"avg_value": round(avg_value, 2),
"min_value": round(min_value, 2),
"max_value": round(max_value, 2),
"change_pct": round(change_pct, 2),
"data_points": len(values),
}
# ---- Recommendations ----
@staticmethod
def generate_recommendations(result: AiVisibilityResult) -> list[str]:
"""Generate actionable recommendations for improving AI visibility."""
recs: list[str] = []
# Impression-based recommendations
if result.impressions.total == 0:
recs.append(
"AI 검색에서 브랜드 노출이 감지되지 않았습니다. "
"E-E-A-T 시그널(경험, 전문성, 권위성, 신뢰성)을 강화하여 "
"AI 엔진이 콘텐츠를 참조할 수 있도록 하세요."
)
elif result.impressions.trend == "decreasing":
recs.append(
"AI 검색 노출이 감소 추세입니다. 최신 콘텐츠 업데이트 및 "
"구조화된 데이터(Schema Markup) 추가를 검토하세요."
)
elif result.impressions.trend == "increasing":
recs.append(
"AI 검색 노출이 증가 추세입니다. 현재 콘텐츠 전략을 "
"유지하면서 추가 키워드 확장을 고려하세요."
)
# Mention-based recommendations
if result.mentions.total == 0:
recs.append(
"AI 응답에서 브랜드 언급이 없습니다. "
"브랜드명이 포함된 고품질 콘텐츠를 제작하고, "
"외부 사이트에서의 브랜드 언급(Citations)을 늘리세요."
)
elif result.mentions.trend == "decreasing":
recs.append(
"AI 응답 내 브랜드 언급이 줄어들고 있습니다. "
"콘텐츠 신선도(Freshness)와 업계 권위 신호를 점검하세요."
)
# SOV recommendations
sov_value = result.share_of_voice.get("brand_sov", 0.0)
if sov_value < 10:
recs.append(
f"AI 검색 Share of Voice가 {sov_value}%로 낮습니다. "
"핵심 키워드에 대한 종합 가이드, FAQ 콘텐츠, "
"원본 데이터/연구 자료를 발행하여 인용 가능성을 높이세요."
)
elif sov_value < 25:
recs.append(
f"AI 검색 Share of Voice가 {sov_value}%입니다. "
"경쟁사 대비 차별화된 전문 콘텐츠와 "
"독점 데이터 기반 인사이트를 강화하세요."
)
# Competitor-based recommendations
if result.competitors:
top_competitor = result.competitors[0]
if top_competitor.domain != result.target and top_competitor.sov > sov_value:
recs.append(
f"최대 경쟁사 {top_competitor.domain}의 SOV가 "
f"{top_competitor.sov}%로 앞서고 있습니다. "
"해당 경쟁사의 콘텐츠 전략과 인용 패턴을 분석하세요."
)
# General best practices
recs.append(
"AI 검색 최적화를 위해 다음 사항을 지속적으로 점검하세요: "
"(1) 구조화된 데이터(JSON-LD) 적용, "
"(2) FAQ 및 How-to 콘텐츠 발행, "
"(3) 신뢰할 수 있는 외부 사이트에서의 백링크 확보, "
"(4) 콘텐츠 정기 업데이트 및 정확성 검증."
)
return recs
# ---- Main Orchestrator ----
async def track(
self,
target: str,
competitors: list[str] | None = None,
include_history: bool = False,
include_sov: bool = False,
) -> AiVisibilityResult:
"""
Orchestrate full AI visibility tracking.
Args:
target: Domain to track
competitors: Optional list of competitor domains
include_history: Whether to fetch historical trends
include_sov: Whether to include SOV analysis
"""
self.logger.info(f"Starting AI visibility tracking for {target}")
result = AiVisibilityResult(target=target)
# Core metrics (always fetched)
result.impressions = await self.get_impressions_overview(target)
result.mentions = await self.get_mentions_overview(target)
# Share of Voice
if include_sov or competitors:
result.share_of_voice = await self.get_sov_overview(target)
# History
if include_history:
result.impressions_history = await self.get_impressions_history(target)
result.mentions_history = await self.get_mentions_history(target)
if include_sov:
result.sov_history = await self.get_sov_history(target)
# Competitor comparison
if competitors:
result.competitors = await self.compare_competitors(target, competitors)
# Generate recommendations
result.recommendations = self.generate_recommendations(result)
self.print_stats()
return result
# ---------------------------------------------------------------------------
# CLI
# ---------------------------------------------------------------------------
def build_parser() -> argparse.ArgumentParser:
    """Build argument parser for CLI usage."""
    p = argparse.ArgumentParser(
        description="AI Visibility Tracker - Monitor brand visibility in AI search results",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s --target example.com --json
  %(prog)s --target example.com --competitor comp1.com --competitor comp2.com --json
  %(prog)s --target example.com --history --sov --json
  %(prog)s --target example.com --output report.json
""",
    )
    p.add_argument("--target", required=True,
                   help="Target domain to track (e.g., example.com)")
    p.add_argument("--competitor", action="append", default=[],
                   help="Competitor domain (repeatable). e.g., --competitor a.com --competitor b.com")
    p.add_argument("--history", action="store_true",
                   help="Include historical trend data (impressions, mentions, SOV over time)")
    p.add_argument("--sov", action="store_true",
                   help="Include Share of Voice analysis")
    p.add_argument("--json", action="store_true",
                   help="Output result as JSON to stdout")
    p.add_argument("--output", type=str, default=None,
                   help="Save JSON output to file path")
    return p
def print_summary(result: "AiVisibilityResult") -> None:
    """Print a human-readable summary of AI visibility results."""
    bar = "=" * 60
    print("\n" + bar)
    print(f" AI Visibility Report: {result.target}")
    print(bar)
    # Core metrics are always present on the result.
    print(f"\n Impressions: {result.impressions.total:,}")
    print(f" Trend: {result.impressions.trend} ({result.impressions.change_pct:+.1f}%)")
    print(f"\n Mentions: {result.mentions.total:,}")
    print(f" Trend: {result.mentions.trend} ({result.mentions.change_pct:+.1f}%)")
    # Optional sections are skipped when their data was not collected.
    if result.share_of_voice:
        sov = result.share_of_voice.get("brand_sov", 0.0)
        print(f"\n Share of Voice: {sov:.1f}%")
        rivals = result.share_of_voice.get("competitors", [])
        if rivals:
            print(" Competitors:")
            for entry in rivals:
                print(f" {entry.get('domain', '?')}: {entry.get('sov_pct', 0):.1f}%")
    if result.impressions_history:
        stats = AiVisibilityTracker.calculate_trends(result.impressions_history)
        print(f"\n Impressions Trend: {stats['direction']}")
        print(f" Range: {stats['min_value']:,.0f} - {stats['max_value']:,.0f}")
        print(f" Change: {stats['change_pct']:+.1f}%")
    if result.competitors:
        print("\n Competitor Comparison:")
        for cv in result.competitors:
            marker = " <-- target" if cv.domain == result.target else ""
            print(f" {cv.domain}: SOV={cv.sov:.1f}%, Imp={cv.impressions:,}, Men={cv.mentions:,}{marker}")
    if result.recommendations:
        print("\n Recommendations:")
        for idx, rec in enumerate(result.recommendations, 1):
            print(f" {idx}. {rec}")
    print("\n" + bar)
    print(f" Generated: {result.timestamp}")
    print(bar + "\n")
async def main() -> None:
    """CLI entry point."""
    args = build_parser().parse_args()
    tracker = AiVisibilityTracker(max_concurrent=5, requests_per_second=2.0)
    result = await tracker.track(
        target=args.target,
        competitors=args.competitor or None,  # empty list -> None
        include_history=args.history,
        include_sov=args.sov,
    )
    # Human-readable summary unless machine output was requested.
    if not (args.json or args.output):
        print_summary(result)
        return
    payload = json.dumps(result.to_dict(), ensure_ascii=False, indent=2)
    if args.json:
        print(payload)
    if args.output:
        dest = Path(args.output)
        dest.parent.mkdir(parents=True, exist_ok=True)
        dest.write_text(payload, encoding="utf-8")
        logger.info(f"Report saved to {args.output}")
if __name__ == "__main__":
    # Run the async CLI entry point when executed as a script.
    asyncio.run(main())