Restructure skill numbering: SEO 11-30, GTM 60-69, reserve 19-28 for future skills

Renumber 12 existing skills to new ranges:
- SEO: 11→13, 12→18, 13→16, 14→17, 15→14, 16→15, 17→29, 18→30, 19→12
- GTM: 20→60, 21→61, 22→62

Update cross-references in gateway architect/builder skills, GTM guardian
README, CLAUDE.md (skill tables + directory layout), and AGENTS.md
(domain routing ranges).

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-13 02:37:39 +09:00
parent 912d0e2810
commit 7c5efea817
149 changed files with 52 additions and 33 deletions

View File

@@ -0,0 +1,334 @@
#!/usr/bin/env python3
"""
Keyword Analyzer for SEO Gateway Pages
Analyzes keywords and generates SEO strategy recommendations
"""
import json
from dataclasses import dataclass
from datetime import datetime
from typing import Dict, List, Optional, Tuple
@dataclass
class KeywordData:
    """Data structure for keyword information"""
    # The keyword phrase itself.
    keyword: str
    # Estimated monthly search volume.
    search_volume: int
    # Ranking-difficulty score; the module's heuristics treat it as a 0-100 scale.
    difficulty: float
    # User-intent label: "informational", "comparative", "transactional",
    # or "navigational" (the categories used by calculate_intent_distribution).
    intent: str
    # Cost-per-click estimate — presumably KRW given the Korean sample data; TODO confirm units.
    cpc: float = 0.0
    # Search-volume trend indicator; defaults to "stable".
    trend: str = "stable"
class KeywordAnalyzer:
    """Analyzes keywords for SEO gateway pages.

    Builds a keyword strategy around a single primary keyword.  The
    analysis methods populate ``self.results`` incrementally, so the
    intended call order is:

        analyze_primary_keyword() -> generate_lsi_keywords()
        -> generate_long_tail_keywords() -> generate_question_keywords()
        -> calculate_intent_distribution() -> generate_recommendations()

    after which ``generate_report()`` / ``export_analysis()`` can be used.
    """

    def __init__(self, primary_keyword: str):
        """
        Args:
            primary_keyword: The seed keyword every derived keyword set
                is built around (e.g. "눈 성형").
        """
        self.primary_keyword = primary_keyword
        # Accumulated analysis state, filled in by the analysis methods.
        self.results = {
            "primary": None,            # KeywordData for the primary keyword
            "lsi": [],                  # list of KeywordData for related terms
            "long_tail": [],            # list of long-tail keyword strings
            "questions": [],            # list of FAQ-style question strings
            "intent_distribution": {},  # intent name -> % of total search volume
            "recommendations": []       # list of strategy recommendation strings
        }

    def analyze_primary_keyword(self) -> "KeywordData":
        """
        Analyzes the primary keyword and stores it in ``results["primary"]``.

        In production, this would call actual keyword research APIs.

        Returns:
            KeywordData for the primary keyword.  Keywords missing from
            the built-in table fall back to conservative defaults.
        """
        # Simulated data - replace with actual API calls
        keyword_data = {
            "눈 성형": {"volume": 12000, "difficulty": 65, "intent": "informational", "cpc": 2500},
            "이마 성형": {"volume": 5500, "difficulty": 55, "intent": "informational", "cpc": 3000},
            "동안 성형": {"volume": 8000, "difficulty": 70, "intent": "comparative", "cpc": 2800},
        }
        data = keyword_data.get(self.primary_keyword, {
            "volume": 1000,
            "difficulty": 50,
            "intent": "informational",
            "cpc": 1000
        })
        self.results["primary"] = KeywordData(
            keyword=self.primary_keyword,
            search_volume=data["volume"],
            difficulty=data["difficulty"],
            intent=data["intent"],
            cpc=data["cpc"]
        )
        return self.results["primary"]

    def generate_lsi_keywords(self) -> "List[KeywordData]":
        """
        Generates LSI (Latent Semantic Indexing) keywords.

        Appends one KeywordData per related term to ``results["lsi"]``
        and returns that list (calling this twice appends duplicates).
        Primary keywords without a curated pattern set fall back to
        three generic modifier-based variants.
        """
        lsi_patterns = {
            "눈 성형": [
                ("쌍꺼풀 수술", 8000, "transactional"),
                ("눈매교정", 5500, "informational"),
                ("앞트임", 4000, "informational"),
                ("뒤트임", 3500, "informational"),
                ("눈 성형 비용", 2000, "comparative"),
                ("눈 성형 부작용", 1500, "informational"),
                ("눈 성형 회복기간", 1800, "informational"),
                ("눈 성형 전후", 3000, "comparative"),
                ("남자 눈 성형", 2500, "informational"),
                ("눈 성형 잘하는곳", 2200, "comparative")
            ],
            "이마 성형": [
                ("이마거상술", 3000, "informational"),
                ("이마축소술", 2500, "informational"),
                ("헤어라인교정", 4000, "transactional"),
                ("이마 성형 비용", 1200, "comparative"),
                ("이마 보톡스", 6000, "transactional"),
                ("M자 탈모 수술", 5000, "informational"),
                ("이마 필러", 4500, "transactional"),
                ("이마 성형 부작용", 800, "informational"),
                ("이마 리프팅", 3500, "comparative"),
                ("이마 주름 제거", 2800, "transactional")
            ],
            "동안 성형": [
                ("안면 리프팅", 7000, "transactional"),
                ("실리프팅", 9000, "transactional"),
                ("보톡스 시술", 15000, "transactional"),
                ("필러 시술", 12000, "transactional"),
                ("동안 성형 비용", 2500, "comparative"),
                ("울쎄라", 8000, "comparative"),
                ("써마지", 6500, "comparative"),
                ("동안 시술 종류", 1800, "informational"),
                ("주름 제거 시술", 4000, "transactional"),
                ("동안 성형 추천", 2200, "comparative")
            ]
        }
        lsi_list = lsi_patterns.get(self.primary_keyword, [
            (f"{self.primary_keyword} 비용", 1000, "comparative"),
            (f"{self.primary_keyword} 부작용", 800, "informational"),
            (f"{self.primary_keyword} 후기", 1200, "comparative"),
        ])
        for keyword, volume, intent in lsi_list:
            self.results["lsi"].append(KeywordData(
                keyword=keyword,
                search_volume=volume,
                # Simple difficulty calculation: baseline 45, +1 per 1k volume.
                difficulty=45 + (volume / 1000),
                intent=intent
            ))
        return self.results["lsi"]

    def generate_long_tail_keywords(self) -> List[str]:
        """
        Generates long-tail keyword variations.

        Combines the primary keyword with each location modifier, plus
        the first two action modifiers per location (5 + 5*2 = 15
        variants).  Overwrites ``results["long_tail"]``.
        """
        location_modifiers = ["강남", "신사", "청담", "압구정", "서울"]
        action_modifiers = ["잘하는곳", "추천", "유명한", "전문", "비용"]
        long_tails = []
        for location in location_modifiers:
            long_tails.append(f"{location} {self.primary_keyword}")
            for action in action_modifiers[:2]:  # Limit combinations
                long_tails.append(f"{location} {self.primary_keyword} {action}")
        self.results["long_tail"] = long_tails
        return long_tails

    def generate_question_keywords(self) -> List[str]:
        """
        Generates question-based keywords for featured snippets.

        Overwrites ``results["questions"]`` with seven templated
        FAQ-style questions and returns them.
        """
        question_templates = [
            f"{self.primary_keyword} 비용은 얼마인가요?",
            f"{self.primary_keyword} 회복기간은 얼마나 걸리나요?",
            f"{self.primary_keyword} 부작용이 있나요?",
            f"{self.primary_keyword} 통증이 심한가요?",
            f"{self.primary_keyword} 효과는 얼마나 지속되나요?",
            f"{self.primary_keyword} 나이 제한이 있나요?",
            f"{self.primary_keyword} 후 주의사항은 무엇인가요?"
        ]
        self.results["questions"] = question_templates
        return question_templates

    def calculate_intent_distribution(self) -> Dict[str, float]:
        """
        Calculates user intent distribution across keywords.

        Sums search volume per intent over the primary and LSI keywords,
        then stores volume-weighted percentages (zero-count intents
        omitted) in ``results["intent_distribution"]``.

        Returns:
            Mapping of intent name to percentage; empty if no keywords
            have been analyzed yet.
        """
        intent_counts = {
            "informational": 0,
            "comparative": 0,
            "transactional": 0,
            "navigational": 0
        }
        # Count primary keyword intent.  Accumulating via .get() keeps an
        # intent label outside the predefined set from raising KeyError.
        primary = self.results["primary"]
        if primary:
            intent_counts[primary.intent] = (
                intent_counts.get(primary.intent, 0) + primary.search_volume
            )
        # Count LSI keyword intents
        for kw in self.results["lsi"]:
            intent_counts[kw.intent] = intent_counts.get(kw.intent, 0) + kw.search_volume
        # Calculate percentages
        total_volume = sum(intent_counts.values())
        if total_volume > 0:
            self.results["intent_distribution"] = {
                intent: round((count / total_volume) * 100, 1)
                for intent, count in intent_counts.items()
                if count > 0
            }
        return self.results["intent_distribution"]

    def generate_recommendations(self) -> List[str]:
        """
        Generates SEO recommendations based on analysis.

        Derives rule-based suggestions from the primary keyword's volume
        and difficulty, the intent distribution, and the question list.
        Overwrites ``results["recommendations"]``.
        """
        recommendations = []
        # Based on search volume
        if self.results["primary"] and self.results["primary"].search_volume > 10000:
            recommendations.append("High search volume detected - prioritize this page for development")
        # Based on intent distribution
        intent_dist = self.results["intent_distribution"]
        if intent_dist.get("informational", 0) > 50:
            recommendations.append("Focus on educational content and comprehensive guides")
        if intent_dist.get("comparative", 0) > 30:
            recommendations.append("Include comparison tables and competitive differentiators")
        if intent_dist.get("transactional", 0) > 20:
            recommendations.append("Optimize conversion elements and CTAs above the fold")
        # Based on competition
        if self.results["primary"] and self.results["primary"].difficulty > 60:
            recommendations.append("High competition - invest in quality content and backlinks")
            recommendations.append("Target long-tail keywords for quicker wins")
        # Question keywords
        if len(self.results["questions"]) > 5:
            recommendations.append("Implement FAQ schema markup for featured snippets")
        self.results["recommendations"] = recommendations
        return recommendations

    def export_analysis(self, filename: Optional[str] = None) -> str:
        """
        Exports the analysis results to JSON.

        Args:
            filename: Output path.  Defaults to a name derived from the
                primary keyword and today's date.

        Returns:
            The path the JSON file was written to.
        """
        if not filename:
            filename = f"keyword_analysis_{self.primary_keyword.replace(' ', '_')}_{datetime.now().strftime('%Y%m%d')}.json"
        export_data = {
            "analysis_date": datetime.now().isoformat(),
            "primary_keyword": self.primary_keyword,
            "primary_data": {
                "keyword": self.results["primary"].keyword,
                "search_volume": self.results["primary"].search_volume,
                "difficulty": self.results["primary"].difficulty,
                "intent": self.results["primary"].intent
            } if self.results["primary"] else None,
            "lsi_keywords": [
                {
                    "keyword": kw.keyword,
                    "volume": kw.search_volume,
                    "intent": kw.intent
                } for kw in self.results["lsi"]
            ],
            "long_tail_keywords": self.results["long_tail"],
            "question_keywords": self.results["questions"],
            "intent_distribution": self.results["intent_distribution"],
            "recommendations": self.results["recommendations"]
        }
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(export_data, f, ensure_ascii=False, indent=2)
        return filename

    def generate_report(self) -> str:
        """
        Generates a formatted text report.

        Assumes ``analyze_primary_keyword()`` has already run — raises
        AttributeError if ``results["primary"]`` is still None.
        """
        report = f"""
# Keyword Analysis Report
Generated: {datetime.now().strftime('%Y-%m-%d %H:%M')}
## Primary Keyword: {self.primary_keyword}
- Search Volume: {self.results['primary'].search_volume:,}
- Difficulty: {self.results['primary'].difficulty}/100
- Primary Intent: {self.results['primary'].intent.capitalize()}
## LSI Keywords (Top 10)
"""
        for i, kw in enumerate(self.results['lsi'][:10], 1):
            report += f"{i}. {kw.keyword} - Volume: {kw.search_volume:,} ({kw.intent})\n"
        report += f"\n## User Intent Distribution\n"
        for intent, percentage in self.results['intent_distribution'].items():
            report += f"- {intent.capitalize()}: {percentage}%\n"
        report += f"\n## Long-tail Opportunities\n"
        for keyword in self.results['long_tail'][:5]:
            report += f"- {keyword}\n"
        report += f"\n## Question Keywords (FAQ Optimization)\n"
        for question in self.results['questions'][:5]:
            report += f"- {question}\n"
        report += f"\n## Strategic Recommendations\n"
        for i, rec in enumerate(self.results['recommendations'], 1):
            report += f"{i}. {rec}\n"
        return report
def main():
    """CLI entry point.

    Parses arguments, runs the full analysis pipeline in order, prints
    the formatted report, and exports the results to JSON.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description='Analyze keywords for SEO gateway page strategy',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''
Examples:
  python keyword_analyzer.py --topic "눈 성형"
  python keyword_analyzer.py --topic "이마 성형" --market "강남"
  python keyword_analyzer.py --topic "동안 성형" --output strategy.json
'''
    )
    parser.add_argument('--topic', '-t', required=True,
                        help='Primary keyword to analyze (e.g., "눈 성형")')
    parser.add_argument('--market', '-m', default=None,
                        help='Target market/location (e.g., "강남")')
    parser.add_argument('--output', '-o', default=None,
                        help='Output JSON file path')
    # NOTE(review): accepted but not used anywhere in this script yet.
    parser.add_argument('--competitors', '-c', default=None,
                        help='Comma-separated competitor URLs for analysis')
    args = parser.parse_args()

    # When a market is given, prefix it so e.g. "강남 눈 성형" is analyzed.
    keyword = args.topic
    if args.market:
        keyword = f"{args.market} {args.topic}"

    print(f"Analyzing keyword: {keyword}")
    print("-" * 50)

    analyzer = KeywordAnalyzer(keyword)
    # Run analysis (order matters: later steps read earlier results)
    analyzer.analyze_primary_keyword()
    analyzer.generate_lsi_keywords()
    analyzer.generate_long_tail_keywords()
    analyzer.generate_question_keywords()
    analyzer.calculate_intent_distribution()
    analyzer.generate_recommendations()

    # Generate and print report
    report = analyzer.generate_report()
    print(report)

    # Export to JSON.  Bug fix: the export path was previously printed as
    # the literal text "(unknown)" and the returned filename went unused.
    filename = analyzer.export_analysis(args.output)
    print(f"\nAnalysis exported to: {filename}")


if __name__ == "__main__":
    main()