Initial commit: Claude Skills Factory with 8 refined custom skills

Custom Skills (ourdigital-custom-skills/):
- 00-ourdigital-visual-storytelling: Blog featured image prompt generator
- 01-ourdigital-research-publisher: Research-to-publication workflow
- 02-notion-organizer: Notion workspace management
- 03-research-to-presentation: Notion research to PPT/Figma
- 04-seo-gateway-strategist: SEO gateway page strategy planning
- 05-gateway-page-content-builder: Gateway page content generation
- 20-jamie-brand-editor: Jamie Clinic branded content GENERATION
- 21-jamie-brand-guardian: Jamie Clinic content REVIEW & evaluation

Refinements applied:
- All skills converted to SKILL.md format with YAML frontmatter
- Added version fields to all skills
- Flattened nested folder structures
- Removed packaging artifacts (.zip, .skill files)
- Reorganized file structures (scripts/, references/, etc.)
- Differentiated Jamie skills with clear roles

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2025-12-10 17:56:04 +09:00
commit 341d5f5a5b
498 changed files with 102813 additions and 0 deletions

View File

@@ -0,0 +1,316 @@
#!/usr/bin/env python3
"""
Keyword Analyzer for SEO Gateway Pages
Analyzes keywords and generates SEO strategy recommendations
"""
import json
from dataclasses import dataclass
from datetime import datetime
from typing import Dict, List, Optional, Tuple
@dataclass
class KeywordData:
    """A single keyword together with its core SEO metrics.

    Instances are produced by KeywordAnalyzer for both the primary
    keyword and its LSI variations.
    """

    keyword: str           # the keyword phrase itself
    search_volume: int     # estimated monthly search volume
    difficulty: float      # competition score, reported on a 0-100 scale
    intent: str            # user intent: informational / comparative / transactional / navigational
    cpc: float = 0.0       # cost-per-click (presumably KRW given the sample data — confirm)
    trend: str = "stable"  # search-volume trend label
class KeywordAnalyzer:
    """Analyzes a primary keyword for SEO gateway-page planning.

    Usage: construct with the primary keyword, then run the analysis
    methods in the order ``main`` uses them (analyze_primary_keyword,
    generate_lsi_keywords, generate_long_tail_keywords,
    generate_question_keywords, calculate_intent_distribution,
    generate_recommendations), and finally ``generate_report`` and/or
    ``export_analysis``. All intermediate results accumulate in
    ``self.results``.
    """

    def __init__(self, primary_keyword: str):
        self.primary_keyword = primary_keyword
        # Accumulator filled in by the analysis methods below.
        self.results = {
            "primary": None,            # KeywordData for the primary keyword
            "lsi": [],                  # list of KeywordData
            "long_tail": [],            # list of str
            "questions": [],            # list of str
            "intent_distribution": {},  # intent name -> percentage of volume
            "recommendations": []       # list of str
        }

    def analyze_primary_keyword(self) -> KeywordData:
        """
        Analyzes the primary keyword.

        In production, this would call actual keyword research APIs.
        Here a small table of simulated metrics is consulted, with a
        generic fallback used for keywords not in the table.
        """
        # Simulated data - replace with actual API calls
        keyword_data = {
            "눈 성형": {"volume": 12000, "difficulty": 65, "intent": "informational", "cpc": 2500},
            "이마 성형": {"volume": 5500, "difficulty": 55, "intent": "informational", "cpc": 3000},
            "동안 성형": {"volume": 8000, "difficulty": 70, "intent": "comparative", "cpc": 2800},
        }
        data = keyword_data.get(self.primary_keyword, {
            "volume": 1000,
            "difficulty": 50,
            "intent": "informational",
            "cpc": 1000
        })
        self.results["primary"] = KeywordData(
            keyword=self.primary_keyword,
            search_volume=data["volume"],
            difficulty=data["difficulty"],
            intent=data["intent"],
            cpc=data["cpc"]
        )
        return self.results["primary"]

    def generate_lsi_keywords(self) -> List[KeywordData]:
        """Generates LSI (Latent Semantic Indexing) keywords.

        Uses a curated per-keyword table; for keywords not in the table
        it falls back to generic "비용 / 부작용 / 후기" variations.
        Appends to (does not reset) ``self.results["lsi"]``.
        """
        lsi_patterns = {
            "눈 성형": [
                ("쌍꺼풀 수술", 8000, "transactional"),
                ("눈매교정", 5500, "informational"),
                ("앞트임", 4000, "informational"),
                ("뒤트임", 3500, "informational"),
                ("눈 성형 비용", 2000, "comparative"),
                ("눈 성형 부작용", 1500, "informational"),
                ("눈 성형 회복기간", 1800, "informational"),
                ("눈 성형 전후", 3000, "comparative"),
                ("남자 눈 성형", 2500, "informational"),
                ("눈 성형 잘하는곳", 2200, "comparative")
            ],
            "이마 성형": [
                ("이마거상술", 3000, "informational"),
                ("이마축소술", 2500, "informational"),
                ("헤어라인교정", 4000, "transactional"),
                ("이마 성형 비용", 1200, "comparative"),
                ("이마 보톡스", 6000, "transactional"),
                ("M자 탈모 수술", 5000, "informational"),
                ("이마 필러", 4500, "transactional"),
                ("이마 성형 부작용", 800, "informational"),
                ("이마 리프팅", 3500, "comparative"),
                ("이마 주름 제거", 2800, "transactional")
            ],
            "동안 성형": [
                ("안면 리프팅", 7000, "transactional"),
                ("실리프팅", 9000, "transactional"),
                ("보톡스 시술", 15000, "transactional"),
                ("필러 시술", 12000, "transactional"),
                ("동안 성형 비용", 2500, "comparative"),
                ("울쎄라", 8000, "comparative"),
                ("써마지", 6500, "comparative"),
                ("동안 시술 종류", 1800, "informational"),
                ("주름 제거 시술", 4000, "transactional"),
                ("동안 성형 추천", 2200, "comparative")
            ]
        }
        lsi_list = lsi_patterns.get(self.primary_keyword, [
            (f"{self.primary_keyword} 비용", 1000, "comparative"),
            (f"{self.primary_keyword} 부작용", 800, "informational"),
            (f"{self.primary_keyword} 후기", 1200, "comparative"),
        ])
        for keyword, volume, intent in lsi_list:
            self.results["lsi"].append(KeywordData(
                keyword=keyword,
                search_volume=volume,
                difficulty=45 + (volume / 1000),  # simple volume-based difficulty heuristic
                intent=intent
            ))
        return self.results["lsi"]

    def generate_long_tail_keywords(self) -> List[str]:
        """Generates long-tail keyword variations.

        Combines Seoul-area location modifiers with the primary keyword,
        plus "<location> <keyword> <action>" triples for the first two
        action modifiers. Overwrites ``self.results["long_tail"]``.
        """
        location_modifiers = ["강남", "신사", "청담", "압구정", "서울"]
        action_modifiers = ["잘하는곳", "추천", "유명한", "전문", "비용"]
        long_tails = []
        for location in location_modifiers:
            long_tails.append(f"{location} {self.primary_keyword}")
            for action in action_modifiers[:2]:  # Limit combinations
                long_tails.append(f"{location} {self.primary_keyword} {action}")
        self.results["long_tail"] = long_tails
        return long_tails

    def generate_question_keywords(self) -> List[str]:
        """Generates question-based keywords for featured snippets.

        Overwrites ``self.results["questions"]`` with fixed FAQ-style
        templates built around the primary keyword.
        """
        question_templates = [
            f"{self.primary_keyword} 비용은 얼마인가요?",
            f"{self.primary_keyword} 회복기간은 얼마나 걸리나요?",
            f"{self.primary_keyword} 부작용이 있나요?",
            f"{self.primary_keyword} 통증이 심한가요?",
            f"{self.primary_keyword} 효과는 얼마나 지속되나요?",
            f"{self.primary_keyword} 나이 제한이 있나요?",
            f"{self.primary_keyword} 후 주의사항은 무엇인가요?"
        ]
        self.results["questions"] = question_templates
        return question_templates

    def calculate_intent_distribution(self) -> Dict[str, float]:
        """Calculates user intent distribution across keywords.

        Weights each intent by search volume over the primary keyword
        plus all LSI keywords, and stores percentages (one decimal,
        zero-count intents omitted) in
        ``self.results["intent_distribution"]``. If no keywords have
        been analyzed yet, the distribution is left unchanged (empty).
        """
        intent_counts = {
            "informational": 0,
            "comparative": 0,
            "transactional": 0,
            "navigational": 0
        }
        # Count primary keyword intent
        if self.results["primary"]:
            intent_counts[self.results["primary"].intent] += self.results["primary"].search_volume
        # Count LSI keyword intents
        for kw in self.results["lsi"]:
            intent_counts[kw.intent] += kw.search_volume
        # Calculate percentages
        total_volume = sum(intent_counts.values())
        if total_volume > 0:
            self.results["intent_distribution"] = {
                intent: round((count / total_volume) * 100, 1)
                for intent, count in intent_counts.items()
                if count > 0
            }
        return self.results["intent_distribution"]

    def generate_recommendations(self) -> List[str]:
        """Generates SEO recommendations based on analysis.

        Applies fixed threshold rules to the primary keyword's volume
        and difficulty, the intent distribution, and the question list.
        Overwrites ``self.results["recommendations"]``.
        """
        recommendations = []
        # Based on search volume
        if self.results["primary"] and self.results["primary"].search_volume > 10000:
            recommendations.append("High search volume detected - prioritize this page for development")
        # Based on intent distribution
        intent_dist = self.results["intent_distribution"]
        if intent_dist.get("informational", 0) > 50:
            recommendations.append("Focus on educational content and comprehensive guides")
        if intent_dist.get("comparative", 0) > 30:
            recommendations.append("Include comparison tables and competitive differentiators")
        if intent_dist.get("transactional", 0) > 20:
            recommendations.append("Optimize conversion elements and CTAs above the fold")
        # Based on competition
        if self.results["primary"] and self.results["primary"].difficulty > 60:
            recommendations.append("High competition - invest in quality content and backlinks")
            recommendations.append("Target long-tail keywords for quicker wins")
        # Question keywords
        if len(self.results["questions"]) > 5:
            recommendations.append("Implement FAQ schema markup for featured snippets")
        self.results["recommendations"] = recommendations
        return recommendations

    def export_analysis(self, filename: Optional[str] = None) -> str:
        """Exports the analysis results to a UTF-8 JSON file.

        Args:
            filename: Target path. Defaults to a name derived from the
                primary keyword and today's date.

        Returns:
            The filename that was written.
        """
        if not filename:
            filename = f"keyword_analysis_{self.primary_keyword.replace(' ', '_')}_{datetime.now().strftime('%Y%m%d')}.json"
        export_data = {
            "analysis_date": datetime.now().isoformat(),
            "primary_keyword": self.primary_keyword,
            "primary_data": {
                "keyword": self.results["primary"].keyword,
                "search_volume": self.results["primary"].search_volume,
                "difficulty": self.results["primary"].difficulty,
                "intent": self.results["primary"].intent
            } if self.results["primary"] else None,
            "lsi_keywords": [
                {
                    "keyword": kw.keyword,
                    "volume": kw.search_volume,
                    "intent": kw.intent
                } for kw in self.results["lsi"]
            ],
            "long_tail_keywords": self.results["long_tail"],
            "question_keywords": self.results["questions"],
            "intent_distribution": self.results["intent_distribution"],
            "recommendations": self.results["recommendations"]
        }
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(export_data, f, ensure_ascii=False, indent=2)
        return filename

    def generate_report(self) -> str:
        """Generates a formatted text (Markdown-style) report.

        Raises:
            ValueError: if analyze_primary_keyword() has not been run
                (the original code crashed with AttributeError here).
        """
        primary = self.results['primary']
        if primary is None:
            raise ValueError("analyze_primary_keyword() must be called before generate_report()")
        report = f"""
# Keyword Analysis Report
Generated: {datetime.now().strftime('%Y-%m-%d %H:%M')}
## Primary Keyword: {self.primary_keyword}
- Search Volume: {primary.search_volume:,}
- Difficulty: {primary.difficulty}/100
- Primary Intent: {primary.intent.capitalize()}
## LSI Keywords (Top 10)
"""
        for i, kw in enumerate(self.results['lsi'][:10], 1):
            report += f"{i}. {kw.keyword} - Volume: {kw.search_volume:,} ({kw.intent})\n"
        report += "\n## User Intent Distribution\n"
        for intent, percentage in self.results['intent_distribution'].items():
            report += f"- {intent.capitalize()}: {percentage}%\n"
        report += "\n## Long-tail Opportunities\n"
        for keyword in self.results['long_tail'][:5]:
            report += f"- {keyword}\n"
        report += "\n## Question Keywords (FAQ Optimization)\n"
        for question in self.results['questions'][:5]:
            report += f"- {question}\n"
        report += "\n## Strategic Recommendations\n"
        for i, rec in enumerate(self.results['recommendations'], 1):
            report += f"{i}. {rec}\n"
        return report
def main():
    """CLI entry point: analyze the keyword given on the command line.

    Joins all argv words into one keyword, runs the full analysis
    pipeline, prints the text report to stdout, and writes the JSON
    export to the current working directory.
    """
    import sys
    if len(sys.argv) < 2:
        print("Usage: python keyword_analyzer.py '키워드'")
        print("Example: python keyword_analyzer.py '눈 성형'")
        sys.exit(1)
    keyword = ' '.join(sys.argv[1:])
    print(f"Analyzing keyword: {keyword}")
    print("-" * 50)
    analyzer = KeywordAnalyzer(keyword)
    # Run analysis stages in dependency order (distribution and
    # recommendations consume the earlier keyword results).
    analyzer.analyze_primary_keyword()
    analyzer.generate_lsi_keywords()
    analyzer.generate_long_tail_keywords()
    analyzer.generate_question_keywords()
    analyzer.calculate_intent_distribution()
    analyzer.generate_recommendations()
    # Generate and print report
    report = analyzer.generate_report()
    print(report)
    # Export to JSON
    filename = analyzer.export_analysis()
    # BUG FIX: the message printed the literal "(unknown)" instead of
    # interpolating the filename computed on the line above.
    print(f"\nAnalysis exported to: {filename}")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,25 @@
# Python dependencies for SEO Gateway Page Strategist scripts
# Install with: pip install -r requirements.txt
# Core dependencies
requests>=2.28.0
beautifulsoup4>=4.11.0
pandas>=1.5.0
numpy>=1.23.0
# For API integrations (optional)
google-api-python-client>=2.70.0
pytrends>=4.9.0
# For data visualization (optional)
matplotlib>=3.6.0
seaborn>=0.12.0
# For export formats
openpyxl>=3.0.0
jinja2>=3.1.0
# Development tools
pytest>=7.2.0
black>=22.0.0
pylint>=2.15.0