feat: Add installation tool, Claude.ai export, and skill standardization (#1)

## Summary

- Add portable installation tool (`install.sh`) for cross-machine setup
- Add Claude.ai export files with proper YAML frontmatter
- Add multi-agent-guide v2.0 with consolidated framework template
- Rename `00-claude-code-setting` → `00-our-settings-audit` (avoid reserved word)
- Add YAML frontmatter to 25+ SKILL.md files for Claude Desktop compatibility

## Commits Included

- `93f604a` feat: Add portable installation tool for cross-machine setup
- `9b84104` feat: Add Claude.ai export for portable skill installation
- `f7ab973` fix: Add YAML frontmatter to Claude.ai export files
- `3fed49a` feat(multi-agent-guide): Add v2.0 with consolidated framework
- `3be26ef` refactor: Rename settings-audit skill and add YAML frontmatter

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Andrew Yim
2026-02-03 16:48:06 +07:00
committed by GitHub
parent 0bc24d00b9
commit b6a478e1df
72 changed files with 4770 additions and 803 deletions

View File

@@ -0,0 +1,232 @@
#!/usr/bin/env python3
"""
Extensions Analyzer
Analyzes Claude Code commands, skills, and agents.
"""
import json
import re
import sys
from pathlib import Path
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
# Style limits (in lines, including frontmatter) enforced by the analyzers.
MAX_COMMAND_LINES = 100  # commands should stay short and focused
MAX_SKILL_LINES = 500    # SKILL.md may be longer, but is still bounded
class ExtensionsAnalyzer:
def __init__(self):
self.findings = {
"critical": [],
"warnings": [],
"passing": [],
"recommendations": []
}
self.commands = {}
self.skills = {}
self.agents = {}
def find_extension_dirs(self) -> dict:
"""Find extension directories."""
base_paths = [
Path.home() / ".claude",
Path.cwd() / ".claude",
]
dirs = {"commands": [], "skills": [], "agents": []}
for base in base_paths:
for ext_type in dirs.keys():
path = base / ext_type
if path.exists() and path.is_dir():
dirs[ext_type].append(path)
return dirs
def parse_frontmatter(self, content: str) -> dict | None:
"""Parse YAML frontmatter."""
if not content.startswith('---'):
return None
try:
end = content.find('---', 3)
if end == -1:
return None
yaml_content = content[3:end].strip()
if HAS_YAML:
return yaml.safe_load(yaml_content)
else:
# Basic parsing without yaml
result = {}
for line in yaml_content.split('\n'):
if ':' in line:
key, value = line.split(':', 1)
result[key.strip()] = value.strip()
return result
except Exception:
return None
def analyze_command(self, path: Path) -> dict:
"""Analyze a command file."""
try:
content = path.read_text()
except IOError:
return {"name": path.stem, "error": "Could not read"}
lines = len(content.split('\n'))
frontmatter = self.parse_frontmatter(content)
analysis = {
"name": path.stem,
"lines": lines,
"has_frontmatter": frontmatter is not None,
"has_description": frontmatter and "description" in frontmatter,
"issues": []
}
if not analysis["has_frontmatter"]:
analysis["issues"].append("Missing YAML frontmatter")
elif not analysis["has_description"]:
analysis["issues"].append("Missing description")
if lines > MAX_COMMAND_LINES:
analysis["issues"].append(f"Too long: {lines} lines (max {MAX_COMMAND_LINES})")
if not re.match(r'^[a-z][a-z0-9-]*$', analysis["name"]):
analysis["issues"].append("Name should be kebab-case")
return analysis
def analyze_skill(self, path: Path) -> dict:
"""Analyze a skill directory."""
skill_md = path / "SKILL.md"
if not skill_md.exists():
return {
"name": path.name,
"error": "Missing SKILL.md",
"issues": ["Missing SKILL.md"]
}
try:
content = skill_md.read_text()
except IOError:
return {"name": path.name, "error": "Could not read SKILL.md", "issues": []}
lines = len(content.split('\n'))
frontmatter = self.parse_frontmatter(content)
analysis = {
"name": path.name,
"lines": lines,
"has_frontmatter": frontmatter is not None,
"has_description": frontmatter and "description" in frontmatter,
"issues": []
}
if not analysis["has_frontmatter"]:
analysis["issues"].append("Missing frontmatter in SKILL.md")
if lines > MAX_SKILL_LINES:
analysis["issues"].append(f"Too long: {lines} lines (max {MAX_SKILL_LINES})")
return analysis
def analyze_agent(self, path: Path) -> dict:
"""Analyze an agent file."""
try:
content = path.read_text()
except IOError:
return {"name": path.stem, "error": "Could not read", "issues": []}
frontmatter = self.parse_frontmatter(content)
analysis = {
"name": path.stem,
"has_frontmatter": frontmatter is not None,
"tools_restricted": False,
"issues": []
}
if frontmatter:
tools = frontmatter.get("tools", "*")
analysis["tools_restricted"] = tools != "*" and tools
if not analysis["has_frontmatter"]:
analysis["issues"].append("Missing frontmatter")
if not analysis["tools_restricted"]:
analysis["issues"].append("Tools not restricted (consider limiting)")
return analysis
def analyze(self) -> dict:
"""Run full analysis."""
dirs = self.find_extension_dirs()
# Analyze commands
for cmd_dir in dirs["commands"]:
for cmd_file in cmd_dir.glob("*.md"):
analysis = self.analyze_command(cmd_file)
self.commands[analysis["name"]] = analysis
if analysis.get("issues"):
for issue in analysis["issues"]:
self.findings["warnings"].append(f"Command '{analysis['name']}': {issue}")
else:
self.findings["passing"].append(f"Command '{analysis['name']}': OK")
# Analyze skills
for skill_dir in dirs["skills"]:
for skill_path in skill_dir.iterdir():
if skill_path.is_dir():
analysis = self.analyze_skill(skill_path)
self.skills[analysis["name"]] = analysis
if analysis.get("issues"):
for issue in analysis["issues"]:
if "Missing SKILL.md" in issue:
self.findings["critical"].append(f"Skill '{analysis['name']}': {issue}")
else:
self.findings["warnings"].append(f"Skill '{analysis['name']}': {issue}")
else:
self.findings["passing"].append(f"Skill '{analysis['name']}': OK")
# Analyze agents
for agent_dir in dirs["agents"]:
for agent_file in agent_dir.glob("*.md"):
analysis = self.analyze_agent(agent_file)
self.agents[analysis["name"]] = analysis
if analysis.get("issues"):
for issue in analysis["issues"]:
self.findings["warnings"].append(f"Agent '{analysis['name']}': {issue}")
else:
self.findings["passing"].append(f"Agent '{analysis['name']}': OK")
return {
"commands_count": len(self.commands),
"skills_count": len(self.skills),
"agents_count": len(self.agents),
"commands": self.commands,
"skills": self.skills,
"agents": self.agents,
"findings": self.findings
}
def main():
    """Entry point: run the extensions analysis and print it as JSON."""
    results = ExtensionsAnalyzer().analyze()
    print(json.dumps(results, indent=2, default=str))
    return 0


if __name__ == "__main__":
    sys.exit(main())

View File

@@ -0,0 +1,258 @@
#!/usr/bin/env python3
"""
Token Usage Analyzer
Analyzes MCP servers and CLAUDE.md for token efficiency.
"""
import json
import sys
from pathlib import Path
# Token estimates for known MCP servers
# Rough startup context cost per server (tool schemas + descriptions),
# matched by substring against the configured server name.
MCP_TOKEN_ESTIMATES = {
    "playwright": 13500,
    "puppeteer": 13500,
    "notion": 5000,
    "github": 18000,
    "postgres": 8000,
    "postgresql": 8000,
    "bigquery": 10000,
    "firecrawl": 6000,
    "zapier": 25000,
    "slack": 8000,
    "linear": 6000,
    "memory": 3000,
    "filesystem": 4000,
    "brave-search": 3000,
    "fetch": 2000,
    "sequential-thinking": 2000,
    "chrome-devtools": 8000,
    "dtm-agent": 5000,
}
# Load strategy recommendations
# "always" = keep loaded at startup; "lazy" = load on demand;
# "disable" = too token-heavy to justify keeping configured.
LOAD_STRATEGIES = {
    "playwright": "always",
    "puppeteer": "always",
    "notion": "always",
    "github": "lazy",
    "postgres": "lazy",
    "postgresql": "lazy",
    "bigquery": "lazy",
    "firecrawl": "lazy",
    "zapier": "disable",
    "slack": "lazy",
    "linear": "lazy",
    "memory": "lazy",
    "filesystem": "always",
    "chrome-devtools": "always",
}
# Crude words -> tokens conversion factor used to size CLAUDE.md files.
TOKENS_PER_WORD = 1.3
# Recommended ceilings for CLAUDE.md size (lines is a soft warning,
# tokens is treated as a hard limit by the analyzer).
MAX_CLAUDE_MD_LINES = 200
MAX_CLAUDE_MD_TOKENS = 3000
class TokenAnalyzer:
    """Estimates the baseline context-token cost of MCP servers and CLAUDE.md.

    Token numbers are rough per-server estimates (MCP_TOKEN_ESTIMATES), not
    measurements. Findings are bucketed by severity and combined into a
    JSON-serializable summary by analyze().
    """

    def __init__(self):
        # Findings bucketed by severity; populated by analyze().
        self.findings = {
            "critical": [],
            "warnings": [],
            "passing": [],
            "recommendations": []
        }
        self.mcp_servers = {}       # server name -> analysis dict
        self.claude_md_files = []   # per-file stats for each CLAUDE.md found
        self.mcp_tokens = 0         # baseline tokens from always-loaded servers
        self.claude_md_tokens = 0   # total estimated tokens across CLAUDE.md files

    def find_settings_files(self) -> list:
        """Return existing MCP settings files (global, project, .mcp.json)."""
        locations = [
            Path.home() / ".claude" / "settings.json",
            Path.cwd() / ".claude" / "settings.json",
            Path.cwd() / ".mcp.json",
        ]
        return [p for p in locations if p.exists()]

    def find_claude_md_files(self) -> list:
        """Return existing CLAUDE.md files in the standard locations."""
        locations = [
            Path.home() / ".claude" / "CLAUDE.md",
            Path.cwd() / "CLAUDE.md",
            Path.cwd() / ".claude" / "CLAUDE.md",
        ]
        return [p for p in locations if p.exists()]

    def estimate_server_tokens(self, name: str) -> int:
        """Estimate startup tokens for a server by substring-matching its name."""
        name_lower = name.lower()
        for key, tokens in MCP_TOKEN_ESTIMATES.items():
            if key in name_lower:
                return tokens
        return 5000  # Default estimate for unknown servers

    def get_load_strategy(self, name: str, config: dict | None = None) -> str:
        """Get load strategy - checks actual config first, then recommendations."""
        # Check actual autoStart setting in config
        if config and config.get("autoStart") is False:
            return "lazy"
        name_lower = name.lower()
        for key, strategy in LOAD_STRATEGIES.items():
            if key in name_lower:
                return strategy
        return "lazy"  # Default to lazy for unknown

    def analyze_mcp_servers(self):
        """Analyze MCP server configurations and record findings/token totals."""
        settings_files = self.find_settings_files()
        if not settings_files:
            self.findings["warnings"].append("No MCP settings files found")
            return
        for settings_path in settings_files:
            try:
                with open(settings_path) as f:
                    settings = json.load(f)
            except (json.JSONDecodeError, IOError) as e:
                self.findings["warnings"].append(f"Could not parse {settings_path}: {e}")
                continue
            servers = settings.get("mcpServers", {})
            for name, config in servers.items():
                if not isinstance(config, dict):
                    continue
                # A server defined in several settings files still loads only
                # once, so skip duplicates (first file found wins). Previously
                # duplicates were double-counted in mcp_tokens and produced
                # repeated findings.
                if name in self.mcp_servers:
                    continue
                tokens = self.estimate_server_tokens(name)
                has_instructions = "serverInstructions" in config
                strategy = self.get_load_strategy(name, config)
                self.mcp_servers[name] = {
                    "tokens": tokens,
                    "has_instructions": has_instructions,
                    "strategy": strategy,
                    "source": str(settings_path)
                }
                # Only count "always" servers for baseline
                if strategy == "always":
                    self.mcp_tokens += tokens
                # Generate findings
                if not has_instructions:
                    self.findings["critical"].append(
                        f"MCP '{name}': Missing serverInstructions (breaks Tool Search)"
                    )
                else:
                    self.findings["passing"].append(f"MCP '{name}': Has serverInstructions")
                if tokens > 15000 and strategy == "always":
                    self.findings["warnings"].append(
                        f"MCP '{name}': Heavy server (~{tokens:,} tokens), consider lazy loading"
                    )

    def analyze_claude_md(self):
        """Analyze CLAUDE.md files for size, structure, and redundancy."""
        files = self.find_claude_md_files()
        if not files:
            self.findings["warnings"].append("No CLAUDE.md files found")
            return
        for path in files:
            try:
                content = path.read_text()
            except IOError as e:
                self.findings["warnings"].append(f"Could not read {path}: {e}")
                continue
            lines = len(content.split('\n'))
            words = len(content.split())
            # Heuristic token estimate from the word count.
            tokens = int(words * TOKENS_PER_WORD)
            self.claude_md_files.append({
                "path": str(path),
                "lines": lines,
                "words": words,
                "tokens": tokens
            })
            self.claude_md_tokens += tokens
            # Generate findings
            if tokens > MAX_CLAUDE_MD_TOKENS:
                self.findings["critical"].append(
                    f"CLAUDE.md ({path.name}): ~{tokens:,} tokens exceeds {MAX_CLAUDE_MD_TOKENS:,} limit"
                )
            elif lines > MAX_CLAUDE_MD_LINES:
                self.findings["warnings"].append(
                    f"CLAUDE.md ({path.name}): {lines} lines exceeds {MAX_CLAUDE_MD_LINES} recommended"
                )
            else:
                self.findings["passing"].append(
                    f"CLAUDE.md ({path.name}): {lines} lines, ~{tokens:,} tokens - Good"
                )
            # Check structure (two+ consecutive blank lines)
            if '\n\n\n' in content:
                self.findings["warnings"].append(
                    f"CLAUDE.md ({path.name}): Contains excessive whitespace"
                )
            # Check for common redundancy
            content_lower = content.lower()
            if "you are claude" in content_lower or "you are an ai" in content_lower:
                self.findings["recommendations"].append(
                    f"CLAUDE.md ({path.name}): Remove self-descriptions Claude already knows"
                )

    def analyze(self) -> dict:
        """Run the full analysis and return a JSON-serializable summary."""
        self.analyze_mcp_servers()
        self.analyze_claude_md()
        total_tokens = self.mcp_tokens + self.claude_md_tokens
        # Percentage of a 200K-token context window consumed at startup.
        usage_pct = (total_tokens / 200000) * 100
        # Overall recommendations
        if usage_pct > 30:
            self.findings["critical"].append(
                f"Baseline uses {usage_pct:.1f}% of context - target is under 30%"
            )
        elif usage_pct > 20:
            self.findings["warnings"].append(
                f"Baseline uses {usage_pct:.1f}% of context - consider optimization"
            )
        missing_instructions = sum(
            1 for s in self.mcp_servers.values() if not s.get("has_instructions")
        )
        if missing_instructions > 0:
            self.findings["recommendations"].append(
                f"Add serverInstructions to {missing_instructions} MCP server(s) for Tool Search"
            )
        return {
            "total_tokens": total_tokens,
            "mcp_tokens": self.mcp_tokens,
            "claude_md_tokens": self.claude_md_tokens,
            "mcp_count": len(self.mcp_servers),
            "mcp_servers": self.mcp_servers,
            "claude_md_files": self.claude_md_files,
            "usage_percentage": round(usage_pct, 1),
            "findings": self.findings
        }
def main():
    """Entry point: run the token analysis and print it as JSON."""
    results = TokenAnalyzer().analyze()
    print(json.dumps(results, indent=2))
    return 0


if __name__ == "__main__":
    sys.exit(main())

View File

@@ -0,0 +1,173 @@
#!/usr/bin/env python3
"""
Auto-Fix Script
Applies safe fixes to Claude Code configuration with backup.
"""
import json
import shutil
import sys
from datetime import datetime
from pathlib import Path
# serverInstructions templates for common MCP servers
# Keys are matched by substring against the configured server name (see
# AutoFixer.fix_mcp_instructions); values follow the "purpose / use for /
# keywords" pattern that helps Tool Search route requests.
SERVER_INSTRUCTIONS = {
    "playwright": "Browser automation for web interaction. Use for: SEO audits, page analysis, screenshots, form testing, Core Web Vitals. Keywords: browser, page, screenshot, click, navigate, DOM, selector",
    "puppeteer": "Chrome automation for web testing. Use for: SEO audits, page rendering, JavaScript site testing. Keywords: browser, chrome, headless, screenshot, page",
    "notion": "Notion workspace integration. Use for: saving research, documentation, project notes, knowledge base. Keywords: notion, page, database, wiki, notes, save",
    "github": "GitHub repository management. Use for: commits, PRs, issues, code review. Keywords: git, github, commit, pull request, issue, repository",
    "postgres": "PostgreSQL database queries. Use for: data analysis, SQL queries, analytics. Keywords: sql, query, database, table, select, analytics",
    "postgresql": "PostgreSQL database queries. Use for: data analysis, SQL queries, analytics. Keywords: sql, query, database, table, select, analytics",
    "bigquery": "Google BigQuery for large-scale analysis. Use for: analytics queries, data warehouse. Keywords: bigquery, sql, analytics, data warehouse",
    "firecrawl": "Web scraping and crawling. Use for: site crawling, content extraction, competitor analysis. Keywords: crawl, scrape, extract, spider, sitemap",
    "slack": "Slack workspace integration. Use for: messages, notifications, team communication. Keywords: slack, message, channel, notification",
    "linear": "Linear issue tracking. Use for: issue management, project tracking. Keywords: linear, issue, task, project, sprint",
    "memory": "Persistent memory across sessions. Use for: storing preferences, context recall. Keywords: remember, memory, store, recall",
    "filesystem": "Local file operations. Use for: file reading/writing, directory management. Keywords: file, directory, read, write, path",
    "chrome-devtools": "Chrome DevTools for debugging. Use for: GTM debugging, network analysis, console logs. Keywords: devtools, chrome, debug, network, console",
}
class AutoFixer:
def __init__(self, dry_run: bool = True):
self.dry_run = dry_run
self.fixes = []
self.backup_dir = Path.home() / ".claude" / "backups" / datetime.now().strftime("%Y%m%d_%H%M%S")
def backup_file(self, path: Path) -> bool:
"""Create backup before modifying."""
if not path.exists():
return True
try:
backup_path = self.backup_dir / path.relative_to(Path.home())
backup_path.parent.mkdir(parents=True, exist_ok=True)
shutil.copy2(path, backup_path)
return True
except Exception as e:
print(f"Warning: Could not backup {path}: {e}", file=sys.stderr)
return False
def fix_mcp_instructions(self, settings_path: Path) -> list:
"""Add serverInstructions to MCP servers."""
fixes = []
try:
with open(settings_path) as f:
settings = json.load(f)
except (json.JSONDecodeError, IOError) as e:
return [f"Error reading {settings_path}: {e}"]
servers = settings.get("mcpServers", {})
modified = False
for name, config in servers.items():
if not isinstance(config, dict):
continue
if "serverInstructions" in config:
continue
# Find matching template
instructions = None
name_lower = name.lower()
for key, template in SERVER_INSTRUCTIONS.items():
if key in name_lower:
instructions = template
break
if not instructions:
instructions = f"External tool: {name}. Use when this functionality is needed."
if self.dry_run:
fixes.append(f"[DRY RUN] Would add serverInstructions to '{name}'")
else:
config["serverInstructions"] = instructions
modified = True
fixes.append(f"Added serverInstructions to '{name}'")
if modified and not self.dry_run:
self.backup_file(settings_path)
with open(settings_path, 'w') as f:
json.dump(settings, f, indent=2)
return fixes
def fix_command_frontmatter(self, cmd_path: Path) -> str | None:
"""Add frontmatter to command missing it."""
try:
content = cmd_path.read_text()
except IOError:
return None
if content.startswith('---'):
return None
new_content = f'''---
description: {cmd_path.stem.replace('-', ' ').title()} command
---
{content}'''
if self.dry_run:
return f"[DRY RUN] Would add frontmatter to {cmd_path.name}"
self.backup_file(cmd_path)
cmd_path.write_text(new_content)
return f"Added frontmatter to {cmd_path.name}"
def run(self) -> dict:
"""Apply all fixes."""
results = {"applied": [], "skipped": [], "errors": []}
# Fix MCP settings
settings_paths = [
Path.home() / ".claude" / "settings.json",
Path.cwd() / ".claude" / "settings.json"
]
for path in settings_paths:
if path.exists():
fixes = self.fix_mcp_instructions(path)
results["applied"].extend(fixes)
# Fix commands without frontmatter
cmd_dirs = [
Path.home() / ".claude" / "commands",
Path.cwd() / ".claude" / "commands"
]
for cmd_dir in cmd_dirs:
if cmd_dir.exists():
for cmd_file in cmd_dir.glob("*.md"):
fix = self.fix_command_frontmatter(cmd_file)
if fix:
results["applied"].append(fix)
return results
def main():
    """CLI entry point: dry-run by default; pass --apply to write fixes."""
    import argparse
    parser = argparse.ArgumentParser(description="Auto-fix Claude Code settings")
    parser.add_argument("--apply", action="store_true", help="Apply fixes (default is dry-run)")
    opts = parser.parse_args()
    fixer = AutoFixer(dry_run=not opts.apply)
    outcome = fixer.run()
    print(json.dumps(outcome, indent=2))
    if not fixer.dry_run:
        print(f"\n[APPLIED] {len(outcome['applied'])} fixes.", file=sys.stderr)
        if fixer.backup_dir.exists():
            print(f"Backups: {fixer.backup_dir}", file=sys.stderr)
    else:
        print("\n[DRY RUN] No changes applied. Use --apply to apply fixes.", file=sys.stderr)
    return 0


if __name__ == "__main__":
    sys.exit(main())

View File

@@ -0,0 +1,7 @@
# Claude Code Settings Optimizer
# No external dependencies - uses only Python standard library
# json, re, shutil, subprocess, datetime, sys, pathlib are built-in
# Optional: For future enhancements
# pyyaml>=6.0 # YAML parsing for MCP configs
# rich>=13.0 # Better terminal output

View File

@@ -0,0 +1,245 @@
#!/usr/bin/env python3
"""
Claude Code Settings Audit - Main Orchestrator
Analyzes configuration for token efficiency and optimization.
"""
import json
import sys
import subprocess
from pathlib import Path
from datetime import datetime
SCRIPT_DIR = Path(__file__).parent  # directory containing the analyzer scripts
CONTEXT_LIMIT = 200_000             # Claude context window size, in tokens


def run_analyzer(script_name: str) -> dict:
    """Execute a sibling analyzer script and return its parsed JSON output.

    Every failure mode (missing script, timeout, invalid JSON, non-zero
    exit with empty stdout) is reported as an ``{"error": ...}`` dict
    rather than raised.
    """
    script_path = SCRIPT_DIR / script_name
    if not script_path.exists():
        return {"error": f"Script not found: {script_path}"}
    try:
        proc = subprocess.run(
            [sys.executable, str(script_path)],
            capture_output=True,
            text=True,
            timeout=60,
        )
        # An analyzer may exit non-zero while still emitting a valid JSON
        # report; only a non-zero exit with no output is a hard failure.
        if proc.returncode != 0 and not proc.stdout:
            return {"error": proc.stderr or "Unknown error"}
        return json.loads(proc.stdout)
    except subprocess.TimeoutExpired:
        return {"error": "Analysis timed out"}
    except json.JSONDecodeError as e:
        return {"error": f"Invalid JSON: {e}"}
    except Exception as e:
        return {"error": str(e)}
def calculate_health(token_report: dict, extensions_report: dict) -> str:
    """Classify overall config health from token usage and critical findings.

    Returns "Critical", "Needs Attention", or "Good".
    """
    usage_pct = token_report.get("total_tokens", 0) / CONTEXT_LIMIT * 100
    critical_issues = sum(
        len(rep.get("findings", {}).get("critical", []))
        for rep in (token_report, extensions_report)
    )
    # Thresholds: >30% baseline usage or >2 criticals is outright Critical;
    # >20% usage or any critical finding warrants attention.
    if usage_pct > 30 or critical_issues > 2:
        return "Critical"
    if usage_pct > 20 or critical_issues > 0:
        return "Needs Attention"
    return "Good"
def generate_report(token_report: dict, extensions_report: dict) -> str:
    """Render the combined audit results as a markdown report.

    Args:
        token_report: Output of analyze_tokens.py (token budget findings).
        extensions_report: Output of analyze_extensions.py (extension findings).

    Returns:
        The complete markdown report text.
    """
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    total_tokens = token_report.get("total_tokens", 0)
    available = CONTEXT_LIMIT - total_tokens
    usage_pct = (total_tokens / CONTEXT_LIMIT) * 100
    available_pct = 100 - usage_pct
    health = calculate_health(token_report, extensions_report)
    health_emoji = {"Good": "🟢", "Needs Attention": "🟡", "Critical": "🔴"}[health]
    # Merge findings from both analyzers into flat per-severity lists.
    all_critical = []
    all_warnings = []
    all_passing = []
    all_recommendations = []
    for rep in [token_report, extensions_report]:  # renamed: previously shadowed `report` below
        findings = rep.get("findings", {})
        all_critical.extend(findings.get("critical", []))
        all_warnings.extend(findings.get("warnings", []))
        all_passing.extend(findings.get("passing", []))
        all_recommendations.extend(findings.get("recommendations", []))
    report = f"""# Claude Code Settings Audit Report
**Generated:** {timestamp}
---
## Token Budget Summary
| Component | Tokens | % of 200K | Status |
|-----------|--------|-----------|--------|
| CLAUDE.md | {token_report.get('claude_md_tokens', 0):,} | {token_report.get('claude_md_tokens', 0)/CONTEXT_LIMIT*100:.1f}% | {'🟢' if token_report.get('claude_md_tokens', 0) < 3000 else '🔴'} |
| MCP Servers | {token_report.get('mcp_tokens', 0):,} | {token_report.get('mcp_tokens', 0)/CONTEXT_LIMIT*100:.1f}% | {'🟢' if token_report.get('mcp_tokens', 0) < 10000 else '🟡'} |
| **Baseline Total** | **{total_tokens:,}** | **{usage_pct:.1f}%** | {health_emoji} |
| **Available for Work** | **{available:,}** | **{available_pct:.1f}%** | — |
**Target:** Baseline under 30% (60,000 tokens), Available over 70%
---
## Overall Health: {health_emoji} {health}
- Critical Issues: {len(all_critical)}
- Warnings: {len(all_warnings)}
- Passing Checks: {len(all_passing)}
---
## MCP Server Analysis
**Servers:** {token_report.get('mcp_count', 0)} configured
"""
    # MCP server details: one table row per configured server.
    mcp_servers = token_report.get("mcp_servers", {})
    if mcp_servers:
        report += "| Server | Tokens | Instructions | Strategy |\n"
        report += "|--------|--------|--------------|----------|\n"
        for name, info in mcp_servers.items():
            # Bug fix: both branches previously produced "", leaving the
            # Instructions column always blank. Use the report's check marks.
            instr = "✅" if info.get("has_instructions") else "❌"
            tokens = info.get("tokens", 0)
            strategy = info.get("strategy", "unknown")
            report += f"| {name} | ~{tokens:,} | {instr} | {strategy} |\n"
        report += "\n"
    # CLAUDE.md analysis
    report += f"""---
## CLAUDE.md Analysis
"""
    claude_files = token_report.get("claude_md_files", [])
    for cf in claude_files:
        status = "🟢" if cf.get("tokens", 0) < 3000 else "🔴"
        report += f"- **{cf.get('path', 'Unknown')}**: {cf.get('lines', 0)} lines, ~{cf.get('tokens', 0):,} tokens {status}\n"
    if not claude_files:
        report += "*No CLAUDE.md files found*\n"
    # Extensions summary counts
    report += f"""
---
## Extensions Analysis
- Commands: {extensions_report.get('commands_count', 0)}
- Skills: {extensions_report.get('skills_count', 0)}
- Agents: {extensions_report.get('agents_count', 0)}
"""
    # Findings sections (warnings and passing lists are truncated).
    if all_critical:
        report += "---\n\n## ❌ Critical Issues\n\n"
        for issue in all_critical:
            report += f"- {issue}\n"
        report += "\n"
    if all_warnings:
        report += "---\n\n## ⚠️ Warnings\n\n"
        for warning in all_warnings[:10]:
            report += f"- {warning}\n"
        if len(all_warnings) > 10:
            report += f"- *...and {len(all_warnings) - 10} more*\n"
        report += "\n"
    if all_passing:
        report += "---\n\n## ✅ Passing\n\n"
        for item in all_passing[:5]:
            report += f"- {item}\n"
        if len(all_passing) > 5:
            report += f"- *...and {len(all_passing) - 5} more*\n"
        report += "\n"
    # Recommendations: criticals first (as numbered fixes), then suggestions.
    if all_recommendations or all_critical:
        report += "---\n\n## Recommendations\n\n"
        priority = 1
        for issue in all_critical[:3]:
            report += f"{priority}. **Fix:** {issue}\n"
            priority += 1
        for rec in all_recommendations[:5]:
            report += f"{priority}. {rec}\n"
            priority += 1
        report += "\n"
    report += f"""---
## Next Steps
1. Run `python3 scripts/auto_fix.py` to preview fixes
2. Run `python3 scripts/auto_fix.py --apply` to apply fixes
3. Re-run audit to verify improvements
---
*Generated by Claude Code Settings Optimizer*
"""
    return report
def main():
    """Run both analyzers, print the markdown report, and save md + json copies."""
    print("🔍 Running Claude Code Settings Audit...\n", file=sys.stderr)
    print(" Analyzing tokens...", file=sys.stderr)
    token_report = run_analyzer("analyze_tokens.py")
    print(" Analyzing extensions...", file=sys.stderr)
    extensions_report = run_analyzer("analyze_extensions.py")
    print(" Generating report...\n", file=sys.stderr)
    markdown = generate_report(token_report, extensions_report)
    print(markdown)
    # Persist next to the skill's CLAUDE.md when installed, else in cwd.
    parent = SCRIPT_DIR.parent
    output_dir = parent if (parent / "CLAUDE.md").exists() else Path.cwd()
    md_path = output_dir / "settings-audit-report.md"
    json_path = output_dir / "settings-audit-report.json"
    summary = {
        "timestamp": datetime.now().isoformat(),
        "tokens": token_report,
        "extensions": extensions_report,
        "total_baseline_tokens": token_report.get("total_tokens", 0),
        "health": calculate_health(token_report, extensions_report)
    }
    try:
        md_path.write_text(markdown)
        json_path.write_text(json.dumps(summary, indent=2, default=str))
        print(f"📄 Report: {md_path}", file=sys.stderr)
        print(f"📊 JSON: {json_path}", file=sys.stderr)
    except IOError as e:
        print(f"Warning: Could not save report: {e}", file=sys.stderr)
    # Non-zero exit signals a critical configuration state to callers.
    return 1 if summary["health"] == "Critical" else 0


if __name__ == "__main__":
    sys.exit(main())