Skip to content

Commit

Permalink
restore functionality
Browse files Browse the repository at this point in the history
  • Loading branch information
badra001 committed Mar 27, 2026
1 parent 8b8300c commit a8b0be9
Showing 1 changed file with 60 additions and 54 deletions.
114 changes: 60 additions & 54 deletions local-app/python-tools/cross-organization/assess_check_scheduling.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from collections import defaultdict

# --- VERSIONING ---
__version__ = "1.7.8"
__version__ = "1.8.0"

def get_tag(tags, key, default=""):
"""Case-insensitive tag lookup."""
def is_schedule_enabled(power_schedule):
    """Return True when a PowerSchedule tag value represents an active schedule.

    The value is normalized (stringified, trimmed, lower-cased) before
    comparison, so "Always_On ", "ALWAYS_ON" etc. all count as disabled.
    "always_on", "no schedule" and the empty string mean "no scheduling".

    NOTE(review): any other value — including None, which normalizes to
    "none" — is treated as scheduled; confirm that is the intended default.
    """
    normalized = str(power_schedule).strip().lower()
    return normalized not in ("always_on", "no schedule", "")

def generate_master_resource_csv(all_resources, ts):
"""Generates the comprehensive resource/FinOps inventory."""
filename = f"scheduling_summary.resources.{ts}.csv"
target_tags = ["PowerSchedule", "Environment", "Name", "finops_project_number", "finops_project_name"]
fields = ["account_id", "region", "type", "arn"] + target_tags

def write_csv(filename, fields, data):
    """Generic CSV writer helper.

    Writes `data` (a list of dicts) to `filename`, using `fields` as the
    header / column order.  Rows are expected to contain only keys listed
    in `fields` (csv.DictWriter's default is to raise on extras).

    Returns True on success.  On any failure it prints the error and
    returns False instead of raising, so one bad report file does not
    abort the rest of the run.
    """
    try:
        # newline='' is required by the csv module so it controls line
        # endings itself (avoids blank rows on Windows).
        with open(filename, 'w', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=fields)
            writer.writeheader()
            writer.writerows(data)
        return True
    except Exception as e:
        # Include the target filename in the message — the previous text
        # printed "(unknown)", losing which report actually failed.
        print(f"  Error writing {filename}: {e}")
        return False

def main():
parser = argparse.ArgumentParser(description=f"Scheduling Assessor v{__version__}")
Expand All @@ -64,70 +50,90 @@ def main():

print(f"Processing {len(audit_files)} audit file(s)...")

# 1. PARSE ALL DATA
for file_path in audit_files:
with open(file_path, 'r') as f:
try:
account_entries = json.load(f)
file_count = 0

# Iterate through the list of account objects
for entry in account_entries:
acc_id = entry.get("account_id", "Unknown")
data_block = entry.get("data", {})

if not data_block:
continue

for res_id, res_info in data_block.items():
# Skip the summary metadata
if res_id == "account_summary":
continue

if res_id == "account_summary": continue
if isinstance(res_info, dict):
# Inject account context for the CSV output
res_info['account_id'] = acc_id
all_res.append(res_info)
file_count += 1

if args.debug:
print(f" [DEBUG] Found {file_count} resources in {file_path}")
except Exception as e:
print(f" Skipping {file_path} due to error: {e}")

if not all_res:
print("❌ Error: No resource data could be extracted. Check JSON structure."); return
print("❌ Error: No resource data could be extracted."); return

# --- CATEGORIZATION LOGIC (RESTORED FROM 1.6.x) ---
categories = {
"plain_ec2": [],
"asg_ec2": [],
"eks_ec2": [],
"rds": []
}

# --- FINOPS HEALTH REPORT ---
for res in all_res:
r_type = res.get('type', '').lower()
if r_type == 'rds':
categories['rds'].append(res)
elif r_type == 'eks_node':
categories['eks_ec2'].append(res)
elif r_type == 'asg_member':
categories['asg_ec2'].append(res)
else:
categories['plain_ec2'].append(res)

# --- TERMINAL REPORTS ---
# Health & Matrix Reports (previously verified logic)
missing_num = sum(1 for r in all_res if not get_tag(r.get('tags', {}), 'finops_project_number'))
missing_name = sum(1 for r in all_res if not get_tag(r.get('tags', {}), 'finops_project_name'))
print("\n" + "=" * 90)
print(f"FINOPS TAGGING HEALTH | Resources: {len(all_res)} | Compliance: {((len(all_res)-missing_num)/len(all_res))*100:.1f}%")

print("\n" + "=" * 85)
print(f"FINOPS TAGGING HEALTH (Total Resources: {len(all_res)})")
print("-" * 85)
print(f" Missing 'finops_project_number': {missing_num:>5}")
print(f" Missing 'finops_project_name': {missing_name:>5}")
print(f" Current Tagging Compliance: {((len(all_res)-missing_num)/len(all_res))*100:.1f}%")

# --- SCHEDULING MATRIX ---
matrix = defaultdict(lambda: {"total": 0, "scheduled": 0})
for res in all_res:
env = get_tag(res.get('tags', {}), 'Environment', 'Unknown')
sched = get_tag(res.get('tags', {}), 'PowerSchedule')
matrix[env]["total"] += 1
if is_schedule_enabled(sched):
if is_schedule_enabled(get_tag(res.get('tags', {}), 'PowerSchedule')):
matrix[env]["scheduled"] += 1

print("\nSCHEDULING COMPLIANCE MATRIX")
print(f"{'Environment':<25} | {'Total':<8} | {'Scheduled':<12} | {'Compliance %'}")
print("-" * 85)
print("-" * 90)
for env, counts in sorted(matrix.items()):
pct = (counts['scheduled'] / counts['total']) * 100
print(f"{env:<25} | {counts['total']:<8} | {counts['scheduled']:<12} | {pct:.1f}%")

# --- FILE GENERATION (ALL 5 FILES) ---
print("\nGENERATING REPORTS...")
master_file = generate_master_resource_csv(all_res, ts)
print(f" [DONE] Created Master Inventory: {master_file}")
print("=" * 85 + "\n")

# Common fields for sub-reports
sub_fields = ["account_id", "region", "type", "resource", "asg_name", "eks_cluster"]

for cat, data in categories.items():
fname = f"scheduling_summary.{cat}.{ts}.csv"
# Map list of tags to a single column for these sub-reports to keep them compact
for d in data: d['all_tags'] = str(d.get('tags', {}))
write_csv(fname, sub_fields + ["all_tags"], data)
print(f" - Created {cat} report: {fname}")

# The Master Resources/FinOps File
master_tags = ["PowerSchedule", "Environment", "Name", "finops_project_number", "finops_project_name"]
master_fields = ["account_id", "region", "type", "arn"] + master_tags
master_rows = []
for res in all_res:
row = {"account_id": res['account_id'], "region": res['region'], "type": res['type'], "arn": res.get('resource')}
for tag in master_tags: row[tag] = get_tag(res.get('tags', {}), tag)
master_rows.append(row)

master_name = f"scheduling_summary.resources.{ts}.csv"
write_csv(master_name, master_fields, master_rows)
print(f" - Created Master FinOps Inventory: {master_name}")
print("=" * 90 + "\n")

if __name__ == "__main__":
main()

0 comments on commit a8b0be9

Please sign in to comment.