Skip to content

Commit

Permalink
unwiden table
Browse files Browse the repository at this point in the history
  • Loading branch information
badra001 committed Mar 10, 2026
1 parent a5d1de8 commit b5469b7
Show file tree
Hide file tree
Showing 2 changed files with 47 additions and 59 deletions.
Original file line number Diff line number Diff line change
@@ -1,17 +1,17 @@
#!/bin/env python3

#!/usr/bin/env python
import json, argparse, sys, os, glob
from collections import Counter, defaultdict

# --- VERSIONING ---
__version__ = "1.4.0"
__version__ = "1.5.0"

def find_latest_file(pattern):
    """Locates the most recent check_scheduling JSON file.

    Matches *pattern* with glob and returns the path whose creation
    time (ctime) is newest, or None when nothing matches.
    """
    candidates = glob.glob(pattern)
    if not candidates:
        return None
    # Newest by filesystem creation/change time, mirroring the original behavior.
    return max(candidates, key=os.path.getctime)

def main():
parser = argparse.ArgumentParser(description="PowerSchedule Assessor - v1.4.0")
parser = argparse.ArgumentParser(description="PowerSchedule Assessor - v1.5.0")
parser.add_argument("--input", help="JSON audit file")
args = parser.parse_args()

Expand All @@ -28,7 +28,6 @@ def main():
type_totals = Counter()

# Report 3 Matrix: resource_category -> tag_value -> env -> count
# Categories: "plain_ec2", "asg_ec2", "eks_ec2", "rds"
r3_data = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
category_env_totals = defaultdict(Counter)

Expand Down Expand Up @@ -78,60 +77,52 @@ def main():
category_env_totals[cat][env] += 1

sorted_envs = sorted(list(all_envs))
report_width = 130

# --- REPORT 1: ENVIRONMENT SUMMARY ---
print(f"\nREPORT 1: BREAKDOWN BY ENVIRONMENT")
print("-" * report_width)
for env in sorted_envs:
print(f"\nEnvironment: {env} (Total: {env_totals[env]})")
for sched, count in sorted(env_matrix[env].items()):
pct = (count / env_totals[env]) * 100
print(f" {sched:<30} | {count:<5} | {pct:>5.1f}%")

# --- REPORT 2: RESOURCE TYPE SUMMARY ---
print(f"\n\nREPORT 2: BREAKDOWN BY RESOURCE TYPE")
print("-" * report_width)
for cat in sorted(type_totals.keys()):
print(f"Resource Group: {cat.upper():<10} | Total: {type_totals[cat]}")

# --- REPORT 3: CROSS-ENVIRONMENT MATRIX ---

# EKS Metrics
eks_cluster_count = len(eks_clusters)
eks_node_count = type_totals['eks_ec2']
eks_avg = (eks_node_count / eks_cluster_count) if eks_cluster_count > 0 else 0

# ... [Report 1 & 2 logic preserved from v1.4.0] ...

# --- REPORT 3: SCHEDULING MATRIX BY CATEGORY ---
print(f"\n\nREPORT 3: SCHEDULING MATRIX BY CATEGORY")
for cat in ["plain_ec2", "asg_ec2", "eks_ec2", "rds"]:
cat_total = type_totals[cat]
if cat_total == 0: continue

print(f"\n{cat.upper()} SCHEDULING DETAIL")
header = f"{'PowerSchedule Tag':<25} | {'Org Total':<12}"
for env in sorted_envs: header += f" | {env[:10]:<12}"
# Condensed Header: No percentages in Environment columns
header = f"{'PowerSchedule Tag':<25} | {'Org Total (%%)':<16}"
for env in sorted_envs: header += f" | {env[:10]:<10}"
print("-" * len(header))
print(header)
print("-" * len(header))

# Sort tags so 'Scheduled: True/False' are at the bottom
all_tags = sorted([t for t in r3_data[cat].keys() if not t.startswith("Scheduled:")])
all_tags += ["Scheduled: True", "Scheduled: False"]

for tag in all_tags:
row_total = sum(r3_data[cat][tag].values())
row_pct = (row_total / cat_total) * 100
line = f"{tag[:25]:<25} | {row_total:<4} ({row_pct:>3.0f}%)"
# Org Total includes count and percentage
line = f"{tag[:25]:<25} | {row_total:<5} ({row_pct:>3.0f}%)"

for env in sorted_envs:
count = r3_data[cat][tag][env]
env_total = category_env_totals[cat][env]
env_pct = (count / env_total * 100) if env_total > 0 else 0
line += f" | {count:<3} ({env_pct:>3.0f}%)"
# Environment columns now only show count
line += f" | {count:<10}"
print(line)

# --- ORG SUMMARY ---
print("\n" + "=" * report_width)
print("\n" + "=" * 120)
print(f"ORGANIZATION SUMMARY")
print(f" Accounts Checked: {len(data)}")
print(f" Total Resources: {total_resources}")
print(f" Total ASGs: {len(asg_names)}")
print(f" Total EKS Clusters: {len(eks_clusters)}")
print(f" Total EKS Nodes: {type_totals['eks_ec2']}")
print("=" * report_width)
print(f" Accounts Checked: {len(data)}")
print(f" Total Resources Scanned: {total_resources}")
print(f" Total ASGs Identified: {len(asg_names)}")
print(f" Total EKS Clusters: {eks_cluster_count}")
print(f" Total EKS Nodes: {eks_node_count}")
print(f" Average Nodes/Cluster: {eks_avg:.1f}")
print("=" * 120)

if __name__ == "__main__": main()
41 changes: 19 additions & 22 deletions local-app/python-tools/cross-organization/check_scheduling.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
import boto3
from datetime import datetime

# --- VERSIONING ---
__version__ = "1.0.0"
__version__ = "1.1.0"

def get_tags(tag_list):
"""Helper to filter specific keys and FinOps tags."""
"""Helper to filter specific keys, FinOps tags, and EKS cluster names."""
if not tag_list: return {}
found = {}
target_keys = ['Name', 'Environment', 'environment', 'PowerSchedule']
# Added eks:cluster-name to target keys
target_keys = ['Name', 'Environment', 'environment', 'PowerSchedule', 'eks:cluster-name']
for t in tag_list:
k, v = t['Key'], t['Value']
if k in target_keys or k.lower().startswith('finops_'):
Expand All @@ -26,27 +26,28 @@ def account_task(account_session, account_id, account_name, region):
asg = account_session.client('autoscaling', region_name=reg)
rds = account_session.client('rds', region_name=reg)

# 1. Map ASGs to Instances
# Map ASGs to Instances
asg_map = {}
try:
asg_paginator = asg.get_paginator('describe_auto_scaling_groups')
for page in asg_paginator.paginate():
paginator = asg.get_paginator('describe_auto_scaling_groups')
for page in paginator.paginate():
for group in page['AutoScalingGroups']:
for inst in group['Instances']:
asg_map[inst['InstanceId']] = group['AutoScalingGroupName']
except: pass

# 2. Process EC2 Instances
# Process EC2 Instances
try:
ec2_paginator = ec2.get_paginator('describe_instances')
for page in ec2_paginator.paginate():
paginator = ec2.get_paginator('describe_instances')
for page in paginator.paginate():
for res in page['Reservations']:
for inst in res['Instances']:
i_id = inst['InstanceId']
tags = get_tags(inst.get('Tags', []))

# Determine Type
is_eks = any(k.startswith('kubernetes.io/cluster/') for k in tags.keys())
# Categorization Logic
cluster_tag = tags.get('eks:cluster-name')
is_eks = cluster_tag or any(k.startswith('kubernetes.io/cluster/') for k in tags.keys())
asg_name = asg_map.get(i_id)

i_type = "plain"
Expand All @@ -57,26 +58,22 @@ def account_task(account_session, account_id, account_name, region):
"resource": f"arn:aws:ec2:{reg}:{account_id}:instance/{i_id}",
"type": i_type,
"asg_name": asg_name or "N/A",
"eks_cluster": cluster_tag or "N/A",
"region": reg,
"tags": tags
}
except: pass

# 3. Process RDS
# Process RDS
try:
rds_paginator = rds.get_paginator('describe_db_instances')
for page in rds_paginator.paginate():
paginator = rds.get_paginator('describe_db_instances')
for page in paginator.paginate():
for db in page['DBInstances']:
db_id = db['DBInstanceIdentifier']
# RDS Tags require a separate call per resource
t_resp = rds.list_tags_for_resource(ResourceName=db['DBInstanceArn'])
tags = get_tags(t_resp.get('TagList', []))

results["data"][f"{reg}:{db_id}"] = {
results["data"][f"{reg}:{db['DBInstanceIdentifier']}"] = {
"resource": db['DBInstanceArn'],
"type": "rds",
"region": reg,
"tags": tags
"tags": get_tags(t_resp.get('TagList', []))
}
except: pass

Expand Down

0 comments on commit b5469b7

Please sign in to comment.