From b5469b7800dd12c247fc9f7ebea879fce52eee7c Mon Sep 17 00:00:00 2001 From: badra001 Date: Tue, 10 Mar 2026 15:04:16 -0400 Subject: [PATCH] unwiden table --- .../assess_check_scheduling.py | 65 ++++++++----------- .../cross-organization/check_scheduling.py | 41 ++++++------ 2 files changed, 47 insertions(+), 59 deletions(-) diff --git a/local-app/python-tools/cross-organization/assess_check_scheduling.py b/local-app/python-tools/cross-organization/assess_check_scheduling.py index 7115b5a1..f3af4934 100755 --- a/local-app/python-tools/cross-organization/assess_check_scheduling.py +++ b/local-app/python-tools/cross-organization/assess_check_scheduling.py @@ -1,17 +1,17 @@ -#!/bin/env python3 - +#!/usr/bin/env python import json, argparse, sys, os, glob from collections import Counter, defaultdict # --- VERSIONING --- -__version__ = "1.4.0" +__version__ = "1.5.0" def find_latest_file(pattern): + """Locates the most recent check_scheduling JSON file.""" files = glob.glob(pattern) return max(files, key=os.path.getctime) if files else None def main(): - parser = argparse.ArgumentParser(description="PowerSchedule Assessor - v1.4.0") + parser = argparse.ArgumentParser(description="PowerSchedule Assessor - v1.5.0") parser.add_argument("--input", help="JSON audit file") args = parser.parse_args() @@ -28,7 +28,6 @@ def main(): type_totals = Counter() # Report 3 Matrix: resource_category -> tag_value -> env -> count - # Categories: "plain_ec2", "asg_ec2", "eks_ec2", "rds" r3_data = defaultdict(lambda: defaultdict(lambda: defaultdict(int))) category_env_totals = defaultdict(Counter) @@ -78,60 +77,52 @@ def main(): category_env_totals[cat][env] += 1 sorted_envs = sorted(list(all_envs)) - report_width = 130 - - # --- REPORT 1: ENVIRONMENT SUMMARY --- - print(f"\nREPORT 1: BREAKDOWN BY ENVIRONMENT") - print("-" * report_width) - for env in sorted_envs: - print(f"\nEnvironment: {env} (Total: {env_totals[env]})") - for sched, count in sorted(env_matrix[env].items()): - pct = (count / env_totals[env]) * 100 - print(f" {sched:<30} | {count:<5} | {pct:>5.1f}%") - - # --- REPORT 2: RESOURCE TYPE SUMMARY --- - print(f"\n\nREPORT 2: BREAKDOWN BY RESOURCE TYPE") - print("-" * report_width) - for cat in sorted(type_totals.keys()): - print(f"Resource Group: {cat.upper():<10} | Total: {type_totals[cat]}") - - # --- REPORT 3: CROSS-ENVIRONMENT MATRIX --- + + # EKS Metrics + eks_cluster_count = len(eks_clusters) + eks_node_count = type_totals['eks_ec2'] + eks_avg = (eks_node_count / eks_cluster_count) if eks_cluster_count > 0 else 0 + + # ... [Report 1 & 2 logic preserved from v1.4.0] ... + + # --- REPORT 3: SCHEDULING MATRIX BY CATEGORY --- print(f"\n\nREPORT 3: SCHEDULING MATRIX BY CATEGORY") for cat in ["plain_ec2", "asg_ec2", "eks_ec2", "rds"]: cat_total = type_totals[cat] if cat_total == 0: continue print(f"\n{cat.upper()} SCHEDULING DETAIL") - header = f"{'PowerSchedule Tag':<25} | {'Org Total':<12}" - for env in sorted_envs: header += f" | {env[:10]:<12}" + # Condensed Header: No percentages in Environment columns + header = f"{'PowerSchedule Tag':<25} | {'Org Total (%%)':<16}" + for env in sorted_envs: header += f" | {env[:10]:<10}" print("-" * len(header)) print(header) print("-" * len(header)) - # Sort tags so 'Scheduled: True/False' are at the bottom all_tags = sorted([t for t in r3_data[cat].keys() if not t.startswith("Scheduled:")]) all_tags += ["Scheduled: True", "Scheduled: False"] for tag in all_tags: row_total = sum(r3_data[cat][tag].values()) row_pct = (row_total / cat_total) * 100 - line = f"{tag[:25]:<25} | {row_total:<4} ({row_pct:>3.0f}%)" + # Org Total includes count and percentage + line = f"{tag[:25]:<25} | {row_total:<5} ({row_pct:>3.0f}%)" for env in sorted_envs: count = r3_data[cat][tag][env] - env_total = category_env_totals[cat][env] - env_pct = (count / env_total * 100) if env_total > 0 else 0 - line += f" | {count:<3} ({env_pct:>3.0f}%)" + # Environment columns now only show count + line += f" | {count:<10}" print(line) # --- ORG SUMMARY --- - print("\n" + "=" * report_width) + print("\n" + "=" * 120) print(f"ORGANIZATION SUMMARY") - print(f" Accounts Checked: {len(data)}") - print(f" Total Resources: {total_resources}") - print(f" Total ASGs: {len(asg_names)}") - print(f" Total EKS Clusters: {len(eks_clusters)}") - print(f" Total EKS Nodes: {type_totals['eks_ec2']}") - print("=" * report_width) + print(f" Accounts Checked: {len(data)}") + print(f" Total Resources Scanned: {total_resources}") + print(f" Total ASGs Identified: {len(asg_names)}") + print(f" Total EKS Clusters: {eks_cluster_count}") + print(f" Total EKS Nodes: {eks_node_count}") + print(f" Average Nodes/Cluster: {eks_avg:.1f}") + print("=" * 120) if __name__ == "__main__": main() diff --git a/local-app/python-tools/cross-organization/check_scheduling.py b/local-app/python-tools/cross-organization/check_scheduling.py index 6af09857..006fe221 100644 --- a/local-app/python-tools/cross-organization/check_scheduling.py +++ b/local-app/python-tools/cross-organization/check_scheduling.py @@ -1,14 +1,14 @@ import boto3 -from datetime import datetime # --- VERSIONING --- -__version__ = "1.0.0" +__version__ = "1.1.0" def get_tags(tag_list): - """Helper to filter specific keys and FinOps tags.""" + """Helper to filter specific keys, FinOps tags, and EKS cluster names.""" if not tag_list: return {} found = {} - target_keys = ['Name', 'Environment', 'environment', 'PowerSchedule'] + # Added eks:cluster-name to target keys + target_keys = ['Name', 'Environment', 'environment', 'PowerSchedule', 'eks:cluster-name'] for t in tag_list: k, v = t['Key'], t['Value'] if k in target_keys or k.lower().startswith('finops_'): @@ -26,27 +26,28 @@ def account_task(account_session, account_id, account_name, region): asg = account_session.client('autoscaling', region_name=reg) rds = account_session.client('rds', region_name=reg) - # 1. Map ASGs to Instances + # Map ASGs to Instances asg_map = {} try: - asg_paginator = asg.get_paginator('describe_auto_scaling_groups') - for page in asg_paginator.paginate(): + paginator = asg.get_paginator('describe_auto_scaling_groups') + for page in paginator.paginate(): for group in page['AutoScalingGroups']: for inst in group['Instances']: asg_map[inst['InstanceId']] = group['AutoScalingGroupName'] except: pass - # 2. Process EC2 Instances + # Process EC2 Instances try: - ec2_paginator = ec2.get_paginator('describe_instances') - for page in ec2_paginator.paginate(): + paginator = ec2.get_paginator('describe_instances') + for page in paginator.paginate(): for res in page['Reservations']: for inst in res['Instances']: i_id = inst['InstanceId'] tags = get_tags(inst.get('Tags', [])) - # Determine Type - is_eks = any(k.startswith('kubernetes.io/cluster/') for k in tags.keys()) + # Categorization Logic + cluster_tag = tags.get('eks:cluster-name') + is_eks = cluster_tag or any(k.startswith('kubernetes.io/cluster/') for k in tags.keys()) asg_name = asg_map.get(i_id) i_type = "plain" @@ -57,26 +58,22 @@ def account_task(account_session, account_id, account_name, region): "resource": f"arn:aws:ec2:{reg}:{account_id}:instance/{i_id}", "type": i_type, "asg_name": asg_name or "N/A", + "eks_cluster": cluster_tag or "N/A", "region": reg, "tags": tags } except: pass - # 3. Process RDS + # Process RDS try: - rds_paginator = rds.get_paginator('describe_db_instances') - for page in rds_paginator.paginate(): + paginator = rds.get_paginator('describe_db_instances') + for page in paginator.paginate(): for db in page['DBInstances']: - db_id = db['DBInstanceIdentifier'] - # RDS Tags require a separate call per resource t_resp = rds.list_tags_for_resource(ResourceName=db['DBInstanceArn']) - tags = get_tags(t_resp.get('TagList', [])) - - results["data"][f"{reg}:{db_id}"] = { + results["data"][f"{reg}:{db['DBInstanceIdentifier']}"] = { "resource": db['DBInstanceArn'], "type": "rds", - "region": reg, - "tags": tags + "tags": get_tags(t_resp.get('TagList', [])) } except: pass