diff --git a/.circleci/config.yml b/.circleci/config.yml index a878033..201884e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -2,6 +2,7 @@ version: 2.1 orbs: python: circleci/python@0.3.0 + codecov: codecov/codecov@1.1.0 jobs: build: @@ -20,6 +21,7 @@ jobs: path: test-results - store_artifacts: path: test-results + - codecov/upload deploy: executor: python/default steps: diff --git a/.coveragerc b/.coveragerc index 1f33683..2c331ad 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,2 +1,2 @@ [run] -omit = tests/*,venv/* \ No newline at end of file +omit = cloudiscovery/tests/*,venv/* \ No newline at end of file diff --git a/.prospector.yaml b/.prospector.yaml index 9dbfd96..85b9d62 100644 --- a/.prospector.yaml +++ b/.prospector.yaml @@ -20,4 +20,4 @@ pep8: mccabe: options: - max-complexity: 19 \ No newline at end of file + max-complexity: 21 \ No newline at end of file diff --git a/.pylintrc b/.pylintrc index 7cb9b0c..b090524 100644 --- a/.pylintrc +++ b/.pylintrc @@ -5,4 +5,9 @@ init-hook='import sys; sys.path.append("cloudiscovery")' max-line-length=120 [MESSAGES CONTROL] -disable=pointless-string-statement,locally-disabled,bad-super-call,unnecessary-lambda,missing-class-docstring,arguments-differ,unused-argument,useless-object-inheritance,too-few-public-methods,missing-module-docstring,import-error,eval-used,bad-continuation,invalid-name,missing-function-docstring,no-self-use,no-name-in-module,too-many-lines,attribute-defined-outside-init,fixme,exec-used,expression-not-assigned,too-many-branches +disable=missing-docstring,useless-suppression,pointless-string-statement,locally-disabled,bad-super-call,unnecessary-lambda,missing-class-docstring,arguments-differ,unused-argument,useless-object-inheritance,too-few-public-methods,missing-module-docstring,import-error,eval-used,bad-continuation,invalid-name,missing-function-docstring,no-self-use,no-name-in-module,too-many-lines,attribute-defined-outside-init,fixme,exec-used,expression-not-assigned,too-many-branches + +[SIMILARITIES] + +# Minimum lines number of a similarity. +min-similarity-lines=6 \ No newline at end of file diff --git a/README.md b/README.md index 9ab0414..1e5f3f2 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,7 @@ [![PyPI version](https://badge.fury.io/py/cloudiscovery.svg)](https://badge.fury.io/py/cloudiscovery) [![Downloads](https://pepy.tech/badge/cloudiscovery)](https://pepy.tech/project/cloudiscovery) +[![codecov](https://codecov.io/gh/Cloud-Architects/cloudiscovery/branch/develop/graph/badge.svg)](https://codecov.io/gh/Cloud-Architects/cloudiscovery) ![python version](https://img.shields.io/badge/python-3.6%2C3.7%2C3.8-blue?logo=python) [![CircleCI](https://circleci.com/gh/Cloud-Architects/cloudiscovery.svg?style=svg)](https://circleci.com/gh/Cloud-Architects/cloudiscovery) [![Codacy Badge](https://app.codacy.com/project/badge/Grade/c0a7a5bc51044c7ca8bd9115965e4467)](https://www.codacy.com/gh/Cloud-Architects/cloudiscovery?utm_source=github.com&utm_medium=referral&utm_content=Cloud-Architects/cloudiscovery&utm_campaign=Badge_Grade) @@ -9,115 +10,107 @@ ![aws provider](https://img.shields.io/badge/provider-AWS-orange?logo=amazon-aws&color=ff9900) -Cloudiscovery helps you to analyze resources in your cloud (AWS/GCP/Azure/Alibaba/IBM) account. Now this tool only can check resources in AWS, but we are working to expand to other providers. +Cloudiscovery helps you to analyze resources in your cloud (AWS/GCP/Azure/Alibaba/IBM) account. 
Now this tool only can check resources in AWS, but we are working to expand to other providers. + +The tool consists of various commands to help you understand the cloud infrastructure. ## Features -### AWS VPC +### Diagrams -Example of a diagram: +Commands can generate diagrams. When modelling them, we try to follow the principle: -![diagrams logo](docs/assets/aws-vpc.png) +> Graphical excellence is that which gives to the viewer the greatest number of ideas in the shortest time with the least ink in the smallest space. -Following resources are checked in VPC command: +Edward Tufte -* EC2 Instance -* IAM Policy -* Lambda -* RDS -* EFS -* ElastiCache -* S3 Policy -* Elasticsearch -* DocumentDB -* SQS Queue Policy -* MSK -* NAT Gateway -* Internet Gateway (IGW) -* Classic/Network/Application Load Balancer -* Route Table -* Subnet -* NACL -* Security Group -* VPC Peering -* VPC Endpoint -* EKS -* Synthetic Canary -* EMR -* ECS -* Autoscaling Group -* Media Connect -* Media Live -* Media Store Policy -* REST Api Policy -* Neptune -* CloudHSM -* Sagemaker Notebook -* Sagemaker Training Job -* Sagemaker Model +## Report -The subnets are aggregated to simplify the diagram and hide infrastructure redundancies. There can be two types of subnet aggregates: -1. Private* ones with a route `0.0.0.0/0` to Internet Gateway -2. Public* ones without any route to IGW +The commands generate reports that can be used to further analyze resources. -If EC2 instances and ECS instances are part of an autoscaling group, those instances will be aggregated on a diagram. +### CLI -### AWS Policy +1. Run the cloudiscovery command with following options (if a region not pass, this script will try to get it from ~/.aws/credentials): -Example of a diagram: +1.1 To detect AWS VPC resources (more on [AWS VPC](#aws-vpc)): -![diagrams logo](docs/assets/aws-policy.png) +```sh +cloudiscovery aws-vpc [--vpc-id vpc-xxxxxxx] --region-name xx-xxxx-xxx [--profile-name profile] [--diagram [yes/no]] [--filter xxx] [--verbose] +``` +1.2 To detect AWS policy resources (more on [AWS Policy](#aws-policy)): -Following resources are checked in Policy command: +```sh +cloudiscovery aws-policy [--profile-name profile] [--diagram [yes/no]] [--filter xxx] [--verbose] +``` +1.3 To detect AWS IoT resources (more on [AWS IoT](#aws-iot)): -* IAM User -* IAM Group -* IAM Policy -* IAM Roles -* IAM User to group relationship -* IAM User to policy relationship -* IAM Group to policy relationship -* IAM Role to policy relationship -* [AWS Principals](https://gist.github.com/shortjared/4c1e3fe52bdfa47522cfe5b41e5d6f22) that are able to assume roles +```sh +cloudiscovery aws-iot [--thing-name thing-xxxx] --region-name xx-xxxx-xxx [--profile-name profile] [--diagram [yes/no]] [--filter xxx] [--verbose] +``` -Some roles can be aggregated to simplify the diagram. If a role is associated with a principal and is not attached to any named policy, will be aggregated. +1.4 To detect all AWS resources (more on [AWS All](#aws-all)): -### AWS IoT +```sh +cloudiscovery aws-all --region-name xx-xxxx-xxx [--profile-name profile] [--services xxx,xxx] [--filter xxx] [--verbose] +``` -Example of a diagram: +1.5 To check AWS limits per resource (more on [AWS Limit](#aws-limit)): -![diagrams logo](docs/assets/aws-iot.png) +```sh +cloudiscovery aws-limit --region-name xx-xxxx-xxx [--profile-name profile] [--services xxx,xxx] [--usage 0-100] [--verbose] +``` -Following resources are checked in IoT command: +2. 
For help use: -* IoT Thing -* IoT Thing Type -* IoT Billing Group -* IoT Policies -* IoT Jobs -* IoT Certificates +```sh +cloudiscovery [aws-vpc|aws-policy|aws-iot|aws-all|aws-limit] -h +``` + +### Debbuging + +Enabling verbose mode, it is possible to debug all calls to the providers endpoints and check possible problems. + +### Filtering + +It's possible to filter resources by tags and resource type. To filter, add an option `--filter `, where `` can be: + +1. `Name=tags.costCenter;Value=20000` - to filter resources by a tag name `costCenter` and with value `20000`. +2. `Name=type;Value=aws_lambda_function` to only list lambda functions. + +It's possible to pass multiple values, to be able to select a value from a set. Values are split by `:` sign. If a desired value has a `:` sign, wrap it in `'` signs e.g. `--filter="Name=tags.costCenter;Value=20000:'20001:1'`. + +It is possible to pass multiple filter options, just pass `-f filter_1 -f filter_2`. In that case, the tool will return resources that match either of the filters + +Useful [CF tags](https://aws.amazon.com/blogs/devops/tracking-the-cost-of-your-aws-cloudformation-stack/): +1. `aws:cloudformation:stack-name` - Stack name +2. `aws:cloudformation:stack-id` - Stack id +3. `aws:cloudformation:logical-id` - Logical id defined in CF template ## Requirements and Installation -### AWS Resources +### Installation -This script has been written in python3+ and AWS-CLI and it works in Linux, Windows and OSX. +This tool has been written in Python3+ and AWS-CLI and it works on Linux, Windows and Mac OS. -* Make sure the latest version of AWS-CLI is installed on your workstation, and other components needed, with Python pip already installed: +Make sure the latest version of AWS-CLI is installed on your workstation, and other components needed, with Python pip already installed: ```sh pip install -U cloudiscovery ``` -* Make sure you have properly configured your AWS-CLI with a valid Access Key and Region: +### AWS Credentials + +Make sure you have properly configured your AWS-CLI with a valid Access Key and Region: ```sh aws configure ``` -### AWS Permissions +More on credentials configuration: [Configuration basics](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) -* The configured credentials must be associated to a user or role with proper permissions to do all checks. If you want to use a role with narrowed set of permissions just to perform cloud discovery, use a role from the following CF template shown below. To further increase security, you can add a block to check `aws:MultiFactorAuthPresent` condition in `AssumeRolePolicyDocument`. More on using IAM roles in the [configuration file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-role.html). +#### AWS Permissions + +The configured credentials must be associated to a user or role with proper permissions to do all checks. If you want to use a role with narrowed set of permissions just to perform cloud discovery, use a role from the following CF template shown below. To further increase security, you can add a block to check `aws:MultiFactorAuthPresent` condition in `AssumeRolePolicyDocument`. More on using IAM roles in the [configuration file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-role.html). 
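For example, once that role exists, one way to wire it up as a named profile is via `aws configure set` (the profile name and ARN placeholders below are illustrative, not values defined by this project):

```sh
# create an assume-role profile; point role_arn at the role created from the template below
aws configure set profile.cloudiscovery.role_arn arn:aws:iam::<account-id>:role/<role-name>
aws configure set profile.cloudiscovery.source_profile default
```

The profile can then be passed to the tool with `--profile-name`.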
```json { @@ -151,7 +144,25 @@ aws configure "kafka:ListClusters", "synthetics:DescribeCanaries", "medialive:ListInputs", - "cloudhsm:DescribeClusters" + "cloudhsm:DescribeClusters", + "ssm:GetParametersByPath", + "servicequotas:Get*", + "amplify:ListApps", + "autoscaling-plans:DescribeScalingPlans", + "medialive:ListChannels", + "medialive:ListInputDevices", + "mediapackage:ListChannels", + "qldb:ListLedgers", + "transcribe:ListVocabularies", + "glue:GetDatabases", + "glue:GetUserDefinedFunctions", + "glue:GetSecurityConfigurations", + "glue:GetTriggers", + "glue:GetCrawlers", + "glue:ListWorkflows", + "glue:ListMLTransforms", + "codeguru-reviewer:ListCodeReviews", + "servicediscovery:ListNamespaces" ], "Resource": [ "*" ] } @@ -174,51 +185,162 @@ aws configure } ``` -* (Optional) If you want to be able to switch between multiple AWS credentials and settings, you can configure [named profiles](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html) and later pass profile name when running the tool. +(Optional) If you want to be able to switch between multiple AWS credentials and settings, you can configure [named profiles](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html) and later pass profile name when running the tool. -### Usage +## Commands -1. Run the cloudiscovery command with following options (if a region not informed, this script will try to get from ~/.aws/credentials): +### AWS VPC -1.1 To detect AWS VPC resources: +Example of a diagram: -```sh -cloudiscovery aws-vpc [--vpc-id vpc-xxxxxxx] --region-name xx-xxxx-xxx [--profile-name profile] [--diagram True/False] [--filter xxx] -``` -1.2 To detect AWS policy resources: +![diagrams logo](docs/assets/aws-vpc.png) -```sh -cloudiscovery aws-policy [--profile-name profile] [--diagram True/False] [--filter xxx] -``` -1.3 To detect AWS IoT resources: +Following resources are checked in VPC command: -```sh -cloudiscovery aws-iot [--thing-name thing-xxxx] --region-name xx-xxxx-xxx [--profile-name profile] [--diagram True/False] [--filter xxx] -``` +* Autoscaling Group +* Classic/Network/Application Load Balancer +* Client VPN Endpoints +* CloudHSM +* DocumentDB +* Directory Service +* EC2 Instance +* ECS +* EFS +* ElastiCache +* Elasticsearch +* EKS +* EMR +* IAM Policy +* Internet Gateway (IGW) +* Lambda +* Media Connect +* Media Live +* Media Store Policy +* MSK +* NACL +* NAT Gateway +* Neptune +* QuickSight +* RDS +* REST Api Policy +* Route Table +* S3 Policy +* Sagemaker Notebook +* Sagemaker Training Job +* Sagemaker Model +* Security Group +* SQS Queue Policy +* Site-to-Site VPN Connections +* Subnet +* Synthetic Canary +* VPC Peering +* VPC Endpoint +* VPN Customer Gateways +* Virtual Private Gateways +* Workspace -2. For help use: +The subnets are aggregated to simplify the diagram and hide infrastructure redundancies. There can be two types of subnet aggregates: +1. Private* ones with a route `0.0.0.0/0` to Internet Gateway +2. Public* ones without any route to IGW -```sh -cloudiscovery [aws-vpc|aws-policy|aws-iot] -h -``` +If EC2 instances and ECS instances are part of an autoscaling group, those instances will be aggregated on a diagram. -### Filtering +### AWS Policy -It's possible to filter resources by tags and resource type. To filter, add an option `--filter `, where `` can be: +Example of a diagram: -1. `Name=tags.costCenter;Value=20000` - to filter resources by a tag name `costCenter` and with value `20000`. -2. 
`Name=type;Value=aws_lambda_function` to only list lambda functions. +![diagrams logo](docs/assets/aws-policy.png) -It's possible to pass multiple values, to be able to select a value from a set. Values are split by `:` sign. If a desired value has a `:` sign, wrap it in `'` signs e.g. `--filter="Name=tags.costCenter;Value=20000:'20001:1'`. +Following resources are checked in Policy command: -It is possible to pass multiple filter options, just pass `-f filter_1 -f filter_2`. In that case, the tool will return resources that match either of the filters +* [AWS Principal](https://gist.github.com/shortjared/4c1e3fe52bdfa47522cfe5b41e5d6f22) that are able to assume roles +* IAM Group +* IAM Group to policy relationship +* IAM Policy +* IAM Role +* IAM Role to policy relationship +* IAM User +* IAM User to group relationship +* IAM User to policy relationship -Useful [CF tags](https://aws.amazon.com/blogs/devops/tracking-the-cost-of-your-aws-cloudformation-stack/): -1. `aws:cloudformation:stack-name` - Stack name -2. `aws:cloudformation:stack-id` - Stack id -3. `aws:cloudformation:logical-id` - Logical id defined in CF template +Some roles can be aggregated to simplify the diagram. If a role is associated with a principal and is not attached to any named policy, will be aggregated. + +### AWS IoT -### Using a Docker container +Example of a diagram: + +![diagrams logo](docs/assets/aws-iot.png) + +Following resources are checked in IoT command: + +* IoT Billing Group +* IoT Certificates +* IoT Jobs +* IoT Policies +* IoT Thing +* IoT Thing Type + +### AWS All + +A command to list **ALL** AWS resources. + +The command calls all AWS services (200+) and operations with name `Describe`, `Get...` and `List...` (500+). + +The operations must be allowed to be called by permissions described in [AWS Permissions](#aws-permissions). + +Types of resources mostly cover Terraform types. It is possible to narrow down scope of the resources to ones related with a given service with parameter `-s` e.g. `-s ec2,ecs,cloudfront,rds`. + +### AWS Limit + +It's possible to check resources limits across various service in an account. This command implements over 60 limits checks. + +With `--services value,value,value` parameter, you can narrow down checks to just services that you want to check. + +With `--threshold 0-100` option, you can customize a minimum percentage threshold to start reporting a warning. + +* Services available + * acm + * amplify + * appmesh + * appsync + * autoscaling-plans + * batch + * chime + * codebuild + * codecommit + * codeguru reviewer + * codeguru profiler + * cloudformation + * cloud map + * dynamodb + * ec2 + * ecs + * elasticfilesystem + * elasticbeanstalk + * elasticloadbalancing + * glue + * iam + * kms + * mediaconnect + * medialive + * mediapackage + * qldb + * robomaker + * route53 + * route53resolver + * rds + * s3 + * sns + * transcribe + * translate + * vpc + +AWS has a default quota to all services. At the first time that an account is created, AWS apply this default quota to all services. +An administrator can ask to increase the quota value of a certain service via ticket. This command helps administrators detect those issues in advance. 
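As an illustration (the region, services and threshold below are arbitrary picks, not defaults of the tool), the following run narrows the check to EC2 and RDS and only reports quotas that are more than 80% used:

```sh
cloudiscovery aws-limit --region-name us-east-1 --services ec2,rds --threshold 80
```

Omitting `--services` makes the command check every service listed above.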
+ +More information: [AWS WA, REL 1 How do you manage service limits?](https://wa.aws.amazon.com/wat.question.REL_1.en.html) + +## Using a Docker container To build docker container using Dockerfile @@ -237,9 +359,9 @@ cloudiscovery \ ``` -* If you are using Diagram output and due to fact container is a slim image of Python image, you must run cloudiscovery with "--diagram False", otherwise you'll have an error about "xdg-open". The output file will be saved in "assets/diagrams". +* If you are using Diagram output and due to fact container is a slim image of Python image, you must run cloudiscovery with "--diagram no", otherwise you'll have an error about "xdg-open". The output file will be saved in "assets/diagrams". -### Translate +## Translate This project support English and Portuguese (Brazil) languages. To contribute with a translation, follow this steps: @@ -250,11 +372,11 @@ This project support English and Portuguese (Brazil) languages. To contribute wi python msgfmt.py -o locales/NEWFOLDER/LC_MESSAGES/messages.mo locales/NEWFOLDER/LC_MESSAGES/messages ``` -### Contributing +## Contributing If you have improvements or fixes, we would love to have your contributions. Please use [PEP 8](https://pycodestyle.readthedocs.io/en/latest/) code style. -### Development +## Development When developing, it's recommended to use [venv](https://docs.python.org/3/library/venv.html). @@ -295,12 +417,14 @@ To run pre-commit hooks, you can issue the following command: pre-commit run --all-files ``` +To add new resources to check limit, please remove "assets/.cache/cache.db" + ## Making a release 1. Update the version in cloudiscovery/__init\__.py and create a new git tag with `git tag $VERSION`. 2. Once you push the tag to GitHub with `git push --tags`, a new CircleCI build is triggered. 
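For instance, for the version set in this change (2.2.0) that boils down to:

```sh
# the tag must match __version__ in cloudiscovery/__init__.py; pushing it triggers the CircleCI release build
git tag 2.2.0
git push --tags
```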
-### Similar projects and products +## Similar projects and products * [mingrammer/diagrams](https://github.com/mingrammer/diagrams) - library being used to draw diagrams * [Lucidchart Cloud Insights](https://www.lucidchart.com/pages/solutions/cloud-insights) - commercial extension to Lucidchart diff --git a/cloudiscovery/__init__.py b/cloudiscovery/__init__.py index 79a940c..bb5884f 100644 --- a/cloudiscovery/__init__.py +++ b/cloudiscovery/__init__.py @@ -30,28 +30,40 @@ from provider.policy.command import Policy from provider.vpc.command import Vpc from provider.iot.command import Iot +from provider.all.command import All +from provider.limit.command import Limit -# Check version from shared.common import ( exit_critical, - generate_session, Filterable, parse_filters, ) +from shared.common_aws import aws_verbose, generate_session # pylint: enable=wrong-import-position - +# Check version if sys.version_info < (3, 6): print("Python 3.6 or newer is required", file=sys.stderr) sys.exit(1) -__version__ = "2.1.1" +__version__ = "2.2.0" AVAILABLE_LANGUAGES = ["en_US", "pt_BR"] -DIAGRAMS_OPTIONS = ["True", "False"] DEFAULT_REGION = "us-east-1" +def str2bool(v): + if isinstance(v, bool): + return v + # pylint: disable=no-else-return + if v.lower() in ("yes", "true", "t", "y", "1"): + return True + elif v.lower() in ("no", "false", "f", "n", "0"): + return False + else: + raise argparse.ArgumentTypeError("Boolean value expected.") + + def generate_parser(): parser = argparse.ArgumentParser() @@ -76,13 +88,41 @@ def generate_parser(): ) policy_parser = subparsers.add_parser("aws-policy", help="Analyze policies") - add_default_arguments(policy_parser, is_global=True) + all_parser = subparsers.add_parser("aws-all", help="Analyze all resources") + add_default_arguments(all_parser, diagram_enabled=False) + add_services_argument(all_parser) + + limit_parser = subparsers.add_parser( + "aws-limit", help="Analyze aws limit resources." + ) + add_default_arguments(limit_parser, diagram_enabled=False, filters_enabled=False) + add_services_argument(limit_parser) + limit_parser.add_argument( + "-t", + "--threshold", + required=False, + help="Select the %% of resource threshold between 0 and 100. \ + For example: --threshold 50 will report all resources with more than 50%% threshold.", + ) + return parser -def add_default_arguments(parser, is_global=False): +def add_services_argument(limit_parser): + limit_parser.add_argument( + "-s", + "--services", + required=False, + help='Define services that you want to check, use "," (comma) to separate multiple names. 
\ + If not passed, command will check all services.', + ) + + +def add_default_arguments( + parser, is_global=False, diagram_enabled=True, filters_enabled=True +): if not is_global: parser.add_argument( "-r", @@ -95,27 +135,41 @@ def add_default_arguments(parser, is_global=False): "-p", "--profile-name", required=False, help="Profile to be used" ) parser.add_argument( - "-l", "--language", required=False, help="available languages: pt_BR, en_US" + "-l", "--language", required=False, help="Available languages: pt_BR, en_US" ) parser.add_argument( - "-f", - "--filters", - action="append", - required=False, - help="filter resources (tags only for now, you must specify name and values); multiple filters are possible " - "to pass with -f -f approach, values can be separated by : sign; " - "example: Name=tags.costCenter;Value=20000:'20001:1'", - ) - parser.add_argument( - "-d", - "--diagram", - required=False, - help='print diagram with resources (need Graphviz installed). Use options "True" to ' - 'view image or "False" to save image to disk. Default True', + "--verbose", + "--verbose", + type=str2bool, + nargs="?", + const=True, + default=False, + help="Enable debug mode to sdk calls (default false)", ) + if filters_enabled: + parser.add_argument( + "-f", + "--filters", + action="append", + required=False, + help="filter resources (tags only for now, you must specify name and values); multiple filters " + "are possible to pass with -f -f approach, values can be separated by : sign; " + "example: Name=tags.costCenter;Value=20000:'20001:1'", + ) + if diagram_enabled: + parser.add_argument( + "-d", + "--diagram", + type=str2bool, + nargs="?", + const=True, + default=True, + help="print diagram with resources (need Graphviz installed). Pass true/y[es] to " + "view image or false/n[o] not to generate image. Default true", + ) -# pylint: disable=too-many-branches +# pylint: disable=too-many-branches,too-many-statements def main(): # Entry point for the CLI. # Load commands @@ -126,14 +180,18 @@ def main(): args = parser.parse_args() + # Check if verbose mode is enabled + if args.verbose: + aws_verbose() + if args.language is None or args.language not in AVAILABLE_LANGUAGES: language = "en_US" else: language = args.language # Diagram check - if args.diagram is not None and args.diagram not in DIAGRAMS_OPTIONS: - diagram = "True" + if "diagram" not in args: + diagram = False else: diagram = args.diagram @@ -145,20 +203,13 @@ def main(): _ = defaultlanguage.gettext # diagram version check - if diagram: - # Checking diagram version. Must be 0.13 or higher - if pkg_resources.get_distribution("diagrams").version < "0.14": - exit_critical( - _( - "You must update diagrams package to 0.14 or higher. 
" - "- See on https://github.com/mingrammer/diagrams" - ) - ) + check_diagram_version(diagram) # filters check filters: List[Filterable] = [] - if args.filters is not None: - filters = parse_filters(args.filters) + if "filters" in args: + if args.filters is not None: + filters = parse_filters(args.filters) # aws profile check session = generate_session(args.profile_name) @@ -177,32 +228,48 @@ def main(): # get regions region_names = check_region( - region_parameter=args.region_name, region_name=region_name, session=session + region_parameter=args.region_name, region_name=region_name, session=session, ) + if "threshold" in args: + if args.threshold is not None: + if args.threshold.isdigit() is False: + exit_critical(_("Threshold must be between 0 and 100")) + else: + if int(args.threshold) < 0 or int(args.threshold) > 100: + exit_critical(_("Threshold must be between 0 and 100")) + if args.command == "aws-vpc": - command = Vpc( - vpc_id=args.vpc_id, - region_names=region_names, - session=session, - diagram=diagram, - filters=filters, - ) + command = Vpc(vpc_id=args.vpc_id, region_names=region_names, session=session,) elif args.command == "aws-policy": - command = Policy( - region_names=region_names, session=session, diagram=diagram, filters=filters - ) + command = Policy(region_names=region_names, session=session,) elif args.command == "aws-iot": command = Iot( - thing_name=args.thing_name, - region_names=region_names, - session=session, - diagram=diagram, - filters=filters, + thing_name=args.thing_name, region_names=region_names, session=session, + ) + elif args.command == "aws-all": + command = All(region_names=region_names, session=session) + elif args.command == "aws-limit": + command = Limit( + region_names=region_names, session=session, threshold=args.threshold, ) else: raise NotImplementedError("Unknown command") - command.run() + if "services" in args and args.services is not None: + services = args.services.split(",") + else: + services = [] + command.run(diagram, args.verbose, services, filters) + + +def check_diagram_version(diagram): + if diagram: + # Checking diagram version. Must be 0.13 or higher + if pkg_resources.get_distribution("diagrams").version < "0.14": + exit_critical( + "You must update diagrams package to 0.14 or higher. 
" + "- See on https://github.com/mingrammer/diagrams" + ) def check_region(region_parameter, region_name, session): @@ -214,7 +281,8 @@ def check_region(region_parameter, region_name, session): client = session.client("ec2", region_name=DEFAULT_REGION) valid_region_names = [ - region["RegionName"] for region in client.describe_regions()["Regions"] + region["RegionName"] + for region in client.describe_regions(AllRegions=True)["Regions"] ] if region_parameter != "all": diff --git a/cloudiscovery/provider/all/__init__.py b/cloudiscovery/provider/all/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cloudiscovery/provider/all/command.py b/cloudiscovery/provider/all/command.py new file mode 100644 index 0000000..72c0f25 --- /dev/null +++ b/cloudiscovery/provider/all/command.py @@ -0,0 +1,44 @@ +from typing import List + +from shared.common import Filterable, BaseOptions +from shared.common_aws import BaseAwsOptions, BaseAwsCommand, AwsCommandRunner +from shared.diagram import NoDiagram + + +class AllOptions(BaseAwsOptions, BaseOptions): + services: List[str] + + # pylint: disable=too-many-arguments + def __init__(self, verbose, filters, session, region_name, services: List[str]): + BaseAwsOptions.__init__(self, session, region_name) + BaseOptions.__init__(self, verbose, filters) + self.services = services + + +class All(BaseAwsCommand): + def run( + self, + diagram: bool, + verbose: bool, + services: List[str], + filters: List[Filterable], + ): + for region in self.region_names: + self.init_region_cache(region) + options = AllOptions( + verbose=verbose, + filters=filters, + session=self.session, + region_name=region, + services=services, + ) + + command_runner = AwsCommandRunner(filters) + command_runner.run( + provider="all", + options=options, + diagram_builder=NoDiagram(), + title="AWS Resources - Region {}".format(region), + # pylint: disable=no-member + filename=options.resulting_file_name("all"), + ) diff --git a/cloudiscovery/provider/all/resource/__init__.py b/cloudiscovery/provider/all/resource/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cloudiscovery/provider/all/resource/all.py b/cloudiscovery/provider/all/resource/all.py new file mode 100644 index 0000000..34967fe --- /dev/null +++ b/cloudiscovery/provider/all/resource/all.py @@ -0,0 +1,648 @@ +import collections +import functools +import re +from concurrent.futures.thread import ThreadPoolExecutor +from typing import List, Optional + +from botocore.exceptions import UnknownServiceError +from botocore.loaders import Loader + +from provider.all.command import AllOptions +from shared.common import ( + ResourceProvider, + Resource, + ResourceDigest, + message_handler, + ResourceAvailable, + log_critical, +) +from shared.common_aws import get_paginator, resource_tags + +OMITTED_RESOURCES = [ + "aws_cloudhsm_available_zone", + "aws_cloudhsm_hapg", + "aws_cloudhsm_hsm", + "aws_cloudhsm_luna_client", + "aws_dax_default_parameter", + "aws_dax_parameter_group", + "aws_ec2_reserved_instances_offering", + "aws_ec2_snapshot", + "aws_ec2_spot_price_history", + "aws_ssm_available_patch", + "aws_ssm_document", + "aws_polly_voice", + "aws_lightsail_blueprint", + "aws_lightsail_bundle", + "aws_lightsail_region", + "aws_elastictranscoder_preset", + "aws_ec2_vpc_endpoint_service", + "aws_dms_endpoint_type", + "aws_elasticache_service_update", + "aws_elasticache_cache_parameter_group", + "aws_rds_source_region", + "aws_ssm_association", + "aws_ssm_patch_baseline", + "aws_ec2_prefix", + "aws_ec2_image", + 
"aws_ec2_region", + "aws_opsworks_operating_system", + "aws_rds_account_attribute", + "aws_route53_geo_location", + "aws_redshift_cluster_track", + "aws_directconnect_location", + "aws_dms_account_attribute", + "aws_securityhub_standard", + "aws_ram_resource_type", + "aws_ram_permission", + "aws_ec2_account_attribute", + "aws_elasticbeanstalk_available_solution_stack", + "aws_redshift_account_attribute", + "aws_opsworks_user_profile", + "aws_directconnect_direct_connect_gateway_association", # DirectConnect resources endpoint are complicated + "aws_directconnect_direct_connect_gateway_attachment", + "aws_directconnect_interconnect", + "aws_dms_replication_task_assessment_result", + "aws_ec2_fpga_image", + "aws_ec2_launch_template_version", + "aws_ec2_reserved_instancesing", + "aws_ec2_spot_datafeed_subscription", + "aws_ec2_transit_gateway_multicast_domain", + "aws_elasticbeanstalk_configuration_option", + "aws_elasticbeanstalk_platform_version", + "aws_iam_credential_report", + "aws_iam_account_password_policy", + "aws_importexport_job", + "aws_iot_o_taupdate", + "aws_iot_default_authorizer", + "aws_workspaces_account", + "aws_workspaces_account_modification", + "aws_rds_export_task", + "aws_rds_custom_availability_zone", + "aws_rds_installation_media", + "aws_rds_d_bsecurity_group", + "aws_translate_text_translation_job", + "aws_rekognition_project", + "aws_rekognition_stream_processor", + "aws_sdb_domain", + "aws_redshift_table_restore_status", + "aws_iot_v2_logging_level", + "aws_license_manager_resource_inventory", + "aws_license_manager_license_configuration", + "aws_logs_query_definition", + "aws_autoscaling_scaling_activity", + "aws_cloudwatch_metric", + "aws_organizations_handshakes_for_organization", + "aws_config_organization_config_rule", + "aws_organizations_root", + "aws_organizations_delegated_administrator", + "aws_organizations_create_account_status", + "aws_config_organization_conformance_pack_status", + "aws_config_organization_conformance_pack", + "aws_ec2_reserved_instances_listing", + "aws_redshift_cluster_security_group", + "aws_guardduty_organization_admin_account", + "aws_elasticache_cache_security_group", + "aws_organizations_aws_service_access_for_organization", + "aws_organizations_account", + "aws_config_organization_config_rule_status", + "aws_dynamodb_backup", + "aws_ec2_prefix_list", +] + +# Trying to fix documentation errors or its lack made by "happy pirates" at AWS +REQUIRED_PARAMS_OVERRIDE = { + "batch": {"ListJobs": ["jobQueue"]}, + "cloudformation": { + "DescribeStackEvents": ["stackName"], + "DescribeStackResources": ["stackName"], + "GetTemplate": ["stackName"], + "ListTypeVersions": ["arn"], + }, + "codecommit": {"GetBranch": ["repositoryName"]}, + "codedeploy": { + "GetDeploymentTarget": ["deploymentId"], + "ListDeploymentTargets": ["deploymentId"], + }, + "ecs": { + "ListTasks": ["cluster"], + "ListServices": ["cluster"], + "ListContainerInstances": ["cluster"], + }, + "elasticbeanstalk": { + "DescribeEnvironmentHealth": ["environmentName"], + "DescribeEnvironmentManagedActionHistory": ["environmentName"], + "DescribeEnvironmentManagedActions": ["environmentName"], + "DescribeEnvironmentResources": ["environmentName"], + "DescribeInstancesHealth": ["environmentName"], + }, + "iam": { + "GetUser": ["userName"], + "ListAccessKeys": ["userName"], + "ListServiceSpecificCredentials": ["userName"], + "ListSigningCertificates": ["userName"], + "ListMFADevices": ["userName"], + "ListSSHPublicKeys": ["userName"], + }, + "iot": {"ListAuditFindings": 
["taskId"]}, + "opsworks": { + "ListAuditFindings": ["taskId"], + "DescribeAgentVersions": ["stackId"], + "DescribeApps": ["stackId"], + "DescribeCommands": ["deploymentId"], + "DescribeDeployments": ["appId"], + "DescribeEcsClusters": ["ecsClusterArns"], + "DescribeElasticIps": ["stackId"], + "DescribeElasticLoadBalancers": ["stackId"], + "DescribeInstances": ["stackId"], + "DescribeLayers": ["stackId"], + "DescribePermissions": ["stackId"], + "DescribeRaidArrays": ["stackId"], + "DescribeVolumes": ["stackId"], + }, + "ssm": {"DescribeMaintenanceWindowSchedule": ["windowId"],}, + "shield": {"DescribeProtection": ["protectionId"],}, + "waf": { + "ListActivatedRulesInRuleGroup": ["ruleGroupId"], + "ListLoggingConfigurations": ["limit"], + }, + "waf-regional": { + "ListActivatedRulesInRuleGroup": ["ruleGroupId"], + "ListLoggingConfigurations": ["limit"], + }, + "wafv2": {"ListLoggingConfigurations": ["limit"],}, +} + +ON_TOP_POLICIES = [ + "kafka:ListClusters", + "synthetics:DescribeCanaries", + "medialive:ListInputs", + "cloudhsm:DescribeClusters", + "ssm:GetParametersByPath", +] + +SKIPPED_SERVICES = [ + "sagemaker" +] # those services have too unreliable API to make use of it + +PARALLEL_SERVICE_CALLS = 80 + + +def _to_snake_case(camel_case): + return ( + re.sub("(?!^)([A-Z]+)", r"_\1", camel_case) + .lower() + .replace("open_idconnect", "open_id_connect") + .replace("samlproviders", "saml_providers") + .replace("sshpublic_keys", "ssh_public_keys") + .replace("mfadevices", "mfa_devices") + .replace("cacertificates", "ca_certificates") + .replace("awsservice", "aws_service") + .replace("dbinstances", "db_instances") + .replace("drtaccess", "drt_access") + .replace("ipsets", "ip_sets") + .replace("mljobs", "ml_jobs") + .replace("dbcluster", "db_cluster") + .replace("dbengine", "db_engine") + .replace("dbsecurity", "db_security") + .replace("dbsubnet", "db_subnet") + .replace("dbsnapshot", "db_snapshot") + .replace("dbproxies", "db_proxies") + .replace("dbparameter", "db_parameter") + .replace("dbinstance", "db_instance") + .replace("d_bparameter", "db_parameter") + .replace("s_amlproviders", "saml_providers") + .replace("a_wsservice", "aws_service") + ) + + +PLURAL_TO_SINGULAR = { + "ies": "y", + "status": "status", + "ches": "ch", + "ses": "s", +} + + +def singular_from_plural(name: str) -> str: + if name.endswith("s"): + for plural_suffix, singular_suffix in PLURAL_TO_SINGULAR.items(): + if name.endswith(plural_suffix): + name = name[: -len(plural_suffix)] + singular_suffix + return name + if not name.endswith("ss"): + name = name[:-1] + return name + + +def last_singular_name_element(operation_name): + last_name = re.findall("[A-Z][^A-Z]*", operation_name)[-1] + return singular_from_plural(last_name) + + +def retrieve_resource_name(resource, operation_name): + resource_name = None + last_name = last_singular_name_element(operation_name) + if "name" in resource: + resource_name = resource["name"] + elif "Name" in resource: + resource_name = resource["Name"] + elif last_name + "Name" in resource: + resource_name = resource[last_name + "Name"] + elif only_one_suffix(resource, "name"): + resource_name = only_one_suffix(resource, "name") + + return resource_name + + +# pylint: disable=inconsistent-return-statements +def only_one_suffix(resource, suffix): + id_keys = [] + last_id_val = None + for key, val in resource.items(): + if key.lower().endswith(suffix) and not key.lower().endswith( + "display" + suffix + ): + id_keys.append(key) + last_id_val = val + if len(id_keys) == 1: + return 
last_id_val + return None + + +def retrieve_resource_id(resource, operation_name, resource_name): + resource_id = resource_name + last_name = last_singular_name_element(operation_name) + if "id" in resource: + resource_id = resource["id"] + elif last_name + "Id" in resource: + resource_id = resource[last_name + "Id"] + elif only_one_suffix(resource, "id"): + resource_id = only_one_suffix(resource, "id") + elif "arn" in resource: + resource_id = resource["arn"] + elif last_name + "Arn" in resource: + resource_id = resource[last_name + "Arn"] + elif only_one_suffix(resource, "arn"): + resource_id = only_one_suffix(resource, "arn") + + return resource_id + + +def operation_allowed( + allowed_actions: List[str], aws_service: str, operation_name: str +): + evaluation_result = False + for action in allowed_actions: + if action == "*": + evaluation_result = True + break + action_service = action.split(":", 1)[0] + if not action_service == aws_service: + continue + action_operation = action.split(":", 1)[1] + if action_operation.endswith("*") and operation_name.startswith( + action_operation[:-1] + ): + evaluation_result = True + break + if operation_name == action_operation: + evaluation_result = True + break + return evaluation_result + + +def build_resource( + base_resource, operation_name, resource_type, group +) -> Optional[Resource]: + if isinstance(base_resource, str): + return None + resource_name = retrieve_resource_name(base_resource, operation_name) + resource_id = retrieve_resource_id(base_resource, operation_name, resource_name) + + if resource_id is None or resource_name is None: + return None + attributes = flatten(base_resource) + return Resource( + digest=ResourceDigest(id=resource_id, type=resource_type), + group=group, + name=resource_name, + attributes=attributes, + tags=resource_tags(base_resource), + ) + + +def all_exception(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + # pylint: disable=broad-except + except Exception as e: + if func.__qualname__ == "AllResources.analyze_operation": + if not args[0].options.verbose: + return + exception_str = str(e) + if ( + "is not subscribed to AWS Security Hub" in exception_str + or "not enabled for securityhub" in exception_str + or "The subscription does not exist" in exception_str + or "calling the DescribeHub operation" in exception_str + ): + message_handler( + "Operation {} not accessible, AWS Security Hub is not configured... Skipping".format( + args[2] + ), + "WARNING", + ) + elif ( + "not connect to the endpoint URL" in exception_str + or "not available in this region" in exception_str + or "API is not available" in exception_str + ): + message_handler( + "Service {} not available in the selected region... Skipping".format( + args[5] + ), + "WARNING", + ) + elif ( + "Your account is not a member of an organization" in exception_str + or "This action can only be made by accounts in an AWS Organization" + in exception_str + or "The request failed because organization is not in use" + in exception_str + ): + message_handler( + "Service {} only available to account in an AWS Organization... Skipping".format( + args[5] + ), + "WARNING", + ) + elif "is no longer available to new customers" in exception_str: + message_handler( + "Service {} is no longer available to new customers... 
Skipping".format( + args[5] + ), + "WARNING", + ) + elif ( + "only available to Master account in AWS FM" in exception_str + or "not currently delegated by AWS FM" in exception_str + ): + message_handler( + "Operation {} not accessible, not master account in AWS FM... Skipping".format( + args[2] + ), + "WARNING", + ) + else: + log_critical( + "\nError running operation {}, type {}. Error message {}".format( + args[2], args[1], exception_str + ) + ) + else: + log_critical( + "\nError running method {}. Error message {}".format( + func.__qualname__, str(e) + ) + ) + + return wrapper + + +def build_resource_type(aws_service, name): + resource_name = re.sub(r"^List", "", name) + resource_name = re.sub(r"^Get", "", resource_name) + resource_name = re.sub(r"^Describe", "", resource_name) + return singular_from_plural( + "aws_{}_{}".format( + aws_service.replace("-", "_"), _to_snake_case(resource_name), + ) + ) + + +def flatten(d, parent_key="", sep="."): + items = [] + for k, v in d.items(): + new_key = parent_key + sep + k if parent_key else k + if isinstance(v, collections.MutableMapping): + items.extend(flatten(v, new_key, sep=sep).items()) + else: + items.append((new_key, v)) + return dict(items) + + +class AllResources(ResourceProvider): + def __init__(self, options: AllOptions): + """ + All resources + + :param options: + """ + super().__init__() + self.options = options + self.availabilityCheck = ResourceAvailable("") + + @all_exception + def get_resources(self) -> List[Resource]: + boto_loader = Loader() + if self.options.services: + aws_services = self.options.services + else: + aws_services = boto_loader.list_available_services(type_name="service-2") + resources = [] + allowed_actions = self.get_policies_allowed_actions() + + if self.options.verbose: + message_handler( + "Analyzing listing operations across {} service...".format( + len(aws_services) + ), + "HEADER", + ) + with ThreadPoolExecutor(PARALLEL_SERVICE_CALLS) as executor: + results = executor.map( + lambda aws_service: self.analyze_service( + aws_service, boto_loader, allowed_actions + ), + aws_services, + ) + for service_resources in results: + if service_resources is not None: + resources.extend(service_resources) + + return resources + + @all_exception + def analyze_service(self, aws_service, boto_loader, allowed_actions): + resources = [] + client = self.options.client(aws_service) + service_model = boto_loader.load_service_model(aws_service, "service-2") + try: + paginators_model = boto_loader.load_service_model( + aws_service, "paginators-1" + ) + except UnknownServiceError: + paginators_model = {"pagination": {}} + service_full_name = service_model["metadata"]["serviceFullName"] + if self.options.verbose: + message_handler( + "Collecting data from {}...".format(service_full_name), "HEADER" + ) + if ( + not self.availabilityCheck.is_service_available( + self.options.region_name, aws_service + ) + or aws_service in SKIPPED_SERVICES + ) and self.options.verbose: + message_handler( + "Service {} not available in this region... 
Skipping".format( + service_full_name + ), + "WARNING", + ) + return None + for name, operation in service_model["operations"].items(): + if ( + name.startswith("List") + or name.startswith("Get") + or name.startswith("Describe") + ): + has_paginator = name in paginators_model["pagination"] + if "input" in operation: + input_model = service_model["shapes"][operation["input"]["shape"]] + if "required" in input_model and input_model["required"]: + continue + if ( + aws_service in REQUIRED_PARAMS_OVERRIDE + and operation["name"] in REQUIRED_PARAMS_OVERRIDE[aws_service] + ): + continue + resource_type = build_resource_type(aws_service, name) + if resource_type in OMITTED_RESOURCES: + continue + if not operation_allowed(allowed_actions, aws_service, name): + continue + analyze_operation = self.analyze_operation( + resource_type, + name, + has_paginator, + client, + service_full_name, + aws_service, + ) + if analyze_operation is not None: + resources.extend(analyze_operation) + return resources + + @all_exception + # pylint: disable=too-many-locals,too-many-arguments + def analyze_operation( + self, + resource_type, + operation_name, + has_paginator, + client, + service_full_name, + aws_service, + ) -> List[Resource]: + resources = [] + snake_operation_name = _to_snake_case(operation_name) + # pylint: disable=too-many-nested-blocks + if has_paginator: + pages = get_paginator( + client=client, + operation_name=snake_operation_name, + resource_type=resource_type, + filters=None, + ) + list_metadata = pages.result_keys[0].parsed + result_key = None + result_parent = None + result_child = None + if "value" in list_metadata: + result_key = list_metadata["value"] + elif "type" in list_metadata and list_metadata["type"] == "subexpression": + result_parent = list_metadata["children"][0]["value"] + result_child = list_metadata["children"][1]["value"] + else: + if self.options.verbose: + message_handler( + "Operation {} has unsupported pagination definition... 
Skipping".format( + snake_operation_name + ), + "WARNING", + ) + return [] + for page in pages: + if result_key == "Reservations": # hack for EC2 instances + for page_reservation in page["Reservations"]: + for instance in page_reservation["Instances"]: + resource = build_resource( + instance, operation_name, resource_type, aws_service + ) + if resource is not None: + resources.append(resource) + if result_key is not None: + page_resources = page[result_key] + elif result_child in page[result_parent]: + page_resources = page[result_parent][result_child] + else: + page_resources = [] + for page_resource in page_resources: + resource = build_resource( + page_resource, operation_name, resource_type, aws_service + ) + if resource is not None: + resources.append(resource) + else: + + response = getattr(client, snake_operation_name)() + for response_elem in response.values(): + if isinstance(response_elem, list): + for response_resource in response_elem: + resource = build_resource( + response_resource, + operation_name, + resource_type, + aws_service, + ) + if resource is not None: + resources.append(resource) + return resources + + def get_policies_allowed_actions(self): + if self.options.verbose: + message_handler("Fetching allowed actions...", "HEADER") + iam_client = self.options.client("iam") + view_only_document = self.get_policy_allowed_calls( + iam_client, "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess" + ) + sec_audit_document = self.get_policy_allowed_calls( + iam_client, "arn:aws:iam::aws:policy/SecurityAudit" + ) + + allowed_actions = {} + for action in view_only_document["Statement"][0]["Action"]: + allowed_actions[action] = True + for action in sec_audit_document["Statement"][0]["Action"]: + allowed_actions[action] = True + for action in ON_TOP_POLICIES: + allowed_actions[action] = True + if self.options.verbose: + message_handler( + "Found {} allowed actions".format(len(allowed_actions)), "HEADER" + ) + + return allowed_actions.keys() + + def get_policy_allowed_calls(self, iam_client, policy_arn): + policy_version_id = iam_client.get_policy(PolicyArn=policy_arn)["Policy"][ + "DefaultVersionId" + ] + policy_document = iam_client.get_policy_version( + PolicyArn=policy_arn, VersionId=policy_version_id + )["PolicyVersion"]["Document"] + + return policy_document diff --git a/cloudiscovery/provider/iot/command.py b/cloudiscovery/provider/iot/command.py index f0b3b6f..fc31442 100644 --- a/cloudiscovery/provider/iot/command.py +++ b/cloudiscovery/provider/iot/command.py @@ -1,57 +1,62 @@ +from typing import List + from provider.iot.diagram import IoTDiagram -from shared.command import CommandRunner, BaseCommand -from shared.common import BaseAwsOptions, ResourceDigest +from shared.common import ResourceDigest, Filterable, BaseOptions +from shared.common_aws import BaseAwsOptions, BaseAwsCommand, AwsCommandRunner from shared.diagram import NoDiagram, BaseDiagram -class IotOptions(BaseAwsOptions): +class IotOptions(BaseAwsOptions, BaseOptions): thing_name: str - def __new__(cls, session, region_name, thing_name): - """ - Iot options - - :param session: - :param region_name: - :param thing_name: - """ - self = super(BaseAwsOptions, cls).__new__(cls, (session, region_name)) + # pylint: disable=too-many-arguments + def __init__(self, verbose, filters, session, region_name, thing_name): + BaseAwsOptions.__init__(self, session, region_name) + BaseOptions.__init__(self, verbose, filters) self.thing_name = thing_name - return self def iot_digest(self): return 
ResourceDigest(id=self.thing_name, type="aws_iot") -class Iot(BaseCommand): +class Iot(BaseAwsCommand): # pylint: disable=too-many-arguments - def __init__(self, thing_name, region_names, session, diagram, filters): + def __init__(self, thing_name, region_names, session): """ Iot command :param thing_name: :param region_names: :param session: - :param diagram: - :param filters: """ - super().__init__(region_names, session, diagram, filters) + super().__init__(region_names, session) self.thing_name = thing_name - def run(self): - command_runner = CommandRunner(self.filters) + def run( + self, + diagram: bool, + verbose: bool, + services: List[str], + filters: List[Filterable], + ): + command_runner = AwsCommandRunner(filters) for region_name in self.region_names: + self.init_region_cache(region_name) # if thing_name is none, get all things and check if self.thing_name is None: client = self.session.client("iot", region_name=region_name) things = client.list_things() thing_options = IotOptions( - session=self.session, region_name=region_name, thing_name=things + verbose=verbose, + filters=filters, + session=self.session, + region_name=region_name, + thing_name=things, ) diagram_builder: BaseDiagram - if self.diagram: + if diagram: diagram_builder = IoTDiagram(thing_name="") else: diagram_builder = NoDiagram() @@ -66,10 +71,14 @@ def run(self): things = dict() things["things"] = [{"thingName": self.thing_name}] thing_options = IotOptions( - session=self.session, region_name=region_name, thing_name=things + verbose=verbose, + filters=filters, + session=self.session, + region_name=region_name, + thing_name=things, ) - if self.diagram: + if diagram: diagram_builder = IoTDiagram(thing_name=self.thing_name) else: diagram_builder = NoDiagram() diff --git a/cloudiscovery/provider/iot/resource/certificate.py b/cloudiscovery/provider/iot/resource/certificate.py index ad12101..d387b4f 100644 --- a/cloudiscovery/provider/iot/resource/certificate.py +++ b/cloudiscovery/provider/iot/resource/certificate.py @@ -7,8 +7,9 @@ message_handler, ResourceDigest, ResourceEdge, - resource_tags, + ResourceAvailable, ) +from shared.common_aws import resource_tags from shared.error_handler import exception @@ -23,13 +24,15 @@ def __init__(self, iot_options: IotOptions): self.iot_options = iot_options @exception + @ResourceAvailable(services="iot") def get_resources(self) -> List[Resource]: client = self.iot_options.client("iot") resources_found = [] - message_handler("Collecting data from IoT Certificates...", "HEADER") + if self.iot_options.verbose: + message_handler("Collecting data from IoT Certificates...", "HEADER") for thing in self.iot_options.thing_name["things"]: diff --git a/cloudiscovery/provider/iot/resource/policy.py b/cloudiscovery/provider/iot/resource/policy.py index b12a5bc..cbbe836 100644 --- a/cloudiscovery/provider/iot/resource/policy.py +++ b/cloudiscovery/provider/iot/resource/policy.py @@ -7,8 +7,9 @@ message_handler, ResourceDigest, ResourceEdge, - resource_tags, + ResourceAvailable, ) +from shared.common_aws import resource_tags from shared.error_handler import exception @@ -23,13 +24,15 @@ def __init__(self, iot_options: IotOptions): self.iot_options = iot_options @exception + @ResourceAvailable(services="iot") def get_resources(self) -> List[Resource]: client = self.iot_options.client("iot") resources_found = [] - message_handler("Collecting data from IoT Policies...", "HEADER") + if self.iot_options.verbose: + message_handler("Collecting data from IoT Policies...", "HEADER") for thing in 
self.iot_options.thing_name["things"]: diff --git a/cloudiscovery/provider/iot/resource/thing.py b/cloudiscovery/provider/iot/resource/thing.py index dddc647..9c67455 100644 --- a/cloudiscovery/provider/iot/resource/thing.py +++ b/cloudiscovery/provider/iot/resource/thing.py @@ -7,8 +7,9 @@ message_handler, ResourceDigest, ResourceEdge, - resource_tags, + ResourceAvailable, ) +from shared.common_aws import resource_tags from shared.error_handler import exception @@ -23,12 +24,14 @@ def __init__(self, iot_options: IotOptions): self.iot_options = iot_options @exception + @ResourceAvailable(services="iot") def get_resources(self) -> List[Resource]: client = self.iot_options.client("iot") resources_found = [] - message_handler("Collecting data from IoT Things...", "HEADER") + if self.iot_options.verbose: + message_handler("Collecting data from IoT Things...", "HEADER") for thing in self.iot_options.thing_name["things"]: client.describe_thing(thingName=thing["thingName"]) @@ -58,13 +61,15 @@ def __init__(self, iot_options: IotOptions): self.iot_options = iot_options @exception + @ResourceAvailable(services="iot") def get_resources(self) -> List[Resource]: client = self.iot_options.client("iot") resources_found = [] - message_handler("Collecting data from IoT Things Type...", "HEADER") + if self.iot_options.verbose: + message_handler("Collecting data from IoT Things Type...", "HEADER") for thing in self.iot_options.thing_name["things"]: @@ -116,13 +121,15 @@ def __init__(self, iot_options: IotOptions): self.iot_options = iot_options @exception + @ResourceAvailable(services="iot") def get_resources(self) -> List[Resource]: client = self.iot_options.client("iot") resources_found = [] - message_handler("Collecting data from IoT Jobs...", "HEADER") + if self.iot_options.verbose: + message_handler("Collecting data from IoT Jobs...", "HEADER") for thing in self.iot_options.thing_name["things"]: @@ -177,13 +184,15 @@ def __init__(self, iot_options: IotOptions): self.iot_options = iot_options @exception + @ResourceAvailable(services="iot") def get_resources(self) -> List[Resource]: client = self.iot_options.client("iot") resources_found = [] - message_handler("Collecting data from IoT Billing Group...", "HEADER") + if self.iot_options.verbose: + message_handler("Collecting data from IoT Billing Group...", "HEADER") for thing in self.iot_options.thing_name["things"]: diff --git a/cloudiscovery/provider/limit/__init__.py b/cloudiscovery/provider/limit/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cloudiscovery/provider/limit/command.py b/cloudiscovery/provider/limit/command.py new file mode 100644 index 0000000..83f9bd1 --- /dev/null +++ b/cloudiscovery/provider/limit/command.py @@ -0,0 +1,176 @@ +from typing import List + +from shared.common import ( + ResourceCache, + message_handler, + Filterable, + BaseOptions, + log_critical, +) +from shared.common_aws import BaseAwsOptions, BaseAwsCommand, AwsCommandRunner +from shared.diagram import NoDiagram +from provider.limit.data.allowed_resources import ALLOWED_SERVICES_CODES + + +class LimitOptions(BaseAwsOptions, BaseOptions): + services: List[str] + threshold: str + + # pylint: disable=too-many-arguments + def __init__( + self, + verbose: bool, + filters: List[Filterable], + session, + region_name, + services, + threshold, + ): + BaseAwsOptions.__init__(self, session, region_name) + BaseOptions.__init__(self, verbose, filters) + self.services = services + self.threshold = threshold + + +class LimitParameters: + def __init__(self, 
session, region: str, services, options: LimitOptions): + self.region = region + self.cache = ResourceCache() + self.session = session + self.options = options + self.services = [] + if services is None: + for service in ALLOWED_SERVICES_CODES: + self.services.append(service) + else: + self.services = services + + def init_globalaws_limits_cache(self): + """ + AWS has global limit that can be adjustable and others that can't be adjustable + This method make cache for 15 days for aws cache global parameters. AWS don't update limit every time. + Services has differents limit, depending on region. + """ + for service_code in self.services: + if service_code in ALLOWED_SERVICES_CODES: + cache_key = "aws_limits_" + service_code + "_" + self.region + + cache = self.cache.get_key(cache_key) + if cache is not None: + continue + + if self.options.verbose: + message_handler( + "Fetching aws global limit to service {} in region {} to cache...".format( + service_code, self.region + ), + "HEADER", + ) + + cache_codes = dict() + for quota_code in ALLOWED_SERVICES_CODES[service_code]: + + if quota_code != "global": + """ + Impossible to instance once at __init__ method. + Global services such route53 MUST USE us-east-1 region + """ + if ALLOWED_SERVICES_CODES[service_code]["global"]: + service_quota = self.session.client( + "service-quotas", region_name="us-east-1" + ) + else: + service_quota = self.session.client( + "service-quotas", region_name=self.region + ) + + item_to_add = self.get_quota( + quota_code, service_code, service_quota + ) + if item_to_add is None: + continue + + if service_code in cache_codes: + cache_codes[service_code].append(item_to_add) + else: + cache_codes[service_code] = [item_to_add] + + self.cache.set_key(key=cache_key, value=cache_codes, expire=1296000) + + return True + + def get_quota(self, quota_code, service_code, service_quota): + try: + response = service_quota.get_aws_default_service_quota( + ServiceCode=service_code, QuotaCode=quota_code + ) + # pylint: disable=broad-except + except Exception as e: + if self.options.verbose: + log_critical( + "\nCannot take quota {} for {}: {}".format( + quota_code, service_code, str(e) + ) + ) + return None + item_to_add = { + "value": response["Quota"]["Value"], + "adjustable": response["Quota"]["Adjustable"], + "quota_code": quota_code, + "quota_name": response["Quota"]["QuotaName"], + } + return item_to_add + + +class Limit(BaseAwsCommand): + def __init__(self, region_names, session, threshold): + """ + All AWS resources + + :param region_names: + :param session: + :param threshold: + """ + super().__init__(region_names, session) + self.threshold = threshold + + def init_globalaws_limits_cache(self, region, services, options: LimitOptions): + # Cache services global and local services + LimitParameters( + session=self.session, region=region, services=services, options=options + ).init_globalaws_limits_cache() + + def run( + self, + diagram: bool, + verbose: bool, + services: List[str], + filters: List[Filterable], + ): + if not services: + services = [] + for service in ALLOWED_SERVICES_CODES: + services.append(service) + + for region in self.region_names: + limit_options = LimitOptions( + verbose=verbose, + filters=filters, + session=self.session, + region_name=region, + services=services, + threshold=self.threshold, + ) + self.init_globalaws_limits_cache( + region=region, services=services, options=limit_options + ) + + command_runner = AwsCommandRunner(services=services) + command_runner.run( + provider="limit", + 
options=limit_options, + diagram_builder=NoDiagram(), + title="AWS Limits - Region {}".format(region), + # pylint: disable=no-member + filename=limit_options.resulting_file_name("limit"), + ) diff --git a/cloudiscovery/provider/limit/data/allowed_resources.py b/cloudiscovery/provider/limit/data/allowed_resources.py new file mode 100644 index 0000000..25f1aa7 --- /dev/null +++ b/cloudiscovery/provider/limit/data/allowed_resources.py @@ -0,0 +1,651 @@ +ALLOWED_SERVICES_CODES = { + "acm": { + "L-F141DD1D": { + "method": "list_certificates", + "key": "CertificateSummaryList", + "fields": [], + }, + "global": False, + }, + "amplify": { + "L-1BED97F3": {"method": "list_apps", "key": "apps", "fields": [],}, + "global": False, + }, + "appmesh": { + "L-AC861A39": {"method": "list_meshes", "key": "meshes", "fields": [],}, + "global": False, + }, + "appsync": { + "L-06A0647C": { + "method": "list_graphql_apis", + "key": "graphqlApis", + "fields": [], + }, + "global": False, + }, + "autoscaling-plans": { + "L-BD401546": { + "method": "describe_scaling_plans", + "key": "ScalingPlans", + "fields": [], + }, + "global": False, + }, + "AWSCloudMap": { + "L-0FE3F50E": {"method": "list_namespaces", "key": "Namespaces", "fields": [],}, + "global": False, + }, + "batch": { + "L-144F0CA5": { + "method": "describe_compute_environments", + "key": "computeEnvironments", + "fields": [], + }, + "global": False, + }, + "chime": { + "L-8EE806B4": { + "method": "list_voice_connectors", + "key": "VoiceConnectors", + "fields": [], + }, + "L-32405DBA": { + "method": "list_phone_numbers", + "key": "PhoneNumbers", + "fields": [], + }, + "L-D3615084": { + "method": "list_voice_connector_groups", + "key": "VoiceConnectorGroups", + "fields": [], + }, + "global": True, + }, + "codebuild": { + "L-ACCF6C0D": {"method": "list_projects", "key": "projects", "fields": [],}, + "global": False, + }, + "codecommit": { + "L-81790602": { + "method": "list_repositories", + "key": "repositories", + "fields": [], + }, + "global": False, + }, + "cloudformation": { + "L-0485CB21": { + "method": "list_stacks", + "key": "StackSummaries", + "fields": [], + "filter": { + "StackStatusFilter": [ + "CREATE_IN_PROGRESS", + "CREATE_FAILED", + "CREATE_COMPLETE", + "ROLLBACK_IN_PROGRESS", + "ROLLBACK_FAILED", + "ROLLBACK_COMPLETE", + "DELETE_IN_PROGRESS", + "DELETE_FAILED", + "UPDATE_IN_PROGRESS", + "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", + "UPDATE_COMPLETE", + "UPDATE_ROLLBACK_IN_PROGRESS", + "UPDATE_ROLLBACK_FAILED", + "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", + "UPDATE_ROLLBACK_COMPLETE", + "REVIEW_IN_PROGRESS", + "IMPORT_IN_PROGRESS", + "IMPORT_COMPLETE", + "IMPORT_ROLLBACK_IN_PROGRESS", + "IMPORT_ROLLBACK_FAILED", + "IMPORT_ROLLBACK_COMPLETE", + ] + }, + }, + "L-9DE8E4FB": {"method": "list_types", "key": "TypeSummaries", "fields": [],}, + "L-31709F13": {"method": "list_stack_sets", "key": "Summaries", "fields": [],}, + "global": False, + }, + "codeguru-reviewer": { + "L-F5129FC6": { + "method": "list_code_reviews", + "key": "CodeReviewSummaries", + "fields": [], + "filter": {"Type": "PullRequest"}, + }, + "global": False, + }, + "codeguru-profiler": { + "L-DA8D4E8D": { + "method": "list_profiling_groups", + "key": "profilingGroupNames", + "fields": [], + }, + "global": False, + }, + "dynamodb": { + "L-F98FE922": {"method": "list_tables", "key": "TableNames", "fields": [],}, + "global": False, + }, + "ec2": { + "L-0263D0A3": { + "method": "describe_addresses", + "key": "Addresses", + "fields": [], + }, + "L-74FC7D96": { + "method": 
"describe_instances", + "key": "Reservations", + "fields": [], + "filter": { + "Filters": [ + { + "Name": "instance-type", + "Values": ["f1.2xlarge", "f1.4xlarge", "f1.16xlarge"], + } + ] + }, + }, + "L-DB2E81BA": { + "method": "describe_instances", + "key": "Reservations", + "fields": [], + "filter": { + "Filters": [ + { + "Name": "instance-type", + "Values": [ + "g3s.xlarge", + "g3.4xlarge", + "g3.8xlarge", + "g3.16xlarge", + "g4dn.xlarge", + "g4dn.2xlarge", + "g4dn.4xlarge", + "g4dn.8xlarge", + "g4dn.16xlarge", + "g4dn.12xlarge", + "g4dn.metal", + ], + } + ] + }, + }, + "L-1945791B": { + "method": "describe_instances", + "key": "Reservations", + "fields": [], + "filter": { + "Filters": [ + { + "Name": "instance-type", + "Values": [ + "inf1.xlarge", + "inf1.2xlarge", + "inf1.6xlarge", + "inf1.24xlarge", + ], + } + ] + }, + }, + "L-417A185B": { + "method": "describe_instances", + "key": "Reservations", + "fields": [], + "filter": { + "Filters": [ + { + "Name": "instance-type", + "Values": [ + "p2.xlarge", + "p2.8xlarge", + "p2.16xlarge", + "p3.2xlarge", + "p3.8xlarge", + "p3.16xlarge", + "p3dn.24xlarge", + ], + } + ] + }, + }, + "L-1216C47A": { + "method": "describe_instances", + "key": "Reservations", + "fields": [], + "filter": { + "Filters": [ + { + "Name": "instance-type", + "Values": [ + "c5d.large", + "c5d.xlarge", + "c5d.2xlarge", + "c5d.4xlarge", + "c5d.9xlarge", + "c5d.12xlarge", + "c5d.18xlarge", + "c5d.24xlarge", + "c5d.metal", + "c5a.large", + "c5a.xlarge", + "c5a.2xlarge", + "c5a.4xlarge", + "c5a.8xlarge", + "c5a.12xlarge", + "c5a.16xlarge", + "c5a.24xlarge", + "c5n.large", + "c5n.xlarge", + "c5n.2xlarge", + "c5n.4xlarge", + "c5n.9xlarge", + "c5n.18xlarge", + "c5n.metal", + "c4.large", + "c4.xlarge", + "c4.2xlarge", + "c4.4xlarge", + "c4.8xlarge", + "d2.xlarge", + "d2.2xlarge", + "d2.4xlarge", + "d2.8xlarge", + "h1.2xlarge", + "h1.4xlarge", + "h1.8xlarge", + "h1.16xlarge", + "i3.large", + "i3.xlarge", + "i3.2xlarge", + "i3.4xlarge", + "i3.8xlarge", + "i3.16xlarge", + "i3.metal", + "m6g.medium", + "m6g.large", + "m6g.xlarge", + "m6g.2xlarge", + "m6g.4xlarge", + "m6g.8xlarge", + "m6g.12xlarge", + "m6g.16xlarge", + "m6g.metal", + "m5.large", + "m5.xlarge", + "m5.2xlarge", + "m5.4xlarge", + "m5.8xlarge", + "m5.12xlarge", + "m5.16xlarge", + "m5.24xlarge", + "m5.metal", + "m5d.large", + "m5d.xlarge", + "m5d.2xlarge", + "m5d.4xlarge", + "m5d.8xlarge", + "m5d.12xlarge", + "m5d.16xlarge", + "m5d.24xlarge", + "m5d.metal", + "m5a.large", + "m5a.xlarge", + "m5a.2xlarge", + "m5a.4xlarge", + "m5a.8xlarge", + "m5a.12xlarge", + "m5a.16xlarge", + "m5a.24xlarge", + "m5ad.large", + "m5ad.xlarge", + "m5ad.2xlarge", + "m5ad.4xlarge", + "m5ad.12xlarge", + "m5ad.24xlarge", + "m5n.large", + "m5n.xlarge", + "m5n.2xlarge", + "m5n.4xlarge", + "m5n.8xlarge", + "m5n.12xlarge", + "m5n.16xlarge", + "m5n.24xlarge", + "m5dn.large", + "m5dn.xlarge", + "m5dn.2xlarge", + "m5dn.4xlarge", + "m5dn.8xlarge", + "m5dn.12xlarge", + "m5dn.16xlarge", + "m5dn.24xlarge", + "m4.large", + "m4.xlarge", + "m4.2xlarge", + "m4.4xlarge", + "m4.10xlarge", + "m4.16xlarge", + "z1d.large", + "z1d.xlarge", + "z1d.2xlarge", + "z1d.3xlarge", + "z1d.6xlarge", + "z1d.12xlarge", + "z1d.metal", + "r6g.medium", + "r6g.large", + "r6g.xlarge", + "r6g.2xlarge", + "r6g.4xlarge", + "r6g.8xlarge", + "r6g.12xlarge", + "r6g.16xlarge", + "r6g.metal", + "r5.large", + "r5.xlarge", + "r5.2xlarge", + "r5.4xlarge", + "r5.8xlarge", + "r5.12xlarge", + "r5.16xlarge", + "r5.24xlarge", + "r5.metal", + "r5d.large", + "r5d.xlarge", + "r5d.2xlarge", + 
"r5d.4xlarge", + "r5d.8xlarge", + "r5d.12xlarge", + "r5d.16xlarge", + "r5d.24xlarge", + "r5d.metal", + "r5a.large", + "r5a.xlarge", + "r5a.2xlarge", + "r5a.4xlarge", + "r5a.8xlarge", + "r5a.12xlarge", + "r5a.16xlarge", + "r5a.24xlarge", + "r5ad.large", + "r5ad.xlarge", + "r5ad.2xlarge", + "r5ad.4xlarge", + "r5ad.12xlarge", + "r5ad.24xlarge", + "r5n.large", + "r5n.xlarge", + "r5n.2xlarge", + "r5n.4xlarge", + "r5n.8xlarge", + "r5n.12xlarge", + "r5n.16xlarge", + "r5n.24xlarge", + "r5dn.large", + "r5dn.xlarge", + "r5dn.2xlarge", + "r5dn.4xlarge", + "r5dn.8xlarge", + "r5dn.12xlarge", + "r5dn.16xlarge", + "r5dn.24xlarge", + "r4.large", + "r4.xlarge", + "r4.2xlarge", + "r4.4xlarge", + "r4.8xlarge", + "r4.16xlarge", + "t3.nano", + "t3.micro", + "t3.small", + "t3.medium", + "t3.large", + "t3.xlarge", + "t3.2xlarge", + "t3a.nano", + "t3a.micro", + "t3a.small", + "t3a.medium", + "t3a.large", + "t3a.xlarge", + "t3a.2xlarge", + "t2.nano", + "t2.micro", + "t2.small", + "t2.medium", + "t2.large", + "t2.xlarge", + "t2.2xlarge", + ], + } + ] + }, + }, + "L-7295265B": { + "method": "describe_instances", + "key": "Reservations", + "fields": [], + "filter": { + "Filters": [ + { + "Name": "instance-type", + "Values": [ + "x1e.xlarge", + "x1e.2xlarge", + "x1e.4xlarge", + "x1e.8xlarge", + "x1e.16xlarge", + "x1e.32xlarge", + "x1.16xlarge", + "x1.32xlarge", + ], + } + ] + }, + }, + "global": False, + }, + "ecs": { + "L-21C621EB": {"method": "list_clusters", "key": "clusterArns", "fields": [],}, + "global": False, + }, + "elasticfilesystem": { + "L-848C634D": { + "method": "describe_file_systems", + "key": "FileSystems", + "fields": [], + }, + "global": False, + }, + "elasticbeanstalk": { + "L-8EFC1C51": { + "method": "describe_environments", + "key": "Environments", + "fields": [], + }, + "L-1CEABD17": { + "method": "describe_applications", + "key": "Applications", + "fields": [], + }, + "L-D64F1F14": { + "method": "describe_application_versions", + "key": "ApplicationVersions", + "fields": [], + }, + "global": False, + }, + "elasticloadbalancing": { + "L-53DA6B97": { + "method": "describe_load_balancers", + "key": "LoadBalancers", + "fields": [], + }, + "global": False, + }, + "glue": { + "L-F953935E": {"method": "get_databases", "key": "DatabaseList", "fields": [],}, + "L-D987EC31": { + "method": "get_user_defined_functions", + "key": "UserDefinedFunctions", + "fields": [], + "filter": {"Pattern": "*"}, + }, + "L-83192DBF": { + "method": "get_security_configurations", + "key": "SecurityConfigurations", + "fields": [], + }, + "L-F1653A6D": {"method": "get_triggers", "key": "Triggers", "fields": [],}, + "L-11FA2C1A": {"method": "get_crawlers", "key": "Crawlers", "fields": [],}, + "L-7DD7C33A": {"method": "list_workflows", "key": "Workflows", "fields": [],}, + "L-04CEE988": { + "method": "list_ml_transforms", + "key": "TransformIds", + "fields": [], + }, + "global": False, + }, + "iam": { + "L-F4A5425F": {"method": "list_groups", "key": "Groups", "fields": [],}, + "L-F55AF5E4": {"method": "list_users", "key": "Users", "fields": [],}, + "L-BF35879D": { + "method": "list_server_certificates", + "key": "ServerCertificateMetadataList", + "fields": [], + }, + "L-6E65F664": { + "method": "list_instance_profiles", + "key": "InstanceProfiles", + "fields": [], + "paginate": False, + }, + "L-FE177D64": {"method": "list_roles", "key": "Roles", "fields": [],}, + "L-DB618D39": { + "method": "list_saml_providers", + "key": "SAMLProviderList", + "fields": [], + }, + "global": True, + }, + "kms": { + "L-C2F1777E": {"method": 
"list_keys", "key": "Keys", "fields": [],}, + "L-2601EE20": {"method": "list_aliases", "key": "Aliases", "fields": [],}, + "global": False, + }, + "mediaconnect": { + "L-A99016A8": {"method": "list_flows", "key": "Flows", "fields": [],}, + "L-F1F62F5D": { + "method": "list_entitlements", + "key": "Entitlements", + "fields": [], + }, + "global": False, + }, + "medialive": { + "L-D1AFAF75": {"method": "list_channels", "key": "Channels", "fields": [],}, + "L-BDF24E14": { + "method": "list_input_devices", + "key": "InputDevices", + "fields": [], + }, + "global": False, + }, + "mediapackage": { + "L-352B8598": {"method": "list_channels", "key": "Channels", "fields": [],}, + "global": False, + }, + "qldb": { + "L-CD70CADB": {"method": "list_ledgers", "key": "Ledgers", "fields": [],}, + "global": False, + }, + "robomaker": { + "L-40FACCBF": {"method": "list_robots", "key": "robots", "fields": [],}, + "L-D6554FB1": { + "method": "list_simulation_applications", + "key": "simulationApplicationSummaries", + "fields": [], + }, + "global": False, + }, + "route53": { + "L-4EA4796A": { + "method": "list_hosted_zones", + "key": "HostedZones", + "fields": [], + }, + "L-ACB674F3": { + "method": "list_health_checks", + "key": "HealthChecks", + "fields": [], + }, + "global": True, + }, + "route53resolver": { + "L-4A669CC0": { + "method": "list_resolver_endpoints", + "key": "ResolverEndpoints", + "fields": [], + }, + "L-51D8A1FB": { + "method": "list_resolver_rules", + "key": "ResolverRules", + "fields": [], + }, + "global": True, + }, + "rds": { + "L-7B6409FD": { + "method": "describe_db_instances", + "key": "DBInstances", + "fields": [], + }, + "L-952B80B8": { + "method": "describe_db_clusters", + "key": "DBClusters", + "fields": [], + }, + "L-DE55804A": { + "method": "describe_db_parameter_groups", + "key": "DBParameterGroups", + "fields": [], + }, + "L-9FA33840": { + "method": "describe_option_groups", + "key": "OptionGroupsList", + "fields": [], + }, + "global": False, + }, + "s3": { + "L-DC2B2D3D": {"method": "list_buckets", "key": "Buckets", "fields": [],}, + "global": False, + }, + "sns": { + "L-61103206": {"method": "list_topics", "key": "Topics", "fields": [],}, + "global": False, + }, + "transcribe": { + "L-3278D334": { + "method": "list_vocabularies", + "key": "Vocabularies", + "fields": [], + }, + "global": False, + }, + "translate": { + "L-4011ABD8": { + "method": "list_terminologies", + "key": "TerminologyPropertiesList", + "fields": [], + }, + "global": False, + }, + "vpc": { + "L-F678F1CE": {"method": "describe_vpcs", "key": "Vpcs", "fields": [],}, + "global": False, + }, +} diff --git a/cloudiscovery/provider/limit/resource/__init__.py b/cloudiscovery/provider/limit/resource/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cloudiscovery/provider/limit/resource/all.py b/cloudiscovery/provider/limit/resource/all.py new file mode 100644 index 0000000..e2690d0 --- /dev/null +++ b/cloudiscovery/provider/limit/resource/all.py @@ -0,0 +1,179 @@ +from typing import List + +from concurrent.futures.thread import ThreadPoolExecutor + +from provider.limit.command import LimitOptions +from provider.limit.data.allowed_resources import ALLOWED_SERVICES_CODES +from shared.common import ( + ResourceProvider, + Resource, + ResourceDigest, + message_handler, + ResourceCache, + LimitsValues, +) +from shared.common_aws import get_paginator +from shared.error_handler import exception + +SERVICEQUOTA_TO_BOTO3 = { + "elasticloadbalancing": "elbv2", + "elasticfilesystem": "efs", + "vpc": "ec2", + 
"codeguru-profiler": "codeguruprofiler", + "AWSCloudMap": "servicediscovery", +} + +MAX_EXECUTION_PARALLEL = 2 + + +class LimitResources(ResourceProvider): + def __init__(self, options: LimitOptions): + """ + All resources + + :param options: + """ + super().__init__() + self.options = options + self.cache = ResourceCache() + + @exception + # pylint: disable=too-many-locals + def get_resources(self) -> List[Resource]: + + threshold_requested = ( + 0 if self.options.threshold is None else self.options.threshold + ) + + client_quota = self.options.session.client("service-quotas") + + resources_found = [] + + services = self.options.services + + with ThreadPoolExecutor(MAX_EXECUTION_PARALLEL) as executor: + results = executor.map( + lambda service_name: self.analyze_service( + service_name=service_name, + client_quota=client_quota, + threshold_requested=int(threshold_requested), + ), + services, + ) + + for result in results: + if result is not None: + resources_found.extend(result) + + return resources_found + + @exception + def analyze_service(self, service_name, client_quota, threshold_requested): + cache_key = "aws_limits_" + service_name + "_" + self.options.region_name + cache = self.cache.get_key(cache_key) + resources_found = [] + if service_name not in cache: + return [] + + for data_quota_code in cache[service_name]: + if data_quota_code is None: + continue + resource_found = self.analyze_quota( + client_quota=client_quota, + data_quota_code=data_quota_code, + service=service_name, + threshold_requested=threshold_requested, + ) + if resource_found is not None: + resources_found.append(resource_found) + return resources_found + + @exception + # pylint: disable=too-many-locals + def analyze_quota( + self, client_quota, data_quota_code, service, threshold_requested + ): + resource_found = None + quota_data = ALLOWED_SERVICES_CODES[service][data_quota_code["quota_code"]] + + value_aws = value = data_quota_code["value"] + + # Quota is adjustable by ticket request, then must override this values. 
+ if bool(data_quota_code["adjustable"]) is True: + try: + response_quota = client_quota.get_service_quota( + ServiceCode=service, QuotaCode=data_quota_code["quota_code"] + ) + if "Value" in response_quota["Quota"]: + value = response_quota["Quota"]["Value"] + else: + value = data_quota_code["value"] + except client_quota.exceptions.NoSuchResourceException: + value = data_quota_code["value"] + + if self.options.verbose: + message_handler( + "Collecting data from Quota: " + + service + + " - " + + data_quota_code["quota_name"] + + "...", + "HEADER", + ) + + # Need to convert some quota-services endpoint + if service in SERVICEQUOTA_TO_BOTO3: + service = SERVICEQUOTA_TO_BOTO3.get(service) + + client = self.options.session.client( + service, region_name=self.options.region_name + ) + + usage = 0 + + # Check filters by resource + if "filter" in quota_data: + filters = quota_data["filter"] + else: + filters = None + + pages = get_paginator( + client=client, + operation_name=quota_data["method"], + resource_type="aws_limit", + filters=filters, + ) + if not pages: + if filters: + response = getattr(client, quota_data["method"])(**filters) + else: + response = getattr(client, quota_data["method"])() + usage = len(response[quota_data["key"]]) + else: + for page in pages: + usage = usage + len(page[quota_data["key"]]) + + try: + percent = round((usage / value) * 100, 2) + except ZeroDivisionError: + percent = 0 + + if percent >= threshold_requested: + resource_found = Resource( + digest=ResourceDigest( + id=data_quota_code["quota_code"], type="aws_limit" + ), + name="", + group="", + limits=LimitsValues( + quota_name=data_quota_code["quota_name"], + quota_code=data_quota_code["quota_code"], + aws_limit=int(value_aws), + local_limit=int(value), + usage=int(usage), + service=service, + percent=percent, + ), + ) + + return resource_found diff --git a/cloudiscovery/provider/policy/command.py b/cloudiscovery/provider/policy/command.py index 561cb59..561683f 100644 --- a/cloudiscovery/provider/policy/command.py +++ b/cloudiscovery/provider/policy/command.py @@ -1,16 +1,37 @@ +from typing import List + from provider.policy.diagram import PolicyDiagram -from shared.command import BaseCommand, CommandRunner -from shared.common import BaseAwsOptions + +from shared.common import Filterable, BaseOptions +from shared.common_aws import BaseAwsOptions, BaseAwsCommand, AwsCommandRunner from shared.diagram import NoDiagram -class Policy(BaseCommand): - def run(self): +class PolicyOptions(BaseAwsOptions, BaseOptions): + def __init__(self, verbose, filters, session, region_name): + BaseAwsOptions.__init__(self, session, region_name) + BaseOptions.__init__(self, verbose, filters) + + +class Policy(BaseAwsCommand): + def run( + self, + diagram: bool, + verbose: bool, + services: List[str], + filters: List[Filterable], + ): for region in self.region_names: - options = BaseAwsOptions(session=self.session, region_name=region) + self.init_region_cache(region) + options = PolicyOptions( + verbose=verbose, + filters=filters, + session=self.session, + region_name=region, + ) - command_runner = CommandRunner(self.filters) - if self.diagram: + command_runner = AwsCommandRunner(filters) + if diagram: diagram = PolicyDiagram() else: diagram = NoDiagram() diff --git a/cloudiscovery/provider/policy/resource/general.py b/cloudiscovery/provider/policy/resource/general.py index a01e55d..5fbefaa 100644 --- a/cloudiscovery/provider/policy/resource/general.py +++ b/cloudiscovery/provider/policy/resource/general.py @@ -1,30 +1,35 @@ from 
typing import List -from shared.common import BaseAwsOptions, resource_tags +from provider.policy.command import PolicyOptions from shared.common import ( ResourceProvider, Resource, message_handler, ResourceDigest, ResourceEdge, + ResourceAvailable, ) +from shared.common_aws import resource_tags from shared.error_handler import exception class IamUser(ResourceProvider): - def __init__(self, options: BaseAwsOptions): + @ResourceAvailable(services="iam") + def __init__(self, options: PolicyOptions): """ Iam user :param options: """ super().__init__() + self.options = options self.client = options.client("iam") self.users_found: List[Resource] = [] @exception def get_resources(self) -> List[Resource]: - message_handler("Collecting data from IAM Users...", "HEADER") + if self.options.verbose: + message_handler("Collecting data from IAM Users...", "HEADER") paginator = self.client.get_paginator("list_users") pages = paginator.paginate() diff --git a/cloudiscovery/provider/policy/resource/security.py b/cloudiscovery/provider/policy/resource/security.py index b605b80..4d18c11 100644 --- a/cloudiscovery/provider/policy/resource/security.py +++ b/cloudiscovery/provider/policy/resource/security.py @@ -1,19 +1,20 @@ from concurrent.futures.thread import ThreadPoolExecutor from typing import List -from shared.common import BaseAwsOptions, resource_tags +from provider.policy.command import PolicyOptions from shared.common import ( ResourceProvider, Resource, message_handler, ResourceDigest, ResourceEdge, + ResourceAvailable, ) +from shared.common_aws import resource_tags from shared.error_handler import exception class Principals: - # Source: https://gist.github.com/shortjared/4c1e3fe52bdfa47522cfe5b41e5d6f22 principals = { "a4b.amazonaws.com": { @@ -291,7 +292,7 @@ class Principals: "name": "ECS Application Autoscaling", "group": "network", }, - "edgelambda.lambda.amazonaws.com": { + "edgelambda.amazonaws.com": { "type": "aws_lambda_function", "name": "Lambda@Edge", "group": "compute", @@ -811,7 +812,7 @@ class Principals: class IamPolicy(ResourceProvider): - def __init__(self, options: BaseAwsOptions): + def __init__(self, options: PolicyOptions): """ Iam policy @@ -821,9 +822,11 @@ def __init__(self, options: BaseAwsOptions): self.options = options @exception + @ResourceAvailable(services="iam") def get_resources(self) -> List[Resource]: client = self.options.client("iam") - message_handler("Collecting data from IAM Policies...", "HEADER") + if self.options.verbose: + message_handler("Collecting data from IAM Policies...", "HEADER") resources_found = [] @@ -852,20 +855,23 @@ def build_policy(data): class IamGroup(ResourceProvider): - def __init__(self, options: BaseAwsOptions): + @ResourceAvailable(services="iam") + def __init__(self, options: PolicyOptions): """ Iam group :param options: """ super().__init__() + self.options = options self.client = options.client("iam") self.resources_found: List[Resource] = [] @exception def get_resources(self) -> List[Resource]: - message_handler("Collecting data from IAM Groups...", "HEADER") + if self.options.verbose: + message_handler("Collecting data from IAM Groups...", "HEADER") paginator = self.client.get_paginator("list_groups") pages = paginator.paginate() @@ -913,20 +919,23 @@ def analyze_relations(self, resource): class IamRole(ResourceProvider): - def __init__(self, options: BaseAwsOptions): + @ResourceAvailable(services="iam") + def __init__(self, options: PolicyOptions): """ Iam role :param options: """ super().__init__() + self.options = options 
self.client = options.client("iam") self.resources_found: List[Resource] = [] @exception def get_resources(self) -> List[Resource]: - message_handler("Collecting data from IAM Roles...", "HEADER") + if self.options.verbose: + message_handler("Collecting data from IAM Roles...", "HEADER") paginator = self.client.get_paginator("list_roles") pages = paginator.paginate() @@ -1027,7 +1036,7 @@ def analyze_role_relations(self, resource: Resource): class InstanceProfile(ResourceProvider): - def __init__(self, vpc_options: BaseAwsOptions): + def __init__(self, vpc_options: PolicyOptions): """ Instance profile @@ -1039,7 +1048,8 @@ def __init__(self, vpc_options: BaseAwsOptions): @exception def get_resources(self) -> List[Resource]: - message_handler("Collecting data from Instance Profiles...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from Instance Profiles...", "HEADER") paginator = self.vpc_options.client("iam").get_paginator( "list_instance_profiles" ) diff --git a/cloudiscovery/provider/vpc/command.py b/cloudiscovery/provider/vpc/command.py index 083cf1e..f8911d1 100644 --- a/cloudiscovery/provider/vpc/command.py +++ b/cloudiscovery/provider/vpc/command.py @@ -1,48 +1,45 @@ +from typing import List + from ipaddress import ip_network from provider.vpc.diagram import VpcDiagram -from shared.command import CommandRunner, BaseCommand from shared.common import ( - BaseAwsOptions, ResourceDigest, VPCE_REGEX, SOURCE_IP_ADDRESS_REGEX, + Filterable, + BaseOptions, ) +from shared.common_aws import BaseAwsOptions, BaseAwsCommand, AwsCommandRunner from shared.diagram import NoDiagram, BaseDiagram -class VpcOptions(BaseAwsOptions): +class VpcOptions(BaseAwsOptions, BaseOptions): vpc_id: str - def __new__(cls, session, region_name, vpc_id): - """ - VPC Options - - :param session: - :param region_name: - :param vpc_id: - """ - self = super(BaseAwsOptions, cls).__new__(cls, (session, region_name)) + # pylint: disable=too-many-arguments + def __init__( + self, verbose: bool, filters: List[Filterable], session, region_name, vpc_id + ): + BaseAwsOptions.__init__(self, session, region_name) + BaseOptions.__init__(self, verbose, filters) self.vpc_id = vpc_id - return self def vpc_digest(self): return ResourceDigest(id=self.vpc_id, type="aws_vpc") -class Vpc(BaseCommand): +class Vpc(BaseAwsCommand): # pylint: disable=too-many-arguments - def __init__(self, vpc_id, region_names, session, diagram, filters): + def __init__(self, vpc_id, region_names, session): """ VPC command :param vpc_id: :param region_names: :param session: - :param diagram: - :param filters: """ - super().__init__(region_names, session, diagram, filters) + super().__init__(region_names, session) self.vpc_id = vpc_id @staticmethod @@ -64,10 +61,18 @@ def check_vpc(vpc_options: VpcOptions): ) print(message) - def run(self): - command_runner = CommandRunner(self.filters) + def run( + self, + diagram: bool, + verbose: bool, + services: List[str], + filters: List[Filterable], + ): + # pylint: disable=too-many-branches + command_runner = AwsCommandRunner(filters) for region in self.region_names: + self.init_region_cache(region) # if vpc is none, get all vpcs and check if self.vpc_id is None: @@ -76,11 +81,15 @@ def run(self): for data in vpcs["Vpcs"]: vpc_id = data["VpcId"] vpc_options = VpcOptions( - session=self.session, region_name=region, vpc_id=vpc_id, + verbose=verbose, + filters=filters, + session=self.session, + region_name=region, + vpc_id=vpc_id, ) self.check_vpc(vpc_options) diagram_builder: BaseDiagram - if 
self.diagram: + if diagram: diagram_builder = VpcDiagram(vpc_id=vpc_id) else: diagram_builder = NoDiagram() @@ -93,11 +102,15 @@ def run(self): ) else: vpc_options = VpcOptions( - session=self.session, region_name=region, vpc_id=self.vpc_id, + verbose=verbose, + filters=filters, + session=self.session, + region_name=region, + vpc_id=self.vpc_id, ) self.check_vpc(vpc_options) - if self.diagram: + if diagram: diagram_builder = VpcDiagram(vpc_id=self.vpc_id) else: diagram_builder = NoDiagram() diff --git a/cloudiscovery/provider/vpc/resource/analytics.py b/cloudiscovery/provider/vpc/resource/analytics.py index 85cc8ad..89104fd 100644 --- a/cloudiscovery/provider/vpc/resource/analytics.py +++ b/cloudiscovery/provider/vpc/resource/analytics.py @@ -2,6 +2,7 @@ from typing import List from provider.vpc.command import VpcOptions, check_ipvpc_inpolicy +from provider.vpc.resource.database import RDS from shared.common import ( datetime_to_string, ResourceProvider, @@ -9,8 +10,9 @@ message_handler, ResourceDigest, ResourceEdge, - resource_tags, + ResourceAvailable, ) +from shared.common_aws import resource_tags from shared.error_handler import exception @@ -25,6 +27,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="es") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("es") @@ -33,63 +36,125 @@ def get_resources(self) -> List[Resource]: response = client.list_domain_names() - message_handler("Collecting data from Elasticsearch Domains...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from Elasticsearch Domains...", "HEADER") - if len(response["DomainNames"]) > 0: + for data in response["DomainNames"]: - for data in response["DomainNames"]: + elasticsearch_domain = client.describe_elasticsearch_domain( + DomainName=data["DomainName"] + ) - elasticsearch_domain = client.describe_elasticsearch_domain( - DomainName=data["DomainName"] - ) + documentpolicy = elasticsearch_domain["DomainStatus"]["AccessPolicies"] - documentpolicy = elasticsearch_domain["DomainStatus"]["AccessPolicies"] + document = json.dumps(documentpolicy, default=datetime_to_string) - document = json.dumps(documentpolicy, default=datetime_to_string) + # check either vpc_id or potencial subnet ip are found + ipvpc_found = check_ipvpc_inpolicy( + document=document, vpc_options=self.vpc_options + ) - # check either vpc_id or potencial subnet ip are found - ipvpc_found = check_ipvpc_inpolicy( - document=document, vpc_options=self.vpc_options + # elasticsearch uses accesspolicies too, so check both situation + if ( + elasticsearch_domain["DomainStatus"]["VPCOptions"]["VPCId"] + == self.vpc_options.vpc_id + or ipvpc_found is True + ): + list_tags_response = client.list_tags( + ARN=elasticsearch_domain["DomainStatus"]["ARN"] ) - - # elasticsearch uses accesspolicies too, so check both situation - if ( - elasticsearch_domain["DomainStatus"]["VPCOptions"]["VPCId"] - == self.vpc_options.vpc_id - or ipvpc_found is True - ): - list_tags_response = client.list_tags( - ARN=elasticsearch_domain["DomainStatus"]["ARN"] + digest = ResourceDigest( + id=elasticsearch_domain["DomainStatus"]["DomainId"], + type="aws_elasticsearch_domain", + ) + resources_found.append( + Resource( + digest=digest, + name=elasticsearch_domain["DomainStatus"]["DomainName"], + details="", + group="analytics", + tags=resource_tags(list_tags_response), + ) + ) + for subnet_id in elasticsearch_domain["DomainStatus"]["VPCOptions"][ + "SubnetIds" + ]: + 
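+                    # Link the Elasticsearch domain to every subnet it is attached to.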
self.relations_found.append( + ResourceEdge( + from_node=digest, + to_node=ResourceDigest(id=subnet_id, type="aws_subnet"), + ) ) + return resources_found + + +class MSK(ResourceProvider): + def __init__(self, vpc_options: VpcOptions): + """ + Msk + + :param vpc_options: + """ + super().__init__() + self.vpc_options = vpc_options + + @exception + @ResourceAvailable(services="kafka") + def get_resources(self) -> List[Resource]: + + client = self.vpc_options.client("kafka") + + resources_found = [] + + # get all cache clusters + response = client.list_clusters() + + if self.vpc_options.verbose: + message_handler("Collecting data from MSK Clusters...", "HEADER") + + # iterate cache clusters to get subnet groups + for data in response["ClusterInfoList"]: + + msk_subnets = ", ".join(data["BrokerNodeGroupInfo"]["ClientSubnets"]) + + ec2 = self.vpc_options.session.resource( + "ec2", region_name=self.vpc_options.region_name + ) + + filters = [{"Name": "vpc-id", "Values": [self.vpc_options.vpc_id]}] + + subnets = ec2.subnets.filter(Filters=filters) + + for subnet in list(subnets): + + if subnet.id in msk_subnets: digest = ResourceDigest( - id=elasticsearch_domain["DomainStatus"]["DomainId"], - type="aws_elasticsearch_domain", + id=data["ClusterArn"], type="aws_msk_cluster" ) resources_found.append( Resource( digest=digest, - name=elasticsearch_domain["DomainStatus"]["DomainName"], + name=data["ClusterName"], details="", group="analytics", - tags=resource_tags(list_tags_response), + tags=resource_tags(data), ) ) - for subnet_id in elasticsearch_domain["DomainStatus"]["VPCOptions"][ - "SubnetIds" - ]: - self.relations_found.append( - ResourceEdge( - from_node=digest, - to_node=ResourceDigest(id=subnet_id, type="aws_subnet"), - ) + self.relations_found.append( + ResourceEdge( + from_node=digest, + to_node=ResourceDigest(id=subnet.id, type="aws_subnet"), ) + ) + + break return resources_found -class MSK(ResourceProvider): +class QUICKSIGHT(ResourceProvider): def __init__(self, vpc_options: VpcOptions): """ - Msk + Quicksight :param vpc_options: """ @@ -97,53 +162,84 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="quicksight") def get_resources(self) -> List[Resource]: - client = self.vpc_options.client("kafka") + client = self.vpc_options.client("quicksight") resources_found = [] - # get all cache clusters - response = client.list_clusters() + # Get accountid + account_id = self.vpc_options.account_number() - message_handler("Collecting data from MSK Clusters...", "HEADER") + response = client.list_data_sources(AwsAccountId=account_id) - if len(response["ClusterInfoList"]) > 0: + if self.vpc_options.verbose: + message_handler("Collecting data from Quicksight...", "HEADER") - # iterate cache clusters to get subnet groups - for data in response["ClusterInfoList"]: + for data in response["DataSources"]: - msk_subnets = ", ".join(data["BrokerNodeGroupInfo"]["ClientSubnets"]) + # Twitter and S3 data source is not supported + if data["Type"] not in ("TWITTER", "S3", "ATHENA"): - ec2 = self.vpc_options.session.resource( - "ec2", region_name=self.vpc_options.region_name + data_source = client.describe_data_source( + AwsAccountId=account_id, DataSourceId=data["DataSourceId"] ) - filters = [{"Name": "vpc-id", "Values": [self.vpc_options.vpc_id]}] + if "RdsParameters" in data_source["DataSource"]["DataSourceParameters"]: - subnets = ec2.subnets.filter(Filters=filters) + instance_id = data_source["DataSource"]["DataSourceParameters"][ + 
"RdsParameters" + ]["InstanceId"] + rds = RDS(self.vpc_options).get_resources(instance_id=instance_id) - for subnet in list(subnets): + if rds: - if subnet.id in msk_subnets: - digest = ResourceDigest( - id=data["ClusterArn"], type="aws_msk_cluster" + quicksight_digest = ResourceDigest( + id=data["DataSourceId"], type="aws_quicksight" ) resources_found.append( Resource( - digest=digest, - name=data["ClusterName"], + digest=quicksight_digest, + name=data["Name"], details="", group="analytics", tags=resource_tags(data), ) ) + + self.relations_found.append( + ResourceEdge( + from_node=quicksight_digest, to_node=rds[0].digest, + ) + ) + + if "VpcConnectionProperties" in data_source: + + if ( + self.vpc_options.vpc_id + in data_source["VpcConnectionProperties"]["VpcConnectionArn"] + ): + quicksight_digest = ResourceDigest( + id=data["DataSourceId"], type="aws_quicksight" + ) + resources_found.append( + Resource( + digest=quicksight_digest, + name=data["DataSourceId"], + details="", + group="analytics", + tags=resource_tags(data), + ) + ) + self.relations_found.append( ResourceEdge( - from_node=digest, - to_node=ResourceDigest(id=subnet.id, type="aws_subnet"), + from_node=quicksight_digest, + to_node=ResourceDigest( + id=self.vpc_options.vpc_id, type="aws_vpc" + ), ) ) - break return resources_found diff --git a/cloudiscovery/provider/vpc/resource/application.py b/cloudiscovery/provider/vpc/resource/application.py index a0e7d54..55c9cf6 100644 --- a/cloudiscovery/provider/vpc/resource/application.py +++ b/cloudiscovery/provider/vpc/resource/application.py @@ -10,8 +10,9 @@ message_handler, ResourceDigest, ResourceEdge, - resource_tags, + ResourceAvailable, ) +from shared.common_aws import resource_tags from shared.error_handler import exception @@ -26,6 +27,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="sqs") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("sqs") @@ -34,7 +36,8 @@ def get_resources(self) -> List[Resource]: response = client.list_queues() - message_handler("Collecting data from SQS Queue Policy...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from SQS Queue Policy...", "HEADER") if "QueueUrls" in response: diff --git a/cloudiscovery/provider/vpc/resource/compute.py b/cloudiscovery/provider/vpc/resource/compute.py index 988855e..92e0450 100644 --- a/cloudiscovery/provider/vpc/resource/compute.py +++ b/cloudiscovery/provider/vpc/resource/compute.py @@ -7,11 +7,9 @@ message_handler, ResourceDigest, ResourceEdge, - get_name_tag, - get_tag, - resource_tags, + ResourceAvailable, ) -from shared.common_aws import describe_subnet +from shared.common_aws import describe_subnet, resource_tags, get_name_tag, get_tag from shared.error_handler import exception @@ -26,10 +24,12 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="lambda") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("lambda") - message_handler("Collecting data from Lambda Functions...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from Lambda Functions...", "HEADER") paginator = client.get_paginator("list_functions") pages = paginator.paginate() @@ -77,6 +77,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="ec2") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("ec2") 
@@ -85,7 +86,8 @@ def get_resources(self) -> List[Resource]: response = client.describe_instances() - message_handler("Collecting data from EC2 Instances...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from EC2 Instances...", "HEADER") for data in response["Reservations"]: for instances in data["Instances"]: @@ -143,6 +145,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="eks") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("eks") @@ -151,7 +154,8 @@ def get_resources(self) -> List[Resource]: response = client.list_clusters() - message_handler("Collecting data from EKS Clusters...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from EKS Clusters...", "HEADER") for data in response["clusters"]: @@ -195,6 +199,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="emr") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("emr") @@ -203,7 +208,8 @@ def get_resources(self) -> List[Resource]: response = client.list_clusters() - message_handler("Collecting data from EMR Clusters...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from EMR Clusters...", "HEADER") for data in response["Clusters"]: @@ -254,6 +260,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="autoscaling") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("autoscaling") @@ -262,7 +269,8 @@ def get_resources(self) -> List[Resource]: response = client.describe_auto_scaling_groups() - message_handler("Collecting data from Autoscaling Groups...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from Autoscaling Groups...", "HEADER") for data in response["AutoScalingGroups"]: diff --git a/cloudiscovery/provider/vpc/resource/containers.py b/cloudiscovery/provider/vpc/resource/containers.py index 9612b2f..c261fa2 100644 --- a/cloudiscovery/provider/vpc/resource/containers.py +++ b/cloudiscovery/provider/vpc/resource/containers.py @@ -7,9 +7,9 @@ message_handler, ResourceDigest, ResourceEdge, - resource_tags, + ResourceAvailable, ) -from shared.common_aws import describe_subnet +from shared.common_aws import describe_subnet, resource_tags from shared.error_handler import exception @@ -24,6 +24,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="ecs") # pylint: disable=too-many-locals,too-many-branches def get_resources(self) -> List[Resource]: @@ -37,115 +38,110 @@ def get_resources(self) -> List[Resource]: clusters=clusters_list["clusterArns"], include=["TAGS"] ) - message_handler("Collecting data from ECS Cluster...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from ECS Cluster...", "HEADER") # pylint: disable=too-many-nested-blocks - if len(response["clusters"]) > 0: - - for data in response["clusters"]: - - # Searching all cluster services - paginator = client.get_paginator("list_services") - pages = paginator.paginate(cluster=data["clusterName"]) - - for services in pages: - if len(services["serviceArns"]) > 0: - service_details = client.describe_services( - cluster=data["clusterName"], - services=services["serviceArns"], - ) - - for data_service_detail in service_details["services"]: - if data_service_detail["launchType"] == 
"FARGATE": - service_subnets = data_service_detail[ - "networkConfiguration" - ]["awsvpcConfiguration"]["subnets"] - - # Using subnet to check VPC - subnets = describe_subnet( - vpc_options=self.vpc_options, - subnet_ids=service_subnets, - ) - - if subnets is not None: - # Iterate subnet to get VPC - for data_subnet in subnets["Subnets"]: - - if ( - data_subnet["VpcId"] - == self.vpc_options.vpc_id - ): - cluster_digest = ResourceDigest( - id=data["clusterArn"], - type="aws_ecs_cluster", - ) - resources_found.append( - Resource( - digest=cluster_digest, - name=data["clusterName"], - details="", - group="container", - tags=resource_tags(data), - ) - ) - self.relations_found.append( - ResourceEdge( - from_node=cluster_digest, - to_node=ResourceDigest( - id=data_subnet["SubnetId"], - type="aws_subnet", - ), - ) - ) - else: - # EC2 services require container instances, list of them should be fine for now - pass - - # Looking for container instances - they are dynamically associated, so manual review is necessary - list_paginator = client.get_paginator("list_container_instances") - list_pages = list_paginator.paginate(cluster=data["clusterName"]) - for list_page in list_pages: - if len(list_page["containerInstanceArns"]) == 0: - continue - - container_instances = client.describe_container_instances( - cluster=data["clusterName"], - containerInstances=list_page["containerInstanceArns"], + for data in response["clusters"]: + + # Searching all cluster services + paginator = client.get_paginator("list_services") + pages = paginator.paginate(cluster=data["clusterName"]) + + for services in pages: + if len(services["serviceArns"]) > 0: + service_details = client.describe_services( + cluster=data["clusterName"], services=services["serviceArns"], ) - ec2_ids = [] - for instance_details in container_instances["containerInstances"]: - ec2_ids.append(instance_details["ec2InstanceId"]) - paginator = ec2_client.get_paginator("describe_instances") - pages = paginator.paginate(InstanceIds=ec2_ids) - for page in pages: - for reservation in page["Reservations"]: - for instance in reservation["Instances"]: - for network_interfaces in instance["NetworkInterfaces"]: - if ( - network_interfaces["VpcId"] - == self.vpc_options.vpc_id - ): - cluster_instance_digest = ResourceDigest( - id=instance["InstanceId"], + + for data_service_detail in service_details["services"]: + if data_service_detail["launchType"] == "FARGATE": + service_subnets = data_service_detail[ + "networkConfiguration" + ]["awsvpcConfiguration"]["subnets"] + + # Using subnet to check VPC + subnets = describe_subnet( + vpc_options=self.vpc_options, + subnet_ids=service_subnets, + ) + + if subnets is not None: + # Iterate subnet to get VPC + for data_subnet in subnets["Subnets"]: + + if data_subnet["VpcId"] == self.vpc_options.vpc_id: + cluster_digest = ResourceDigest( + id=data["clusterArn"], type="aws_ecs_cluster", ) resources_found.append( Resource( - digest=cluster_instance_digest, + digest=cluster_digest, name=data["clusterName"], - details="Instance in EC2 cluster", + details="", group="container", tags=resource_tags(data), ) ) self.relations_found.append( ResourceEdge( - from_node=cluster_instance_digest, + from_node=cluster_digest, to_node=ResourceDigest( - id=instance["InstanceId"], - type="aws_instance", + id=data_subnet["SubnetId"], + type="aws_subnet", ), ) ) + else: + # EC2 services require container instances, list of them should be fine for now + pass + + # Looking for container instances - they are dynamically associated, so manual 
review is necessary + list_paginator = client.get_paginator("list_container_instances") + list_pages = list_paginator.paginate(cluster=data["clusterName"]) + for list_page in list_pages: + if len(list_page["containerInstanceArns"]) == 0: + continue + + container_instances = client.describe_container_instances( + cluster=data["clusterName"], + containerInstances=list_page["containerInstanceArns"], + ) + ec2_ids = [] + for instance_details in container_instances["containerInstances"]: + ec2_ids.append(instance_details["ec2InstanceId"]) + paginator = ec2_client.get_paginator("describe_instances") + pages = paginator.paginate(InstanceIds=ec2_ids) + for page in pages: + for reservation in page["Reservations"]: + for instance in reservation["Instances"]: + for network_interfaces in instance["NetworkInterfaces"]: + if ( + network_interfaces["VpcId"] + == self.vpc_options.vpc_id + ): + cluster_instance_digest = ResourceDigest( + id=instance["InstanceId"], + type="aws_ecs_cluster", + ) + resources_found.append( + Resource( + digest=cluster_instance_digest, + name=data["clusterName"], + details="Instance in EC2 cluster", + group="container", + tags=resource_tags(data), + ) + ) + self.relations_found.append( + ResourceEdge( + from_node=cluster_instance_digest, + to_node=ResourceDigest( + id=instance["InstanceId"], + type="aws_instance", + ), + ) + ) return resources_found diff --git a/cloudiscovery/provider/vpc/resource/database.py b/cloudiscovery/provider/vpc/resource/database.py index ab4e8d3..2f59c7e 100644 --- a/cloudiscovery/provider/vpc/resource/database.py +++ b/cloudiscovery/provider/vpc/resource/database.py @@ -7,8 +7,9 @@ message_handler, ResourceDigest, ResourceEdge, - resource_tags, + ResourceAvailable, ) +from shared.common_aws import resource_tags from shared.error_handler import exception @@ -23,37 +24,40 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception - def get_resources(self) -> List[Resource]: + @ResourceAvailable(services="rds") + def get_resources(self, instance_id=None) -> List[Resource]: client = self.vpc_options.client("rds") + params = { + "Name": "engine", + "Values": [ + "aurora", + "aurora-mysql", + "aurora-postgresql", + "mariadb", + "mysql", + "oracle-ee", + "oracle-se2", + "oracle-se1", + "oracle-se", + "postgres", + "sqlserver-ee", + "sqlserver-se", + "sqlserver-ex", + "sqlserver-web", + ], + } + + if instance_id is not None: + params.update({"Name": "db-instance-id", "Values": [instance_id]}) + resources_found = [] - response = client.describe_db_instances( - Filters=[ - { - "Name": "engine", - "Values": [ - "aurora", - "aurora-mysql", - "aurora-postgresql", - "mariadb", - "mysql", - "oracle-ee", - "oracle-se2", - "oracle-se1", - "oracle-se", - "postgres", - "sqlserver-ee", - "sqlserver-se", - "sqlserver-ex", - "sqlserver-web", - ], - } - ] - ) + response = client.describe_db_instances(Filters=[params]) - message_handler("Collecting data from RDS Instances...", "HEADER") + if instance_id is None and self.vpc_options.verbose: + message_handler("Collecting data from RDS Instances...", "HEADER") for data in response["DBInstances"]: if data["DBSubnetGroup"]["VpcId"] == self.vpc_options.vpc_id: @@ -102,6 +106,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="elasticache") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("elasticache") @@ -111,7 +116,8 @@ def get_resources(self) -> List[Resource]: # get all cache clusters response = 
client.describe_cache_clusters() - message_handler("Collecting data from Elasticache Clusters...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from Elasticache Clusters...", "HEADER") # iterate cache clusters to get subnet groups for data in response["CacheClusters"]: @@ -161,6 +167,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="docdb") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("docdb") @@ -171,7 +178,8 @@ def get_resources(self) -> List[Resource]: Filters=[{"Name": "engine", "Values": ["docdb"]}] ) - message_handler("Collecting data from DocumentDB Instances...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from DocumentDB Instances...", "HEADER") # iterate cache clusters to get subnet groups for data in response["DBInstances"]: @@ -220,6 +228,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="neptune") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("neptune") @@ -230,7 +239,8 @@ def get_resources(self) -> List[Resource]: Filters=[{"Name": "engine", "Values": ["neptune"]}] ) - message_handler("Collecting data from Neptune Instances...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from Neptune Instances...", "HEADER") # iterate cache clusters to get subnet groups for data in response["DBInstances"]: diff --git a/cloudiscovery/provider/vpc/resource/enduser.py b/cloudiscovery/provider/vpc/resource/enduser.py new file mode 100644 index 0000000..f4dcd2d --- /dev/null +++ b/cloudiscovery/provider/vpc/resource/enduser.py @@ -0,0 +1,79 @@ +from typing import List + +from provider.vpc.command import VpcOptions +from shared.common import ( + ResourceProvider, + Resource, + message_handler, + ResourceDigest, + ResourceEdge, + ResourceAvailable, +) +from shared.common_aws import resource_tags, get_name_tag +from shared.error_handler import exception + + +class WORKSPACES(ResourceProvider): + def __init__(self, vpc_options: VpcOptions): + """ + Workspaces + + :param vpc_options: + """ + super().__init__() + self.vpc_options = vpc_options + + @exception + @ResourceAvailable(services="workspaces") + def get_resources(self) -> List[Resource]: + + client = self.vpc_options.client("workspaces") + + resources_found = [] + + response = client.describe_workspaces() + + if self.vpc_options.verbose: + message_handler("Collecting data from Workspaces...", "HEADER") + + for data in response["Workspaces"]: + + # Get tag name + tags = client.describe_tags(ResourceId=data["WorkspaceId"]) + nametag = get_name_tag(tags) + + workspace_name = data["WorkspaceId"] if nametag is None else nametag + + directory_service = self.vpc_options.client("ds") + directories = directory_service.describe_directories( + DirectoryIds=[data["DirectoryId"]] + ) + + for directorie in directories["DirectoryDescriptions"]: + + if "VpcSettings" in directorie: + + if directorie["VpcSettings"]["VpcId"] == self.vpc_options.vpc_id: + workspace_digest = ResourceDigest( + id=data["WorkspaceId"], type="aws_workspaces" + ) + resources_found.append( + Resource( + digest=workspace_digest, + name=workspace_name, + details="", + group="enduser", + tags=resource_tags(tags), + ) + ) + + self.relations_found.append( + ResourceEdge( + from_node=workspace_digest, + to_node=ResourceDigest( + id=directorie["DirectoryId"], type="aws_ds" + ), + ) + ) + + return 
resources_found diff --git a/cloudiscovery/provider/vpc/resource/identity.py b/cloudiscovery/provider/vpc/resource/identity.py new file mode 100644 index 0000000..0d780bb --- /dev/null +++ b/cloudiscovery/provider/vpc/resource/identity.py @@ -0,0 +1,65 @@ +from typing import List + +from provider.vpc.command import VpcOptions +from shared.common import ( + ResourceProvider, + Resource, + message_handler, + ResourceDigest, + ResourceEdge, + ResourceAvailable, +) +from shared.common_aws import resource_tags +from shared.error_handler import exception + + +class DIRECTORYSERVICE(ResourceProvider): + def __init__(self, vpc_options: VpcOptions): + """ + Directory service + + :param vpc_options: + """ + super().__init__() + self.vpc_options = vpc_options + + @exception + @ResourceAvailable(services="ds") + def get_resources(self) -> List[Resource]: + + client = self.vpc_options.client("ds") + + resources_found = [] + + response = client.describe_directories() + + if self.vpc_options.verbose: + message_handler("Collecting data from Directory Services...", "HEADER") + + for data in response["DirectoryDescriptions"]: + + if "VpcSettings" in data: + + if data["VpcSettings"]["VpcId"] == self.vpc_options.vpc_id: + directory_service_digest = ResourceDigest( + id=data["DirectoryId"], type="aws_ds" + ) + resources_found.append( + Resource( + digest=directory_service_digest, + name=data["Name"], + details="", + group="identity", + tags=resource_tags(data), + ) + ) + + for subnet in data["VpcSettings"]["SubnetIds"]: + self.relations_found.append( + ResourceEdge( + from_node=directory_service_digest, + to_node=ResourceDigest(id=subnet, type="aws_subnet"), + ) + ) + + return resources_found diff --git a/cloudiscovery/provider/vpc/resource/management.py b/cloudiscovery/provider/vpc/resource/management.py index 3dd902f..5c9cda9 100644 --- a/cloudiscovery/provider/vpc/resource/management.py +++ b/cloudiscovery/provider/vpc/resource/management.py @@ -7,8 +7,9 @@ message_handler, ResourceDigest, ResourceEdge, - resource_tags, + ResourceAvailable, ) +from shared.common_aws import resource_tags from shared.error_handler import exception @@ -23,6 +24,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="synthetics") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("synthetics") @@ -31,7 +33,8 @@ def get_resources(self) -> List[Resource]: response = client.describe_canaries() - message_handler("Collecting data from Synthetic Canaries...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from Synthetic Canaries...", "HEADER") for data in response["Canaries"]: diff --git a/cloudiscovery/provider/vpc/resource/mediaservices.py b/cloudiscovery/provider/vpc/resource/mediaservices.py index 4038bd6..5254221 100644 --- a/cloudiscovery/provider/vpc/resource/mediaservices.py +++ b/cloudiscovery/provider/vpc/resource/mediaservices.py @@ -9,9 +9,9 @@ ResourceDigest, ResourceEdge, datetime_to_string, - resource_tags, + ResourceAvailable, ) -from shared.common_aws import describe_subnet +from shared.common_aws import describe_subnet, resource_tags from shared.error_handler import exception @@ -26,6 +26,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="mediaconnect") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("mediaconnect") @@ -34,7 +35,8 @@ def get_resources(self) -> List[Resource]: response = 
client.list_flows() - message_handler("Collecting data from Media Connect...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from Media Connect...", "HEADER") for data in response["Flows"]: tags_response = client.list_tags_for_resource(ResourceArn=data["FlowArn"]) @@ -91,6 +93,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="medialive") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("medialive") @@ -99,7 +102,8 @@ def get_resources(self) -> List[Resource]: response = client.list_inputs() - message_handler("Collecting data from Media Live Inputs...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from Media Live Inputs...", "HEADER") for data in response["Inputs"]: tags_response = client.list_tags_for_resource(ResourceArn=data["Arn"]) @@ -142,6 +146,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="mediastore") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("mediastore") @@ -150,7 +155,8 @@ def get_resources(self) -> List[Resource]: response = client.list_containers() - message_handler("Collecting data from Media Store...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from Media Store...", "HEADER") for data in response["Containers"]: diff --git a/cloudiscovery/provider/vpc/resource/ml.py b/cloudiscovery/provider/vpc/resource/ml.py index 039d0a3..cdacb9e 100644 --- a/cloudiscovery/provider/vpc/resource/ml.py +++ b/cloudiscovery/provider/vpc/resource/ml.py @@ -7,9 +7,9 @@ message_handler, ResourceDigest, ResourceEdge, - resource_tags, + ResourceAvailable, ) -from shared.common_aws import describe_subnet +from shared.common_aws import describe_subnet, resource_tags from shared.error_handler import exception @@ -24,6 +24,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="sagemaker") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("sagemaker") @@ -32,9 +33,10 @@ def get_resources(self) -> List[Resource]: response = client.list_notebook_instances() - message_handler( - "Collecting data from Sagemaker Notebook instances...", "HEADER" - ) + if self.vpc_options.verbose: + message_handler( + "Collecting data from Sagemaker Notebook instances...", "HEADER" + ) for data in response["NotebookInstances"]: @@ -87,6 +89,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="sagemaker") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("sagemaker") @@ -95,7 +98,8 @@ def get_resources(self) -> List[Resource]: response = client.list_training_jobs() - message_handler("Collecting data from Sagemaker Training Job...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from Sagemaker Training Job...", "HEADER") for data in response["TrainingJobSummaries"]: tags_response = client.list_tags(ResourceArn=data["TrainingJobArn"],) @@ -153,6 +157,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="sagemaker") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("sagemaker") @@ -161,7 +166,8 @@ def get_resources(self) -> List[Resource]: response = client.list_models() - message_handler("Collecting data from Sagemaker 
Model...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from Sagemaker Model...", "HEADER") for data in response["Models"]: tags_response = client.list_tags(ResourceArn=data["ModelArn"],) diff --git a/cloudiscovery/provider/vpc/resource/network.py b/cloudiscovery/provider/vpc/resource/network.py index 2952e0b..7e6094c 100644 --- a/cloudiscovery/provider/vpc/resource/network.py +++ b/cloudiscovery/provider/vpc/resource/network.py @@ -7,12 +7,12 @@ ResourceProvider, Resource, message_handler, - get_name_tag, ResourceDigest, ResourceEdge, datetime_to_string, - resource_tags, + ResourceAvailable, ) +from shared.common_aws import resource_tags, get_name_tag from shared.error_handler import exception @@ -27,6 +27,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="ec2") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("ec2") @@ -36,7 +37,8 @@ def get_resources(self) -> List[Resource]: response = client.describe_internet_gateways(Filters=filters) - message_handler("Collecting data from Internet Gateways...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from Internet Gateways...", "HEADER") # One VPC has only 1 IGW then it's a direct check if len(response["InternetGateways"]) > 0: @@ -81,6 +83,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="ec2") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("ec2") @@ -91,41 +94,38 @@ def get_resources(self) -> List[Resource]: response = client.describe_nat_gateways(Filters=filters) - message_handler("Collecting data from NAT Gateways...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from NAT Gateways...", "HEADER") - if len(response["NatGateways"]) > 0: + for data in response["NatGateways"]: - for data in response["NatGateways"]: - - if data["VpcId"] == self.vpc_options.vpc_id: - nametag = get_name_tag(data) + if data["VpcId"] == self.vpc_options.vpc_id: + nametag = get_name_tag(data) - name = data["NatGatewayId"] if nametag is None else nametag + name = data["NatGatewayId"] if nametag is None else nametag - nat_digest = ResourceDigest( - id=data["NatGatewayId"], type="aws_nat_gateway" - ) - resources_found.append( - Resource( - digest=nat_digest, - name=name, - details="NAT Gateway Private IP {}, Public IP {}, Subnet id {}".format( - data["NatGatewayAddresses"][0]["PrivateIp"], - data["NatGatewayAddresses"][0]["PublicIp"], - data["SubnetId"], - ), - group="network", - tags=resource_tags(data), - ) + nat_digest = ResourceDigest( + id=data["NatGatewayId"], type="aws_nat_gateway" + ) + resources_found.append( + Resource( + digest=nat_digest, + name=name, + details="NAT Gateway Private IP {}, Public IP {}, Subnet id {}".format( + data["NatGatewayAddresses"][0]["PrivateIp"], + data["NatGatewayAddresses"][0]["PublicIp"], + data["SubnetId"], + ), + group="network", + tags=resource_tags(data), ) - self.relations_found.append( - ResourceEdge( - from_node=nat_digest, - to_node=ResourceDigest( - id=data["SubnetId"], type="aws_subnet" - ), - ) + ) + self.relations_found.append( + ResourceEdge( + from_node=nat_digest, + to_node=ResourceDigest(id=data["SubnetId"], type="aws_subnet"), ) + ) return resources_found @@ -141,6 +141,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="elb") def get_resources(self) -> 
List[Resource]: client = self.vpc_options.client("elb") @@ -149,34 +150,33 @@ def get_resources(self) -> List[Resource]: response = client.describe_load_balancers() - message_handler("Collecting data from Classic Load Balancers...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from Classic Load Balancers...", "HEADER") - if len(response["LoadBalancerDescriptions"]) > 0: - - for data in response["LoadBalancerDescriptions"]: - if data["VPCId"] == self.vpc_options.vpc_id: - tags_response = client.describe_tags( - LoadBalancerNames=[data["LoadBalancerName"]] - ) - elb_digest = ResourceDigest( - id=data["LoadBalancerName"], type="aws_elb_classic" - ) - for subnet_id in data["Subnets"]: - self.relations_found.append( - ResourceEdge( - from_node=elb_digest, - to_node=ResourceDigest(id=subnet_id, type="aws_subnet"), - ) - ) - resources_found.append( - Resource( - digest=elb_digest, - name=data["LoadBalancerName"], - details="", - group="network", - tags=resource_tags(tags_response["TagDescriptions"][0]), + for data in response["LoadBalancerDescriptions"]: + if data["VPCId"] == self.vpc_options.vpc_id: + tags_response = client.describe_tags( + LoadBalancerNames=[data["LoadBalancerName"]] + ) + elb_digest = ResourceDigest( + id=data["LoadBalancerName"], type="aws_elb_classic" + ) + for subnet_id in data["Subnets"]: + self.relations_found.append( + ResourceEdge( + from_node=elb_digest, + to_node=ResourceDigest(id=subnet_id, type="aws_subnet"), ) ) + resources_found.append( + Resource( + digest=elb_digest, + name=data["LoadBalancerName"], + details="", + group="network", + tags=resource_tags(tags_response["TagDescriptions"][0]), + ) + ) return resources_found @@ -192,6 +192,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="elb") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("elbv2") @@ -200,41 +201,40 @@ def get_resources(self) -> List[Resource]: response = client.describe_load_balancers() - message_handler("Collecting data from Application Load Balancers...", "HEADER") - - if len(response["LoadBalancers"]) > 0: + if self.vpc_options.verbose: + message_handler( + "Collecting data from Application Load Balancers...", "HEADER" + ) - for data in response["LoadBalancers"]: + for data in response["LoadBalancers"]: - if data["VpcId"] == self.vpc_options.vpc_id: - tags_response = client.describe_tags( - ResourceArns=[data["LoadBalancerArn"]] - ) - elb_digest = ResourceDigest( - id=data["LoadBalancerName"], type="aws_elb" - ) + if data["VpcId"] == self.vpc_options.vpc_id: + tags_response = client.describe_tags( + ResourceArns=[data["LoadBalancerArn"]] + ) + elb_digest = ResourceDigest(id=data["LoadBalancerName"], type="aws_elb") - subnet_ids = [] - for availabilityZone in data["AvailabilityZones"]: - subnet_ids.append(availabilityZone["SubnetId"]) - self.relations_found.append( - ResourceEdge( - from_node=elb_digest, - to_node=ResourceDigest( - id=availabilityZone["SubnetId"], type="aws_subnet" - ), - ) + subnet_ids = [] + for availabilityZone in data["AvailabilityZones"]: + subnet_ids.append(availabilityZone["SubnetId"]) + self.relations_found.append( + ResourceEdge( + from_node=elb_digest, + to_node=ResourceDigest( + id=availabilityZone["SubnetId"], type="aws_subnet" + ), ) + ) - resources_found.append( - Resource( - digest=elb_digest, - name=data["LoadBalancerName"], - details="", - group="network", - tags=resource_tags(tags_response["TagDescriptions"][0]), - ) + 
resources_found.append( + Resource( + digest=elb_digest, + name=data["LoadBalancerName"], + details="", + group="network", + tags=resource_tags(tags_response["TagDescriptions"][0]), ) + ) return resources_found @@ -250,6 +250,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="ec2") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("ec2") @@ -260,7 +261,8 @@ def get_resources(self) -> List[Resource]: response = client.describe_route_tables(Filters=filters) - message_handler("Collecting data from Route Tables...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from Route Tables...", "HEADER") # Iterate to get all route table filtered for data in response["RouteTables"]: @@ -326,8 +328,8 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="ec2") def get_resources(self) -> List[Resource]: - client = self.vpc_options.client("ec2") resources_found = [] @@ -336,7 +338,8 @@ def get_resources(self) -> List[Resource]: response = client.describe_subnets(Filters=filters) - message_handler("Collecting data from Subnets...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from Subnets...", "HEADER") for data in response["Subnets"]: nametag = get_name_tag(data) @@ -376,6 +379,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="ec2") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("ec2") @@ -386,7 +390,8 @@ def get_resources(self) -> List[Resource]: response = client.describe_network_acls(Filters=filters) - message_handler("Collecting data from NACLs...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from NACLs...", "HEADER") for data in response["NetworkAcls"]: nacl_digest = ResourceDigest( @@ -431,8 +436,8 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="ec2") def get_resources(self) -> List[Resource]: - client = self.vpc_options.client("ec2") resources_found = [] @@ -441,7 +446,8 @@ def get_resources(self) -> List[Resource]: response = client.describe_security_groups(Filters=filters) - message_handler("Collecting data from Security Groups...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from Security Groups...", "HEADER") for data in response["SecurityGroups"]: group_digest = ResourceDigest(id=data["GroupId"], type="aws_security_group") @@ -474,6 +480,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="ec2") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("ec2") @@ -482,7 +489,8 @@ def get_resources(self) -> List[Resource]: response = client.describe_vpc_peering_connections() - message_handler("Collecting data from VPC Peering...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from VPC Peering...", "HEADER") for data in response["VpcPeeringConnections"]: @@ -535,6 +543,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="ec2") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("ec2") vpc_response = client.describe_vpcs(VpcIds=[self.vpc_options.vpc_id]) @@ -558,6 +567,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = 
vpc_options @exception + @ResourceAvailable(services="ec2") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("ec2") @@ -568,7 +578,8 @@ def get_resources(self) -> List[Resource]: response = client.describe_vpc_endpoints(Filters=filters) - message_handler("Collecting data from VPC Endpoints...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from VPC Endpoints...", "HEADER") for data in response["VpcEndpoints"]: @@ -628,6 +639,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="apigateway") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("apigateway") @@ -637,7 +649,8 @@ def get_resources(self) -> List[Resource]: # get REST API available response = client.get_rest_apis() - message_handler("Collecting data from REST API Policies...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from REST API Policies...", "HEADER") with ThreadPoolExecutor(15) as executor: results = executor.map(self.analyze_restapi, response["items"]) @@ -678,3 +691,163 @@ def analyze_restapi(self, data): ), ) return False, None + + +class VpnConnection(ResourceProvider): + def __init__(self, vpc_options: VpcOptions): + """ + Vpn Connections + + :param vpc_options: + """ + super().__init__() + self.vpc_options = vpc_options + + @exception + @ResourceAvailable(services="ec2") + def get_resources(self) -> List[Resource]: + client = self.vpc_options.client("ec2") + vpn_response = client.describe_vpn_connections() + resources: List[Resource] = [] + + for vpn_connection in vpn_response["VpnConnections"]: + if ( + "VpnGatewayId" in vpn_connection + and vpn_connection["VpnGatewayId"] != "" + ): + vpn_gateway_id = vpn_connection["VpnGatewayId"] + vpn_gateway_response = client.describe_vpn_gateways( + Filters=[ + { + "Name": "attachment.vpc-id", + "Values": [self.vpc_options.vpc_id,], + } + ], + VpnGatewayIds=[vpn_gateway_id], + ) + if vpn_gateway_response["VpnGateways"]: + connection_digest = ResourceDigest( + id=vpn_connection["VpnConnectionId"], type="aws_vpn_connection" + ) + vpn_nametag = get_name_tag(vpn_connection) + vpn_name = ( + vpn_connection["VpnConnectionId"] + if vpn_nametag is None + else vpn_nametag + ) + resources.append( + Resource( + digest=connection_digest, + name=vpn_name, + group="network", + tags=resource_tags(vpn_connection), + ) + ) + + self.relations_found.append( + ResourceEdge( + from_node=connection_digest, + to_node=self.vpc_options.vpc_digest(), + ) + ) + + vpn_gateway_digest = ResourceDigest( + id=vpn_gateway_id, type="aws_vpn_gateway" + ) + vgw_nametag = get_name_tag(vpn_gateway_response["VpnGateways"][0]) + vgw_name = vpn_gateway_id if vgw_nametag is None else vgw_nametag + resources.append( + Resource( + digest=vpn_gateway_digest, + name=vgw_name, + group="network", + tags=resource_tags(vpn_gateway_response["VpnGateways"][0]), + ) + ) + + self.relations_found.append( + ResourceEdge( + from_node=connection_digest, to_node=vpn_gateway_digest + ) + ) + + if ( + "CustomerGatewayId" in vpn_connection + and vpn_connection["CustomerGatewayId"] != "" + ): + self.add_customer_gateway( + client, connection_digest, resources, vpn_connection + ) + + return resources + + def add_customer_gateway( + self, client, connection_digest, resources, vpn_connection + ): + customer_gateway_id = vpn_connection["CustomerGatewayId"] + vcw_gateway_response = client.describe_customer_gateways( + CustomerGatewayIds=[customer_gateway_id] + ) + 
vcw_gateway_digest = ResourceDigest( + id=customer_gateway_id, type="aws_customer_gateway" + ) + vcw_nametag = get_name_tag(vcw_gateway_response["CustomerGateways"][0]) + vcw_name = customer_gateway_id if vcw_nametag is None else vcw_nametag + resources.append( + Resource( + digest=vcw_gateway_digest, + name=vcw_name, + group="network", + tags=resource_tags(vcw_gateway_response["CustomerGateways"][0]), + ) + ) + self.relations_found.append( + ResourceEdge(from_node=connection_digest, to_node=vcw_gateway_digest) + ) + + +class VpnClientEndpoint(ResourceProvider): + def __init__(self, vpc_options: VpcOptions): + """ + Vpn Client Endpoints + + :param vpc_options: + """ + super().__init__() + self.vpc_options = vpc_options + + @exception + @ResourceAvailable(services="ec2") + def get_resources(self) -> List[Resource]: + client = self.vpc_options.client("ec2") + client_vpn_endpoints = client.describe_client_vpn_endpoints() + resources: List[Resource] = [] + + for client_vpn_endpoint in client_vpn_endpoints["ClientVpnEndpoints"]: + if client_vpn_endpoint["VpcId"] == self.vpc_options.vpc_id: + digest = ResourceDigest( + id=client_vpn_endpoint["ClientVpnEndpointId"], + type="aws_vpn_client_endpoint", + ) + nametag = get_name_tag(client_vpn_endpoint) + name = ( + client_vpn_endpoint["ClientVpnEndpointId"] + if nametag is None + else nametag + ) + resources.append( + Resource( + digest=digest, + name=name, + group="network", + tags=resource_tags(client_vpn_endpoint), + ) + ) + + self.relations_found.append( + ResourceEdge( + from_node=digest, to_node=self.vpc_options.vpc_digest() + ) + ) + + return resources diff --git a/cloudiscovery/provider/vpc/resource/security.py b/cloudiscovery/provider/vpc/resource/security.py index ac3e968..0383bf1 100644 --- a/cloudiscovery/provider/vpc/resource/security.py +++ b/cloudiscovery/provider/vpc/resource/security.py @@ -10,8 +10,9 @@ ResourceDigest, ResourceEdge, datetime_to_string, - resource_tags, + ResourceAvailable, ) +from shared.common_aws import resource_tags from shared.error_handler import exception @@ -26,13 +27,15 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="iam") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("iam") resources_found = [] - message_handler("Collecting data from IAM Policies...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from IAM Policies...", "HEADER") paginator = client.get_paginator("list_policies") pages = paginator.paginate(Scope="Local") for policies in pages: @@ -88,6 +91,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="cloudhsmv2") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("cloudhsmv2") @@ -96,33 +100,32 @@ def get_resources(self) -> List[Resource]: response = client.describe_clusters() - message_handler("Collecting data from CloudHSM clusters...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from CloudHSM clusters...", "HEADER") - if len(response["Clusters"]) > 0: + for data in response["Clusters"]: - for data in response["Clusters"]: - - if data["VpcId"] == self.vpc_options.vpc_id: - cloudhsm_digest = ResourceDigest( - id=data["ClusterId"], type="aws_cloudhsm" - ) - resources_found.append( - Resource( - digest=cloudhsm_digest, - name=data["ClusterId"], - details="", - group="security", - tags=resource_tags(data), - ) + if data["VpcId"] == 
self.vpc_options.vpc_id: + cloudhsm_digest = ResourceDigest( + id=data["ClusterId"], type="aws_cloudhsm" + ) + resources_found.append( + Resource( + digest=cloudhsm_digest, + name=data["ClusterId"], + details="", + group="security", + tags=resource_tags(data), ) + ) - for subnet in data["SubnetMapping"]: - subnet_id = data["SubnetMapping"][subnet] - self.relations_found.append( - ResourceEdge( - from_node=cloudhsm_digest, - to_node=ResourceDigest(id=subnet_id, type="aws_subnet"), - ) + for subnet in data["SubnetMapping"]: + subnet_id = data["SubnetMapping"][subnet] + self.relations_found.append( + ResourceEdge( + from_node=cloudhsm_digest, + to_node=ResourceDigest(id=subnet_id, type="aws_subnet"), ) + ) return resources_found diff --git a/cloudiscovery/provider/vpc/resource/storage.py b/cloudiscovery/provider/vpc/resource/storage.py index 0bb5129..1f14323 100644 --- a/cloudiscovery/provider/vpc/resource/storage.py +++ b/cloudiscovery/provider/vpc/resource/storage.py @@ -12,10 +12,9 @@ ResourceDigest, ResourceEdge, datetime_to_string, - resource_tags, - get_name_tag, + ResourceAvailable, ) -from shared.common_aws import describe_subnet +from shared.common_aws import describe_subnet, resource_tags, get_name_tag from shared.error_handler import exception @@ -30,6 +29,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="efs") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("efs") @@ -39,7 +39,8 @@ def get_resources(self) -> List[Resource]: # get filesystems available response = client.describe_file_systems() - message_handler("Collecting data from EFS Mount Targets...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from EFS Mount Targets...", "HEADER") for data in response["FileSystems"]: @@ -95,6 +96,7 @@ def __init__(self, vpc_options: VpcOptions): self.vpc_options = vpc_options @exception + @ResourceAvailable(services="s3") def get_resources(self) -> List[Resource]: client = self.vpc_options.client("s3") @@ -104,7 +106,8 @@ def get_resources(self) -> List[Resource]: # get buckets available response = client.list_buckets() - message_handler("Collecting data from S3 Bucket Policies...", "HEADER") + if self.vpc_options.verbose: + message_handler("Collecting data from S3 Bucket Policies...", "HEADER") with ThreadPoolExecutor(15) as executor: results = executor.map( diff --git a/cloudiscovery/shared/command.py b/cloudiscovery/shared/command.py index d79cea5..bd2c601 100644 --- a/cloudiscovery/shared/command.py +++ b/cloudiscovery/shared/command.py @@ -1,46 +1,18 @@ -import importlib -import inspect -import os -from os.path import dirname -from typing import Dict, List - -from boto3 import Session +from typing import List from shared.common import ( - ResourceProvider, Resource, - message_handler, - ResourceDigest, ResourceEdge, - BaseAwsOptions, Filterable, ResourceTag, ResourceType, ) -from shared.diagram import BaseDiagram -from shared.report import Report - - -class BaseCommand: - def __init__(self, region_names, session, diagram, filters): - """ - Base class for discovery command - - :param region_names: - :param session: - :param diagram: - :param filters: - """ - self.region_names: List[str] = region_names - self.session: Session = session - self.diagram: bool = diagram - self.filters: List[Filterable] = filters def filter_resources( resources: List[Resource], filters: List[Filterable] ) -> List[Resource]: - if len(filters) == 0: + if not filters: return resources 
filtered_resources = [] @@ -80,115 +52,8 @@ def filter_relations( return filtered_relations -class CommandRunner(object): - def __init__(self, filters): - """ - Base class command execution - - :param filters: - """ - self.filters: List[Filterable] = filters - - # pylint: disable=too-many-locals,too-many-arguments - def run( - self, - provider: str, - options: BaseAwsOptions, - diagram_builder: BaseDiagram, - title: str, - filename: str, - ): - """ - Executes a command. - - The project's development pattern is a file with the respective name of the parent - resource (e.g. compute, network), classes of child resources inside this file and run() method to execute - respective check. So it makes sense to load dynamically. - """ - # Iterate to get all modules - message_handler("\nInspecting resources", "HEADER") - providers = [] - for name in os.listdir( - dirname(__file__) + "/../provider/" + provider + "/resource" - ): - if name.endswith(".py"): - # strip the extension - module = name[:-3] - - # Load and call all run check - for nameclass, cls in inspect.getmembers( - importlib.import_module( - "provider." + provider + ".resource." + module - ), - inspect.isclass, - ): - if ( - issubclass(cls, ResourceProvider) - and cls is not ResourceProvider - ): - providers.append((nameclass, cls)) - providers.sort(key=lambda x: x[0]) - - all_resources: List[Resource] = [] - resource_relations: List[ResourceEdge] = [] - - for providerTuple in providers: - provider_instance = providerTuple[1](options) - - provider_resources = provider_instance.get_resources() - if provider_resources is not None: - all_resources.extend(provider_resources) - - provider_resource_relations = provider_instance.get_relations() - if provider_resource_relations is not None: - resource_relations.extend(provider_resource_relations) - - unique_resources_dict: Dict[ResourceDigest, Resource] = dict() - for resource in all_resources: - unique_resources_dict[resource.digest] = resource - - unique_resources = list(unique_resources_dict.values()) - - unique_resources.sort(key=lambda x: x.group + x.digest.type + x.name) - resource_relations.sort( - key=lambda x: x.from_node.type - + x.from_node.id - + x.to_node.type - + x.to_node.id - ) - - # Resource filtering and sorting - filtered_resources = filter_resources(unique_resources, self.filters) - filtered_resources.sort(key=lambda x: x.group + x.digest.type + x.name) - - # Relationships filtering and sorting - filtered_relations = filter_relations(filtered_resources, resource_relations) - filtered_relations.sort( - key=lambda x: x.from_node.type - + x.from_node.id - + x.to_node.type - + x.to_node.id - ) - - # Diagram integration - diagram_builder.build( - resources=filtered_resources, - resource_relations=filtered_relations, - title=title, - filename=filename, - ) - - # TODO: Generate reports in json/csv/pdf/xls - report = Report() - report.general_report( - resources=filtered_resources, resource_relations=filtered_relations - ), - report.html_report( - resources=filtered_resources, - resource_relations=filtered_relations, - title=title, - filename=filename, - ) - - # TODO: Export in csv/json/yaml/tf... future... - # ....exporttf(checks).... 
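The removed `CommandRunner` machinery reappears further down in `shared/common_aws.py` as `AwsCommandRunner`; what stays in `shared/command.py` is the filtering helpers plus the small `execute_provider` function added just below, which the thread pool maps over one `(name, class)` tuple at a time. A minimal, self-contained sketch of that fan-out pattern, using hypothetical stand-in classes (`FakeOptions`, `FakeEc2Provider`) rather than the real cloudiscovery providers:

```python
from concurrent.futures import ThreadPoolExecutor
from typing import List, Tuple


class FakeOptions:
    """Hypothetical stand-in for the real options object."""

    region_name = "us-east-1"


class FakeEc2Provider:
    """Hypothetical stand-in for a ResourceProvider subclass."""

    def __init__(self, options):
        self.options = options

    def get_resources(self) -> List[str]:
        return ["i-0abc", "i-0def"]

    def get_relations(self) -> List[Tuple[str, str]]:
        return [("i-0abc", "subnet-123")]


def execute_provider(options, data):
    # Same shape as the helper added below: data is a (name, class) tuple.
    provider_instance = data[1](options)
    return provider_instance.get_resources(), provider_instance.get_relations()


if __name__ == "__main__":
    providers = [("FakeEc2Provider", FakeEc2Provider)]
    options = FakeOptions()

    all_resources, resource_relations = [], []
    with ThreadPoolExecutor(15) as executor:
        for resources, relations in executor.map(
            lambda data: execute_provider(options, data), providers
        ):
            all_resources.extend(resources or [])
            resource_relations.extend(relations or [])

    print(all_resources)
    print(resource_relations)
```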
+def execute_provider(options, data) -> (List[Resource], List[ResourceEdge]): + provider_instance = data[1](options) + provider_resources = provider_instance.get_resources() + provider_resource_relations = provider_instance.get_relations() + return provider_resources, provider_resource_relations diff --git a/cloudiscovery/shared/common.py b/cloudiscovery/shared/common.py index c76e98f..3975a7d 100644 --- a/cloudiscovery/shared/common.py +++ b/cloudiscovery/shared/common.py @@ -1,8 +1,12 @@ +import os.path import datetime import re -from typing import NamedTuple, List, Optional, Dict +import functools +import threading +from abc import ABC +from typing import NamedTuple, List, Dict -import boto3 +from diskcache import Cache VPCE_REGEX = re.compile(r'(?<=sourcevpce")(\s*:\s*")(vpce-[a-zA-Z0-9]+)', re.DOTALL) SOURCE_IP_ADDRESS_REGEX = re.compile( @@ -13,6 +17,8 @@ FILTER_TYPE_NAME = "type" FILTER_VALUE_PREFIX = "Value=" +_LOG_SEMAPHORE = threading.Semaphore() + class bcolors: colors = { @@ -27,22 +33,6 @@ class bcolors: } -class BaseAwsOptions(NamedTuple): - session: boto3.Session - region_name: str - - def client(self, service_name: str): - return self.session.client(service_name, region_name=self.region_name) - - def resulting_file_name(self, suffix): - return "{}_{}_{}".format(self.account_number(), self.region_name, suffix) - - def account_number(self): - client = self.session.client("sts", region_name=self.region_name) - account_id = client.get_caller_identity()["Account"] - return account_id - - class ResourceDigest(NamedTuple): id: str type: str @@ -58,6 +48,16 @@ class Filterable: pass +class LimitsValues(NamedTuple): + service: str + quota_name: str + quota_code: str + aws_limit: int + local_limit: int + usage: int + percent: float + + class ResourceTag(NamedTuple, Filterable): key: str value: str @@ -69,72 +69,75 @@ class ResourceType(NamedTuple, Filterable): class Resource(NamedTuple): digest: ResourceDigest - name: str + name: str = "" details: str = "" group: str = "" tags: List[ResourceTag] = [] + limits: LimitsValues = None + attributes: Dict[str, object] = {} -def resource_tags(resource_data: dict) -> List[ResourceTag]: - if "Tags" in resource_data: - tags_input = resource_data["Tags"] - elif "tags" in resource_data: - tags_input = resource_data["tags"] - elif "TagList" in resource_data: - tags_input = resource_data["TagList"] - elif "TagSet" in resource_data: - tags_input = resource_data["TagSet"] - else: - tags_input = None - - tags = [] - if isinstance(tags_input, list): - tags = resource_tags_from_tuples(tags_input) - elif isinstance(tags_input, dict): - tags = resource_tags_from_dict(tags_input) - - return tags - - -def resource_tags_from_tuples(tuples: List[Dict[str, str]]) -> List[ResourceTag]: - """ - List of key-value tuples that store tags, syntax: - [ - { - 'Key': 'string', - 'Value': 'string', - ... - }, - ] - OR - [ - { - 'key': 'string', - 'value': 'string', - ... 
- }, - ] - """ - result = [] - for tuple_elem in tuples: - if "Key" in tuple_elem and "Value" in tuple_elem: - result.append(ResourceTag(key=tuple_elem["Key"], value=tuple_elem["Value"])) - elif "key" in tuple_elem and "value" in tuple_elem: - result.append(ResourceTag(key=tuple_elem["key"], value=tuple_elem["value"])) - return result - - -def resource_tags_from_dict(tags: Dict[str, str]) -> List[ResourceTag]: - """ - List of key-value dict that store tags, syntax: - { - 'string': 'string' - } - """ - result = [] - for key, value in tags.items(): - result.append(ResourceTag(key=key, value=value)) - return result +class ResourceCache: + def __init__(self): + self.cache = Cache( + directory=os.path.dirname(os.path.abspath(__file__)) + + "/../../assets/.cache/" + ) + + def set_key(self, key: str, value: object, expire: int): + self.cache.set(key=key, value=value, expire=expire) + + def get_key(self, key: str): + if key in self.cache: + return self.cache[key] + + return None + + +# Decorator to check services. +class ResourceAvailable(object): + def __init__(self, services): + self.services = services + self.cache = ResourceCache() + + def is_service_available(self, region_name, service_name) -> bool: + cache_key = "aws_paths_" + region_name + cache = self.cache.get_key(cache_key) + return service_name in cache + + def __call__(self, func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + + if "vpc_options" in dir(args[0]): + region_name = args[0].vpc_options.region_name + elif "iot_options" in dir(args[0]): + region_name = args[0].iot_options.region_name + else: + region_name = "us-east-1" + + if self.is_service_available(region_name, self.services): + return func(*args, **kwargs) + + verbose = False + if "vpc_options" in dir(args[0]): + verbose = args[0].vpc_options.verbose + elif "iot_options" in dir(args[0]): + verbose = args[0].iot_options.verbose + elif "options" in dir(args[0]): + verbose = args[0].options.verbose + + if verbose: + message_handler( + "Check " + + func.__qualname__ + + " not available in this region... 
Skipping", + "WARNING", + ) + + return None + + return wrapper class ResourceProvider: @@ -153,42 +156,19 @@ def get_relations(self) -> List[ResourceEdge]: return self.relations_found -def get_name_tag(d) -> Optional[str]: - return get_tag(d, "Name") - - -def get_tag(d, tag_name) -> Optional[str]: - for k, v in d.items(): - if k == "Tags": - for value in v: - if value["Key"] == tag_name: - return value["Value"] - - return None - - -def generate_session(profile_name): - try: - return boto3.Session(profile_name=profile_name) - # pylint: disable=broad-except - except Exception as e: - message = "You must configure awscli before use this script.\nError: {0}".format( - str(e) - ) - exit_critical(message) - - def exit_critical(message): log_critical(message) raise SystemExit def log_critical(message): - print(bcolors.colors.get("FAIL"), message, bcolors.colors.get("ENDC"), sep="") + message_handler(message, "FAIL") def message_handler(message, position): + _LOG_SEMAPHORE.acquire() print(bcolors.colors.get(position), message, bcolors.colors.get("ENDC"), sep="") + _LOG_SEMAPHORE.release() # pylint: disable=inconsistent-return-statements @@ -246,3 +226,27 @@ def parse_filters(arg_filters) -> List[Filterable]: _add_filter(filters, is_tag, full_name, "".join(val_buffer)) return filters + + +class BaseCommand(ABC): + def run( + self, + diagram: bool, + verbose: bool, + services: List[str], + filters: List[Filterable], + ): + raise NotImplementedError() + + +class Object(object): + pass + + +class BaseOptions(Object): + verbose: bool + filters: List[Filterable] + + def __init__(self, verbose: bool, filters: List[Filterable]): + self.verbose = verbose + self.filters = filters diff --git a/cloudiscovery/shared/common_aws.py b/cloudiscovery/shared/common_aws.py index 7872c46..02eb2b9 100644 --- a/cloudiscovery/shared/common_aws.py +++ b/cloudiscovery/shared/common_aws.py @@ -1,13 +1,35 @@ +import importlib +import inspect +import os +from concurrent.futures.thread import ThreadPoolExecutor +from os.path import dirname +from typing import List, Dict, Optional + import botocore.exceptions +import boto3 +from boto3 import Session from cachetools import TTLCache -from provider.vpc.command import VpcOptions - +from shared.command import execute_provider, filter_resources, filter_relations +from shared.common import ( + ResourceCache, + message_handler, + ResourceTag, + ResourceProvider, + Resource, + ResourceEdge, + ResourceDigest, + Filterable, + exit_critical, + BaseCommand, +) +from shared.diagram import BaseDiagram +from shared.report import Report SUBNET_CACHE = TTLCache(maxsize=1024, ttl=60) -def describe_subnet(vpc_options: VpcOptions, subnet_ids): +def describe_subnet(vpc_options, subnet_ids): if not isinstance(subnet_ids, list): subnet_ids = [subnet_ids] @@ -21,3 +43,335 @@ def describe_subnet(vpc_options: VpcOptions, subnet_ids): return subnets except botocore.exceptions.ClientError: return None + + +def aws_verbose(): + """ + Boto3 only provides usable information in DEBUG mode + Using empty name it catchs debug from boto3/botocore + TODO: Open a ticket in boto3/botocore project to provide more information at other levels of debugging + """ + boto3.set_stream_logger(name="") + + +class BaseAwsOptions: + session: boto3.Session + region_name: str + + def __init__(self, session, region_name): + """ + Base AWS options + + :param session: + :param region_name: + """ + self.session = session + self.region_name = region_name + + def client(self, service_name: str): + return 
self.session.client(service_name, region_name=self.region_name) + + def resulting_file_name(self, suffix): + return "{}_{}_{}".format(self.account_number(), self.region_name, suffix) + + def account_number(self): + client = self.session.client("sts", region_name=self.region_name) + account_id = client.get_caller_identity()["Account"] + return account_id + + +class GlobalParameters: + def __init__(self, session, region: str, path: str): + self.region = region + self.session = session.client("ssm", region_name="us-east-1") + self.path = path + self.cache = ResourceCache() + + def get_parameters_by_path(self, next_token=None): + + params = {"Path": self.path, "Recursive": True, "MaxResults": 10} + if next_token is not None: + params["NextToken"] = next_token + + return self.session.get_parameters_by_path(**params) + + def parameters(self): + next_token = None + while True: + response = self.get_parameters_by_path(next_token) + parameters = response["Parameters"] + if not parameters: + break + for parameter in parameters: + yield parameter + if "NextToken" not in response: + break + next_token = response["NextToken"] + + def paths(self): + + cache_key = "aws_paths_" + self.region + cache = self.cache.get_key(cache_key) + + if cache is not None: + return cache + + message_handler( + "Fetching available resources in region {} to cache...".format(self.region), + "HEADER", + ) + paths_found = [] + paths = self.parameters() + for path in paths: + paths_found.append(path["Value"]) + + self.cache.set_key(key=cache_key, value=paths_found, expire=86400) + return paths_found + + +class BaseAwsCommand(BaseCommand): + def __init__(self, region_names, session): + """ + Base class for discovery command + + :param region_names: + :param session: + """ + self.region_names: List[str] = region_names + self.session: Session = session + + def run( + self, + diagram: bool, + verbose: bool, + services: List[str], + filters: List[Filterable], + ): + raise NotImplementedError() + + def init_region_cache(self, region): + # Get and cache SSM services available in specific region + path = "/aws/service/global-infrastructure/regions/" + region + "/services/" + GlobalParameters(session=self.session, region=region, path=path).paths() + + +def resource_tags(resource_data: dict) -> List[ResourceTag]: + if "Tags" in resource_data: + tags_input = resource_data["Tags"] + elif "tags" in resource_data: + tags_input = resource_data["tags"] + elif "TagList" in resource_data: + tags_input = resource_data["TagList"] + elif "TagSet" in resource_data: + tags_input = resource_data["TagSet"] + else: + tags_input = None + + tags = [] + if isinstance(tags_input, list): + tags = resource_tags_from_tuples(tags_input) + elif isinstance(tags_input, dict): + tags = resource_tags_from_dict(tags_input) + + return tags + + +def resource_tags_from_tuples(tuples: List[Dict[str, str]]) -> List[ResourceTag]: + """ + List of key-value tuples that store tags, syntax: + [ + { + 'Key': 'string', + 'Value': 'string', + ... + }, + ] + OR + [ + { + 'key': 'string', + 'value': 'string', + ... 
+ }, + ] + """ + result = [] + for tuple_elem in tuples: + if "Key" in tuple_elem and "Value" in tuple_elem: + result.append(ResourceTag(key=tuple_elem["Key"], value=tuple_elem["Value"])) + elif "key" in tuple_elem and "value" in tuple_elem: + result.append(ResourceTag(key=tuple_elem["key"], value=tuple_elem["value"])) + return result + + +def resource_tags_from_dict(tags: Dict[str, str]) -> List[ResourceTag]: + """ + List of key-value dict that store tags, syntax: + { + 'string': 'string' + } + """ + result = [] + for key, value in tags.items(): + result.append(ResourceTag(key=key, value=value)) + return result + + +def get_name_tag(d) -> Optional[str]: + return get_tag(d, "Name") + + +def get_tag(d, tag_name) -> Optional[str]: + for k, v in d.items(): + if k in ("Tags", "TagList"): + for value in v: + if value["Key"] == tag_name: + return value["Value"] + + return None + + +def generate_session(profile_name): + try: + return boto3.Session(profile_name=profile_name) + # pylint: disable=broad-except + except Exception as e: + message = "You must configure awscli before use this script.\nError: {0}".format( + str(e) + ) + exit_critical(message) + + +def get_paginator(client, operation_name, resource_type, filters=None): + # Checking if can paginate + if client.can_paginate(operation_name): + paginator = client.get_paginator(operation_name) + if resource_type == "aws_iam_policy": + pages = paginator.paginate( + Scope="Local" + ) # hack to list only local IAM policies - aws_all + else: + if filters: + pages = paginator.paginate(**filters) + else: + pages = paginator.paginate() + else: + return False + + return pages + + +class AwsCommandRunner(object): + def __init__(self, services=None, filters=None): + """ + Base class command execution + + :param services: + :param filters: + """ + self.services: List[str] = services + self.filters: List[Filterable] = filters + + # pylint: disable=too-many-locals,too-many-arguments + def run( + self, + provider: str, + options: BaseAwsOptions, + diagram_builder: BaseDiagram, + title: str, + filename: str, + ): + """ + Executes a command. + + The project's development pattern is a file with the respective name of the parent + resource (e.g. compute, network), classes of child resources inside this file and run() method to execute + respective check. So it makes sense to load dynamically. + """ + # Iterate to get all modules + message_handler("\nInspecting resources", "HEADER") + providers = [] + for name in os.listdir( + dirname(__file__) + "/../provider/" + provider + "/resource" + ): + if name.endswith(".py"): + # strip the extension + module = name[:-3] + + # Load and call all run check + for nameclass, cls in inspect.getmembers( + importlib.import_module( + "provider." + provider + ".resource." 
+ module + ), + inspect.isclass, + ): + if ( + issubclass(cls, ResourceProvider) + and cls is not ResourceProvider + ): + providers.append((nameclass, cls)) + providers.sort(key=lambda x: x[0]) + + all_resources: List[Resource] = [] + resource_relations: List[ResourceEdge] = [] + + with ThreadPoolExecutor(15) as executor: + provider_results = executor.map( + lambda data: execute_provider(options, data), providers + ) + + for provider_results in provider_results: + if provider_results[0] is not None: + all_resources.extend(provider_results[0]) + if provider_results[1] is not None: + resource_relations.extend(provider_results[1]) + + unique_resources_dict: Dict[ResourceDigest, Resource] = dict() + for resource in all_resources: + unique_resources_dict[resource.digest] = resource + + unique_resources = list(unique_resources_dict.values()) + + unique_resources.sort(key=lambda x: x.group + x.digest.type + x.name) + resource_relations.sort( + key=lambda x: x.from_node.type + + x.from_node.id + + x.to_node.type + + x.to_node.id + ) + + # Resource filtering and sorting + filtered_resources = filter_resources(unique_resources, self.filters) + filtered_resources.sort(key=lambda x: x.group + x.digest.type + x.name) + + # Relationships filtering and sorting + filtered_relations = filter_relations(filtered_resources, resource_relations) + filtered_relations.sort( + key=lambda x: x.from_node.type + + x.from_node.id + + x.to_node.type + + x.to_node.id + ) + + # Diagram integration + diagram_builder.build( + resources=filtered_resources, + resource_relations=filtered_relations, + title=title, + filename=filename, + ) + + # TODO: Generate reports in json/csv/pdf/xls + report = Report() + report.general_report( + resources=filtered_resources, resource_relations=filtered_relations + ), + report.html_report( + resources=filtered_resources, + resource_relations=filtered_relations, + title=title, + filename=filename, + ) + + # TODO: Export in csv/json/yaml/tf... future... + # ....exporttf(checks).... 
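The `ResourceAvailable` decorator introduced in `shared/common.py` and the `GlobalParameters`/`ResourceCache` pair above cooperate: the list of services offered in a region is read once from the SSM public parameters under `/aws/service/global-infrastructure/regions/<region>/services/`, cached for a day under the key `aws_paths_<region>`, and each `get_resources` call is skipped when its service is not in that list. A rough sketch of the same decorator pattern, with a plain in-memory dictionary standing in for the disk cache (the checker class and service data here are illustrative, not the project's API):

```python
import functools
from typing import Callable, Dict, List

# Illustrative stand-in for the cached "aws_paths_<region>" entries.
_REGION_SERVICES: Dict[str, List[str]] = {"us-east-1": ["ec2", "s3"]}


class ResourceAvailable:
    """Skip the wrapped check when the service is absent in the caller's region."""

    def __init__(self, services: str):
        self.services = services

    def __call__(self, func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(checker, *args, **kwargs):
            region = getattr(checker, "region_name", "us-east-1")
            if self.services in _REGION_SERVICES.get(region, []):
                return func(checker, *args, **kwargs)
            print(f"Check {func.__qualname__} not available in this region... Skipping")
            return None

        return wrapper


class FakeSagemakerChecker:
    """Hypothetical checker; sa-east-1 has no 'sagemaker' entry in the stand-in cache."""

    region_name = "sa-east-1"

    @ResourceAvailable(services="sagemaker")
    def get_resources(self) -> List[str]:
        return ["notebook-1"]


if __name__ == "__main__":
    print(FakeSagemakerChecker().get_resources())  # prints the skip message, then None
```

Returning `None` from a skipped check is what lets the runner above treat missing services and empty results uniformly (`if provider_results[0] is not None`).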
diff --git a/cloudiscovery/shared/diagram.py b/cloudiscovery/shared/diagram.py index 34671b2..c535970 100644 --- a/cloudiscovery/shared/diagram.py +++ b/cloudiscovery/shared/diagram.py @@ -3,7 +3,7 @@ from diagrams import Diagram, Cluster, Edge -from shared.common import Resource, ResourceEdge, ResourceDigest +from shared.common import Resource, ResourceEdge, ResourceDigest, message_handler from shared.error_handler import exception PATH_DIAGRAM_OUTPUT = "./assets/diagrams/" @@ -214,6 +214,10 @@ class Mapsources: "aws_iotsitewise": "IotSitewise", "aws_neptune_cluster": "Neptune", "aws_alexa_for_business": "AlexaForBusiness", + "aws_customer_gateway": "SiteToSiteVpn", + "aws_vpn_connection": "SiteToSiteVpn", + "aws_vpn_gateway": "SiteToSiteVpn", + "aws_vpn_client_endpoint": "ClientVpn", } @@ -282,16 +286,21 @@ def generate_diagram( ordered_resources, initial_resource_relations ) + output_filename = PATH_DIAGRAM_OUTPUT + filename with Diagram( name=title, - filename=PATH_DIAGRAM_OUTPUT + filename, + filename=output_filename, direction="TB", + show=False, graph_attr={"nodesep": "2.0", "ranksep": "1.0", "splines": "curved"}, ) as d: d.dot.engine = self.engine self.draw_diagram(ordered_resources=ordered_resources, relations=relations) + message_handler("\n\nPNG diagram generated", "HEADER") + message_handler("Check your diagram: " + output_filename + ".png", "OKBLUE") + def draw_diagram(self, ordered_resources, relations): already_drawn_elements = {} diff --git a/cloudiscovery/shared/error_handler.py b/cloudiscovery/shared/error_handler.py index 0be51cd..835182f 100644 --- a/cloudiscovery/shared/error_handler.py +++ b/cloudiscovery/shared/error_handler.py @@ -11,13 +11,17 @@ def wrapper(*args, **kwargs): return func(*args, **kwargs) # pylint: disable=broad-except except Exception as e: - if "Could not connect to the endpoint URL" in str(e): + exception_str = str(e) + if ( + "Could not connect to the endpoint URL" in exception_str + or "the specified service does not exist" in exception_str + ): message = "\nThe service {} is not available in this region".format( func.__qualname__ ) else: message = "\nError running check {}. 
Error message {}".format( - func.__qualname__, str(e) + func.__qualname__, exception_str ) log_critical(message) diff --git a/cloudiscovery/shared/report.py b/cloudiscovery/shared/report.py index 9edbcc8..977517e 100644 --- a/cloudiscovery/shared/report.py +++ b/cloudiscovery/shared/report.py @@ -26,25 +26,74 @@ def general_report( message_handler("\n\nFound resources", "HEADER") for resource in resources: - message = "resource type: {} - resource id: {} - resource name: {} - resource details: {}".format( - resource.digest.type, - resource.digest.id, - resource.name, - resource.details, - ) - - message_handler(message, "OKBLUE") - - message_handler("\n\nFound relations", "HEADER") - for resource_relation in resource_relations: - message = "resource type: {} - resource id: {} -> resource type: {} - resource id: {}".format( - resource_relation.from_node.type, - resource_relation.from_node.id, - resource_relation.to_node.type, - resource_relation.to_node.id, - ) - - message_handler(message, "OKBLUE") + # Report to limit + if resource.limits: + usage = ( + str(resource.limits.usage) + + " - " + + str(resource.limits.percent) + + "%" + ) + # pylint: disable=line-too-long + message_handler( + "service: {} - quota code: {} - quota name: {} - aws default quota: {} - applied quota: {} - usage: {}".format( # noqa: E501 + resource.limits.service, + resource.limits.quota_code, + resource.limits.quota_name, + resource.limits.aws_limit, + resource.limits.local_limit, + usage, + ), + "OKBLUE", + ) + elif resource.attributes: + # pylint: disable=too-many-format-args + message_handler( + "\nservice: {} - type: {} - id: {} - resource name: {}".format( + resource.group, + resource.digest.type, + resource.digest.id, + resource.name, + resource.details, + ), + "OKBLUE", + ) + for ( + resource_attr_key, + resource_attr_value, + ) in resource.attributes.items(): + message_handler( + "service: {} - type: {} - id: {} -> {}: {}".format( + resource.group, + resource.digest.type, + resource.digest.id, + resource_attr_key, + resource_attr_value, + ), + "OKBLUE", + ) + else: + message_handler( + "type: {} - id: {} - name: {} - details: {}".format( + resource.digest.type, + resource.digest.id, + resource.name, + resource.details, + ), + "OKBLUE", + ) + + if resource_relations: + message_handler("\n\nFound relations", "HEADER") + for resource_relation in resource_relations: + message = "type: {} - id: {} -> type: {} - id: {}".format( + resource_relation.from_node.type, + resource_relation.from_node.id, + resource_relation.to_node.type, + resource_relation.to_node.id, + ) + + message_handler(message, "OKBLUE") @exception def html_report( @@ -69,19 +118,29 @@ def html_report( with open(image_name, "rb") as image_file: diagram_image = base64.b64encode(image_file.read()).decode("utf-8") - html_output = dir_template.get_template("report_html.html").render( - default_name=title, - resources_found=resources, - resources_relations=resource_relations, - diagram_image=diagram_image, - ) - - self.make_directories() - - name_output = PATH_REPORT_HTML_OUTPUT + filename + ".html" - - with open(name_output, "w") as file_output: - file_output.write(html_output) - - message_handler("\n\nHTML report generated", "HEADER") - message_handler("Check your HTML report: " + name_output, "OKBLUE") + group_title = "Group" + if resources: + if resources[0].limits: + html_output = dir_template.get_template("report_limits.html").render( + default_name=title, resources_found=resources + ) + else: + if resources[0].attributes: + group_title = "Service" 
+ html_output = dir_template.get_template("report_html.html").render( + default_name=title, + resources_found=resources, + resources_relations=resource_relations, + diagram_image=diagram_image, + group_title=group_title, + ) + + self.make_directories() + + name_output = PATH_REPORT_HTML_OUTPUT + filename + ".html" + + with open(name_output, "w") as file_output: + file_output.write(html_output) + + message_handler("\n\nHTML report generated", "HEADER") + message_handler("Check your HTML report: " + name_output, "OKBLUE") diff --git a/cloudiscovery/templates/report_html.html b/cloudiscovery/templates/report_html.html index f1cdf0d..2945f76 100644 --- a/cloudiscovery/templates/report_html.html +++ b/cloudiscovery/templates/report_html.html @@ -5,7 +5,7 @@ Type -Group +{{ group_title }} Id Name Details @@ -17,7 +17,12 @@ {{ resource_found.group}} {{ resource_found.digest.id}} {{ resource_found.name}} - {{ resource_found.detail}} + + {{ resource_found.details}} + {% for attribute_key, attribute_value in resource_found.attributes.items() %} + {{ attribute_key}}: {{ attribute_value}}
+            {%- endfor %}
+        </td>
         <td>
         {% for tag in resource_found.tags %}
             {{ tag.key}}: {{ tag.value}}<br>
@@ -27,25 +32,27 @@
 {%- endfor %}
-</table>
-
-<h3>Found relations</h3>
-
-<table>
-    <tr>
-        <th>From type</th>
-        <th>From id</th>
-        <th>To type</th>
-        <th>To id</th>
-    </tr>
-{% for resource_relations in resources_relations %}
-    <tr>
-        <td>{{ resource_relations.from_node.type}}</td>
-        <td>{{ resource_relations.from_node.id}}</td>
-        <td>{{ resource_relations.to_node.type}}</td>
-        <td>{{ resource_relations.to_node.id}}</td>
-    </tr>
-{%- endfor %}
-</table>
+</table>
+{% if resources_relations|length > 0 %}
+
+<h3>Found relations</h3>
+
+<table>
+    <tr>
+        <th>From type</th>
+        <th>From id</th>
+        <th>To type</th>
+        <th>To id</th>
+    </tr>
+    {% for resource_relations in resources_relations %}
+    <tr>
+        <td>{{ resource_relations.from_node.type}}</td>
+        <td>{{ resource_relations.from_node.id}}</td>
+        <td>{{ resource_relations.to_node.type}}</td>
+        <td>{{ resource_relations.to_node.id}}</td>
+    </tr>
+    {%- endfor %}
+</table>
+{%endif %}
 {% if diagram_image is not none %}
 
 <h3>Diagram</h3>
 
 {% set base64img = "data:image/png;base64," + diagram_image %}
diff --git a/cloudiscovery/templates/report_limits.html b/cloudiscovery/templates/report_limits.html
new file mode 100644
index 0000000..22fcdda
--- /dev/null
+++ b/cloudiscovery/templates/report_limits.html
@@ -0,0 +1,46 @@
+<h1>cloudiscovery - A tool to help you discover resources in the cloud environment.</h1>
+
+<h2>{{ default_name }}</h2>
+
+<h3>Limits</h3>
+
+<table>
+    <tr>
+        <th>Service</th>
+        <th>Quota code</th>
+        <th>Quota name</th>
+        <th>AWS default quota</th>
+        <th>Applied quota</th>
+        <th>Usage</th>
+        <th>Usage percent</th>
+    </tr>
+{% for resource_found in resources_found %}
+    {% set percent = resource_found.limits.percent %}
+
+    {% if percent <= 70 %}
+        {% set color = "rgb(0,128,0)" %}
+        {% set message = "OK" %}
+    {% elif percent > 70 and percent <= 90 %}
+        {% set color = "rgb(0,0,139)" %}
+        {% set message = "Attention" %}
+    {% elif percent > 90 %}
+        {% set color = "rgb(255,0,0)" %}
+        {% set message = "Risk" %}
+    {% endif %}
+
+    <tr>
+        <td>{{ resource_found.limits.service}}</td>
+        <td>{{ resource_found.limits.quota_code}}</td>
+        <td>{{ resource_found.limits.quota_name}}</td>
+        <td>{{ resource_found.limits.aws_limit}}</td>
+        <td>{{ resource_found.limits.local_limit}}</td>
+        <td>{{ resource_found.limits.usage}}</td>
+        <td>{{ percent }}% - {{ message }}
+            <svg width="100" height="12">
+                <rect width="{{ percent }}" height="12" style="fill:{{ color }}" />
+                Sorry, your browser does not support inline SVG.
+            </svg>
+        </td>
+    </tr>
+{%- endfor %}
+</table>
\ No newline at end of file diff --git a/cloudiscovery/tests/provider/all/resource/test_all.py b/cloudiscovery/tests/provider/all/resource/test_all.py new file mode 100644 index 0000000..d6a4506 --- /dev/null +++ b/cloudiscovery/tests/provider/all/resource/test_all.py @@ -0,0 +1,70 @@ +from unittest import TestCase + +from assertpy import assert_that + +from provider.all.resource.all import ( + retrieve_resource_name, + retrieve_resource_id, + last_singular_name_element, + operation_allowed, + build_resource_type, +) + + +class TestAllDiagram(TestCase): + def test_last_singular_name_element(self): + assert_that(last_singular_name_element("ListValues")).is_equal_to("Value") + assert_that(last_singular_name_element("DescribeSomeValues")).is_equal_to( + "Value" + ) + + def test_retrieve_resource_name(self): + assert_that( + retrieve_resource_name({"name": "value"}, "ListValues") + ).is_equal_to("value") + + assert_that( + retrieve_resource_name({"ValueName": "value"}, "ListValues") + ).is_equal_to("value") + assert_that( + retrieve_resource_name({"SomeName": "value"}, "ListValues") + ).is_equal_to("value") + + def test_retrieve_resource_id(self): + assert_that( + retrieve_resource_id({"id": "123"}, "ListValues", "value") + ).is_equal_to("123") + + assert_that( + retrieve_resource_id({"arn": "123"}, "ListValues", "value") + ).is_equal_to("123") + + assert_that( + retrieve_resource_id({"ValueName": "value"}, "ListValues", "value") + ).is_equal_to("value") + + assert_that( + retrieve_resource_id({"ValueId": "123"}, "ListValues", "value") + ).is_equal_to("123") + assert_that( + retrieve_resource_id({"ValueArn": "123"}, "ListValues", "value") + ).is_equal_to("123") + assert_that( + retrieve_resource_id({"someId": "123"}, "ListValues", "value") + ).is_equal_to("123") + assert_that( + retrieve_resource_id({"someArn": "123"}, "ListValues", "value") + ).is_equal_to("123") + + def test_operation_allowed(self): + assert_that(operation_allowed(["iam:List*"], "iam", "ListRoles")).is_equal_to( + True + ) + assert_that(operation_allowed(["ecs:List*"], "iam", "ListRoles")).is_equal_to( + False + ) + + def test_build_resource_type(self): + assert_that(build_resource_type("rds", "DescribeDBParameterGroup")).is_equal_to( + "aws_rds_db_parameter_group" + ) diff --git a/requirements.txt b/requirements.txt index bb44048..f1d57be 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,4 +2,5 @@ boto3 ipaddress jinja2<3.0 diagrams>=0.14 -cachetools \ No newline at end of file +cachetools +diskcache \ No newline at end of file diff --git a/setup.py b/setup.py index 557bdf5..698180d 100755 --- a/setup.py +++ b/setup.py @@ -12,7 +12,14 @@ VERSION_RE = re.compile(r"""__version__ = ['"]([0-9.]+)['"]""") -requires = ["boto3", "ipaddress", "diagrams>=0.13", "jinja2<3.0", "cachetools"] +requires = [ + "boto3", + "ipaddress", + "diagrams>=0.13", + "jinja2<3.0", + "cachetools", + "diskcache", +] def get_version():
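`requirements.txt` and `setup.py` both gain `diskcache`, which backs the new `ResourceCache` used for the per-region service lists. A minimal sketch of that set/get-with-expiry pattern; the cache directory and key below are illustrative, not the paths cloudiscovery actually writes to:

```python
from diskcache import Cache

# Illustrative directory; cloudiscovery keeps its cache under assets/.cache/.
cache = Cache(directory="./.cache-demo")

key = "aws_paths_us-east-1"
if key not in cache:
    # In the real flow this list comes from the SSM global-infrastructure parameters.
    cache.set(key=key, value=["ec2", "s3", "sagemaker"], expire=86400)  # one day

print("sagemaker" in cache[key])  # True while the entry has not expired
cache.close()
```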